language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 2553,
"end": 3132
} | class ____(BaseModel):
class Config:
extra = Extra.allow
type: Literal["CustomAuthenticator"]
class_name: str = Field(
...,
description="Fully-qualified name of the class that will be implementing the custom authentication strategy. Has to be a sub class of DeclarativeAuthenticator. The format is `source_<name>.<package>.<class_name>`.",
examples=["source_railz.components.ShortLivedTokenAuthenticator"],
title="Class Name",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| CustomAuthenticator |
python | kamyu104__LeetCode-Solutions | Python/decremental-string-concatenation.py | {
"start": 34,
"end": 1052
} | class ____(object):
def minimizeConcatenatedLength(self, words):
"""
:type words: List[str]
:rtype: int
"""
dp = [[float("-inf")]*26 for _ in xrange(2)]
dp[0][ord(words[0][-1])-ord('a')] = dp[1][ord(words[0][0])-ord('a')] = 0
for i in xrange(1, len(words)):
new_dp = [[float("-inf")]*26 for _ in xrange(2)]
for right in xrange(2):
for c in xrange(26):
if dp[right][c] == float("-inf"):
continue
l = c if right else ord(words[i-1][0])-ord('a')
r = c if not right else ord(words[i-1][-1])-ord('a')
new_dp[0][r] = max(new_dp[0][r], dp[right][c]+int(ord(words[i][-1])-ord('a') == l))
new_dp[1][l] = max(new_dp[1][l], dp[right][c]+int(r == ord(words[i][0])-ord('a')))
dp = new_dp
return sum(len(w) for w in words)-max(dp[right][c] for right in xrange(2) for c in xrange(26))
| Solution |
python | python-pillow__Pillow | docs/example/DdsImagePlugin.py | {
"start": 5963,
"end": 7521
} | class ____(ImageFile.ImageFile):
format = "DDS"
format_description = "DirectDraw Surface"
def _open(self) -> None:
if not _accept(self.fp.read(4)):
msg = "not a DDS file"
raise SyntaxError(msg)
(header_size,) = struct.unpack("<I", self.fp.read(4))
if header_size != 124:
msg = f"Unsupported header size {repr(header_size)}"
raise OSError(msg)
header_bytes = self.fp.read(header_size - 4)
if len(header_bytes) != 120:
msg = f"Incomplete header: {len(header_bytes)} bytes"
raise OSError(msg)
header = BytesIO(header_bytes)
flags, height, width = struct.unpack("<3I", header.read(12))
self._size = (width, height)
self._mode = "RGBA"
pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
struct.unpack("<11I", header.read(44)) # reserved
# pixel format
pfsize, pfflags = struct.unpack("<2I", header.read(8))
fourcc = header.read(4)
bitcount, rmask, gmask, bmask, amask = struct.unpack("<5I", header.read(20))
if fourcc == b"DXT1":
self.decoder = "DXT1"
elif fourcc == b"DXT5":
self.decoder = "DXT5"
else:
msg = f"Unimplemented pixel format {repr(fourcc)}"
raise NotImplementedError(msg)
self.tile = [
ImageFile._Tile(self.decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))
]
def load_seek(self, pos: int) -> None:
pass
| DdsImageFile |
python | huggingface__transformers | tests/models/speecht5/test_modeling_speecht5.py | {
"start": 12426,
"end": 28781
} | class ____(ModelTesterMixin, unittest.TestCase, GenerationTesterMixin):
all_model_classes = (SpeechT5ForSpeechToText,) if is_torch_available() else ()
is_encoder_decoder = True
def setUp(self):
self.model_tester = SpeechT5ForSpeechToTextTester(self)
self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_model_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
@unittest.skip(reason="skipped because of dropout")
def test_batching_equivalence(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
subsampled_encoder_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(
encoder_seq_length
)
subsampled_encoder_key_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(
encoder_key_length
)
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
out_len = len(outputs)
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
subsampled_encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length],
)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"input_values",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(["encoder_outputs"])
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
subsampled_seq_length = model.speecht5.encoder.prenet._get_feat_extract_output_lengths(seq_length)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[subsampled_seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# this model has no inputs_embeds
@unittest.skip(reason="Model has no input_embeds")
def test_inputs_embeds(self):
pass
def test_resize_embeddings_untied(self):
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
self.skipTest(reason="test_resize_embeddings is set to False")
original_config.tie_word_embeddings = False
# if model cannot untied embeddings -> leave test
if original_config.tie_word_embeddings:
self.skipTest(reason="Model cannot untie embeddings")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
model.eval()
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
def test_resize_tokens_embeddings(self):
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
self.skipTest(reason="test_resize_embeddings is set to False")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.vocab_size
# Retrieve the embeddings and clone theme
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# make sure that decoder_input_ids are resized
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
@unittest.skip(reason="Decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
pass
@unittest.skip(reason="Training is not supported yet")
def test_training(self):
pass
@unittest.skip(reason="Training is not supported yet")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
@unittest.skip(reason="Temporarily broken") # TODO (joao, eustache): have a look at this test
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="Very flaky") # TODO (joao, eustache): have a look at this test
def test_generate_continue_from_past_key_values(self):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
| SpeechT5ForSpeechToTextTest |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/string.py | {
"start": 11903,
"end": 14047
} | class ____(BasePromptTemplate, ABC):
"""String prompt that exposes the format method, returning a prompt."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "prompts", "base"]`
"""
return ["langchain", "prompts", "base"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return StringPromptValue(text=self.format(**kwargs))
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return StringPromptValue(text=await self.aformat(**kwargs))
def pretty_repr(
self,
html: bool = False, # noqa: FBT001,FBT002
) -> str:
"""Get a pretty representation of the prompt.
Args:
html: Whether to return an HTML-formatted string.
Returns:
A pretty representation of the prompt.
"""
# TODO: handle partials
dummy_vars = {
input_var: "{" + f"{input_var}" + "}" for input_var in self.input_variables
}
if html:
dummy_vars = {
k: get_colored_text(v, "yellow") for k, v in dummy_vars.items()
}
return self.format(**dummy_vars)
def pretty_print(self) -> None:
"""Print a pretty representation of the prompt."""
print(self.pretty_repr(html=is_interactive_env())) # noqa: T201
def is_subsequence(child: Sequence, parent: Sequence) -> bool:
"""Return True if child is subsequence of parent."""
if len(child) == 0 or len(parent) == 0:
return False
if len(parent) < len(child):
return False
return all(child[i] == parent[i] for i in range(len(child)))
| StringPromptTemplate |
python | xlwings__xlwings | tests/test_shape.py | {
"start": 521,
"end": 3372
} | class ____(TestBase):
def test_name(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
sh = self.wb1.sheets[0].shapes[0]
self.assertEqual(sh.name, "pic1")
sh.name = "yoyoyo"
self.assertEqual(sh.name, "yoyoyo")
@unittest.skipIf(pathlib is None, "pathlib unavailable")
def test_name_pathlib(self):
filename = pathlib.Path(this_dir) / "sample_picture.png"
self.wb1.sheets[0].pictures.add(filename, name="pic1")
sh = self.wb1.sheets[0].shapes[0]
self.assertEqual(sh.name, "pic1")
sh.name = "yoyoyo"
self.assertEqual(sh.name, "yoyoyo")
def test_coordinates(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(
filename, name="pic1", left=0, top=0, width=200, height=100
)
sh = self.wb1.sheets[0].shapes[0]
for a, init, neu in (
("left", 0, 50),
("top", 0, 50),
("width", 200, 150),
("height", 100, 160),
):
self.assertEqual(getattr(sh, a), init)
setattr(sh, a, neu)
self.assertEqual(getattr(sh, a), neu)
def test_picture_object(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(
self.wb1.sheets[0].shapes[0], self.wb1.sheets[0].shapes["pic1"]
)
def test_delete(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertTrue("pic1" in self.wb1.sheets[0].shapes)
self.wb1.sheets[0].shapes[0].delete()
self.assertFalse("pic1" in self.wb1.sheets[0].shapes)
def test_type(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(self.wb1.sheets[0].shapes[0].type, "picture")
def test_scale_width(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
w, h = int(pic.width), int(pic.height)
self.wb1.sheets[0].shapes["pic1"].scale_width(factor=2)
self.assertEqual(int(pic.width), w * 2)
self.assertEqual(int(pic.height), h * 2)
def test_scale_height(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
w, h = int(pic.width), int(pic.height)
self.wb1.sheets[0].shapes["pic1"].scale_height(factor=2)
self.assertEqual(int(pic.width), w * 2)
self.assertEqual(int(pic.height), h * 2)
| TestShape |
python | python-pillow__Pillow | src/PIL/TiffImagePlugin.py | {
"start": 38988,
"end": 42134
} | class ____(ImageFileDirectory_v2):
"""This class represents the **legacy** interface to a TIFF tag directory.
Exposes a dictionary interface of the tags in the directory::
ifd = ImageFileDirectory_v1()
ifd[key] = 'Some Data'
ifd.tagtype[key] = TiffTags.ASCII
print(ifd[key])
('Some Data',)
Also contains a dictionary of tag types as read from the tiff image file,
:attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v1.tagtype`.
Values are returned as a tuple.
.. deprecated:: 3.0.0
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._legacy_api = True
tags = property(lambda self: self._tags_v1)
tagdata = property(lambda self: self._tagdata)
# defined in ImageFileDirectory_v2
tagtype: dict[int, int]
"""Dictionary of tag types"""
@classmethod
def from_v2(cls, original: ImageFileDirectory_v2) -> ImageFileDirectory_v1:
"""Returns an
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`
instance with the same data as is contained in the original
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2`
instance.
:returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`
"""
ifd = cls(prefix=original.prefix)
ifd._tagdata = original._tagdata
ifd.tagtype = original.tagtype
ifd.next = original.next # an indicator for multipage tiffs
return ifd
def to_v2(self) -> ImageFileDirectory_v2:
"""Returns an
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2`
instance with the same data as is contained in the original
:py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`
instance.
:returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2`
"""
ifd = ImageFileDirectory_v2(prefix=self.prefix)
ifd._tagdata = dict(self._tagdata)
ifd.tagtype = dict(self.tagtype)
ifd._tags_v2 = dict(self._tags_v2)
return ifd
def __contains__(self, tag: object) -> bool:
return tag in self._tags_v1 or tag in self._tagdata
def __len__(self) -> int:
return len(set(self._tagdata) | set(self._tags_v1))
def __iter__(self) -> Iterator[int]:
return iter(set(self._tagdata) | set(self._tags_v1))
def __setitem__(self, tag: int, value: Any) -> None:
for legacy_api in (False, True):
self._setitem(tag, value, legacy_api)
def __getitem__(self, tag: int) -> Any:
if tag not in self._tags_v1: # unpack on the fly
data = self._tagdata[tag]
typ = self.tagtype[tag]
size, handler = self._load_dispatch[typ]
for legacy in (False, True):
self._setitem(tag, handler(self, data, legacy), legacy)
val = self._tags_v1[tag]
if not isinstance(val, (tuple, bytes)):
val = (val,)
return val
# undone -- switch this pointer
ImageFileDirectory = ImageFileDirectory_v1
##
# Image plugin for TIFF files.
| ImageFileDirectory_v1 |
python | django__django | tests/gis_tests/rasterapp/test_rasterfield.py | {
"start": 737,
"end": 17427
} | class ____(TransactionTestCase):
available_apps = ["gis_tests.rasterapp"]
def setUp(self):
rast = GDALRaster(
{
"srid": 4326,
"origin": [0, 0],
"scale": [-1, 1],
"skew": [0, 0],
"width": 5,
"height": 5,
"nr_of_bands": 2,
"bands": [{"data": range(25)}, {"data": range(25, 50)}],
}
)
model_instance = RasterModel.objects.create(
rast=rast,
rastprojected=rast,
geom="POINT (-95.37040 29.70486)",
)
RasterRelatedModel.objects.create(rastermodel=model_instance)
def test_field_null_value(self):
"""
Test creating a model where the RasterField has a null value.
"""
r = RasterModel.objects.create(rast=None)
r.refresh_from_db()
self.assertIsNone(r.rast)
def test_access_band_data_directly_from_queryset(self):
RasterModel.objects.create(rast=JSON_RASTER)
qs = RasterModel.objects.all()
qs[0].rast.bands[0].data()
def test_deserialize_with_pixeltype_flags(self):
no_data = 3
rast = GDALRaster(
{
"srid": 4326,
"origin": [0, 0],
"scale": [-1, 1],
"skew": [0, 0],
"width": 1,
"height": 1,
"nr_of_bands": 1,
"bands": [{"data": [no_data], "nodata_value": no_data}],
}
)
r = RasterModel.objects.create(rast=rast)
RasterModel.objects.filter(pk=r.pk).update(
rast=Func(F("rast"), function="ST_SetBandIsNoData"),
)
r.refresh_from_db()
band = r.rast.bands[0].data()
if numpy:
band = band.flatten().tolist()
self.assertEqual(band, [no_data])
self.assertEqual(r.rast.bands[0].nodata_value, no_data)
def test_model_creation(self):
"""
Test RasterField through a test model.
"""
# Create model instance from JSON raster
r = RasterModel.objects.create(rast=JSON_RASTER)
r.refresh_from_db()
# Test raster metadata properties
self.assertEqual((5, 5), (r.rast.width, r.rast.height))
self.assertEqual([0.0, -1.0, 0.0, 0.0, 0.0, 1.0], r.rast.geotransform)
self.assertIsNone(r.rast.bands[0].nodata_value)
# Compare srs
self.assertEqual(r.rast.srs.srid, 4326)
# Compare pixel values
band = r.rast.bands[0].data()
# If numpy, convert result to list
if numpy:
band = band.flatten().tolist()
# Loop through rows in band data and assert single
# value is as expected.
self.assertEqual(
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
16.0,
17.0,
18.0,
19.0,
20.0,
21.0,
22.0,
23.0,
24.0,
],
band,
)
def test_implicit_raster_transformation(self):
"""
Test automatic transformation of rasters with srid different from the
field srid.
"""
# Parse json raster
rast = json.loads(JSON_RASTER)
# Update srid to another value
rast["srid"] = 3086
# Save model and get it from db
r = RasterModel.objects.create(rast=rast)
r.refresh_from_db()
# Confirm raster has been transformed to the default srid
self.assertEqual(r.rast.srs.srid, 4326)
# Confirm geotransform is in lat/lon
expected = [
-87.9298551266551,
9.459646421449934e-06,
0.0,
23.94249275457565,
0.0,
-9.459646421449934e-06,
]
for val, exp in zip(r.rast.geotransform, expected):
self.assertAlmostEqual(exp, val)
def test_verbose_name_arg(self):
"""
RasterField should accept a positional verbose name argument.
"""
self.assertEqual(
RasterModel._meta.get_field("rast").verbose_name, "A Verbose Raster Name"
)
def test_all_gis_lookups_with_rasters(self):
"""
Evaluate all possible lookups for all input combinations (i.e.
raster-raster, raster-geom, geom-raster) and for projected and
unprojected coordinate systems. This test just checks that the lookup
can be called, but doesn't check if the result makes logical sense.
"""
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
# Create test raster and geom.
rast = GDALRaster(json.loads(JSON_RASTER))
stx_pnt = GEOSGeometry("POINT (-95.370401017314293 29.704867409475465)", 4326)
stx_pnt.transform(3086)
lookups = [
(name, lookup)
for name, lookup in BaseSpatialField.get_lookups().items()
if issubclass(lookup, GISLookup)
]
self.assertNotEqual(lookups, [], "No lookups found")
# Loop through all the GIS lookups.
for name, lookup in lookups:
# Construct lookup filter strings.
combo_keys = [
field + name
for field in [
"rast__",
"rast__",
"rastprojected__0__",
"rast__",
"rastprojected__",
"geom__",
"rast__",
]
]
if issubclass(lookup, DistanceLookupBase):
# Set lookup values for distance lookups.
combo_values = [
(rast, 50, "spheroid"),
(rast, 0, 50, "spheroid"),
(rast, 0, D(km=1)),
(stx_pnt, 0, 500),
(stx_pnt, D(km=1000)),
(rast, 500),
(json.loads(JSON_RASTER), 500),
]
elif name == "relate":
# Set lookup values for the relate lookup.
combo_values = [
(rast, "T*T***FF*"),
(rast, 0, "T*T***FF*"),
(rast, 0, "T*T***FF*"),
(stx_pnt, 0, "T*T***FF*"),
(stx_pnt, "T*T***FF*"),
(rast, "T*T***FF*"),
(json.loads(JSON_RASTER), "T*T***FF*"),
]
elif name == "isvalid":
# The isvalid lookup doesn't make sense for rasters.
continue
elif PostGISOperations.gis_operators[name].func:
# Set lookup values for all function based operators.
combo_values = [
rast,
(rast, 0),
(rast, 0),
(stx_pnt, 0),
stx_pnt,
rast,
json.loads(JSON_RASTER),
]
else:
# Override band lookup for these, as it's not supported.
combo_keys[2] = "rastprojected__" + name
# Set lookup values for all other operators.
combo_values = [
rast,
None,
rast,
stx_pnt,
stx_pnt,
rast,
json.loads(JSON_RASTER),
]
# Create query filter combinations.
self.assertEqual(
len(combo_keys),
len(combo_values),
"Number of lookup names and values should be the same",
)
combos = [x for x in zip(combo_keys, combo_values) if x[1]]
self.assertEqual(
[(n, x) for n, x in enumerate(combos) if x in combos[:n]],
[],
"There are repeated test lookups",
)
combos = [{k: v} for k, v in combos]
for combo in combos:
# Apply this query filter.
qs = RasterModel.objects.filter(**combo)
# Evaluate normal filter qs.
self.assertIn(qs.count(), [0, 1])
# Evaluate on conditional Q expressions.
qs = RasterModel.objects.filter(Q(**combos[0]) & Q(**combos[1]))
self.assertIn(qs.count(), [0, 1])
def test_dwithin_gis_lookup_output_with_rasters(self):
"""
Check the logical functionality of the dwithin lookup for different
input parameters.
"""
# Create test raster and geom.
rast = GDALRaster(json.loads(JSON_RASTER))
stx_pnt = GEOSGeometry("POINT (-95.370401017314293 29.704867409475465)", 4326)
stx_pnt.transform(3086)
# Filter raster with different lookup raster formats.
qs = RasterModel.objects.filter(rastprojected__dwithin=(rast, D(km=1)))
self.assertEqual(qs.count(), 1)
qs = RasterModel.objects.filter(
rastprojected__dwithin=(json.loads(JSON_RASTER), D(km=1))
)
self.assertEqual(qs.count(), 1)
qs = RasterModel.objects.filter(rastprojected__dwithin=(JSON_RASTER, D(km=1)))
self.assertEqual(qs.count(), 1)
# Filter in an unprojected coordinate system.
qs = RasterModel.objects.filter(rast__dwithin=(rast, 40))
self.assertEqual(qs.count(), 1)
# Filter with band index transform.
qs = RasterModel.objects.filter(rast__1__dwithin=(rast, 1, 40))
self.assertEqual(qs.count(), 1)
qs = RasterModel.objects.filter(rast__1__dwithin=(rast, 40))
self.assertEqual(qs.count(), 1)
qs = RasterModel.objects.filter(rast__dwithin=(rast, 1, 40))
self.assertEqual(qs.count(), 1)
# Filter raster by geom.
qs = RasterModel.objects.filter(rast__dwithin=(stx_pnt, 500))
self.assertEqual(qs.count(), 1)
qs = RasterModel.objects.filter(rastprojected__dwithin=(stx_pnt, D(km=10000)))
self.assertEqual(qs.count(), 1)
qs = RasterModel.objects.filter(rast__dwithin=(stx_pnt, 5))
self.assertEqual(qs.count(), 0)
qs = RasterModel.objects.filter(rastprojected__dwithin=(stx_pnt, D(km=100)))
self.assertEqual(qs.count(), 0)
# Filter geom by raster.
qs = RasterModel.objects.filter(geom__dwithin=(rast, 500))
self.assertEqual(qs.count(), 1)
# Filter through related model.
qs = RasterRelatedModel.objects.filter(rastermodel__rast__dwithin=(rast, 40))
self.assertEqual(qs.count(), 1)
# Filter through related model with band index transform
qs = RasterRelatedModel.objects.filter(rastermodel__rast__1__dwithin=(rast, 40))
self.assertEqual(qs.count(), 1)
# Filter through conditional statements.
qs = RasterModel.objects.filter(
Q(rast__dwithin=(rast, 40))
& Q(rastprojected__dwithin=(stx_pnt, D(km=10000)))
)
self.assertEqual(qs.count(), 1)
# Filter through different lookup.
qs = RasterModel.objects.filter(rastprojected__bbcontains=rast)
self.assertEqual(qs.count(), 1)
def test_lookup_input_tuple_too_long(self):
rast = GDALRaster(json.loads(JSON_RASTER))
msg = "Tuple too long for lookup bbcontains."
with self.assertRaisesMessage(ValueError, msg):
RasterModel.objects.filter(rast__bbcontains=(rast, 1, 2))
def test_lookup_input_band_not_allowed(self):
rast = GDALRaster(json.loads(JSON_RASTER))
qs = RasterModel.objects.filter(rast__bbcontains=(rast, 1))
msg = "Band indices are not allowed for this operator, it works on bbox only."
with self.assertRaisesMessage(ValueError, msg):
qs.count()
def test_isvalid_lookup_with_raster_error(self):
qs = RasterModel.objects.filter(rast__isvalid=True)
msg = (
"IsValid function requires a GeometryField in position 1, got RasterField."
)
with self.assertRaisesMessage(TypeError, msg):
qs.count()
def test_result_of_gis_lookup_with_rasters(self):
# Point is in the interior
qs = RasterModel.objects.filter(
rast__contains=GEOSGeometry("POINT (-0.5 0.5)", 4326)
)
self.assertEqual(qs.count(), 1)
# Point is in the exterior
qs = RasterModel.objects.filter(
rast__contains=GEOSGeometry("POINT (0.5 0.5)", 4326)
)
self.assertEqual(qs.count(), 0)
# A point on the boundary is not contained properly
qs = RasterModel.objects.filter(
rast__contains_properly=GEOSGeometry("POINT (0 0)", 4326)
)
self.assertEqual(qs.count(), 0)
# Raster is located left of the point
qs = RasterModel.objects.filter(rast__left=GEOSGeometry("POINT (1 0)", 4326))
self.assertEqual(qs.count(), 1)
def test_lookup_with_raster_bbox(self):
rast = GDALRaster(json.loads(JSON_RASTER))
# Shift raster upward
rast.origin.y = 2
# The raster in the model is not strictly below
qs = RasterModel.objects.filter(rast__strictly_below=rast)
self.assertEqual(qs.count(), 0)
# Shift raster further upward
rast.origin.y = 6
# The raster in the model is strictly below
qs = RasterModel.objects.filter(rast__strictly_below=rast)
self.assertEqual(qs.count(), 1)
def test_lookup_with_polygonized_raster(self):
rast = GDALRaster(json.loads(JSON_RASTER))
# Move raster to overlap with the model point on the left side
rast.origin.x = -95.37040 + 1
rast.origin.y = 29.70486
# Raster overlaps with point in model
qs = RasterModel.objects.filter(geom__intersects=rast)
self.assertEqual(qs.count(), 1)
# Change left side of raster to be nodata values
rast.bands[0].data(data=[0, 0, 0, 1, 1], shape=(5, 1))
rast.bands[0].nodata_value = 0
qs = RasterModel.objects.filter(geom__intersects=rast)
# Raster does not overlap anymore after polygonization
# where the nodata zone is not included.
self.assertEqual(qs.count(), 0)
def test_lookup_value_error(self):
# Test with invalid dict lookup parameter
obj = {}
msg = "Couldn't create spatial object from lookup value '%s'." % obj
with self.assertRaisesMessage(ValueError, msg):
RasterModel.objects.filter(geom__intersects=obj)
# Test with invalid string lookup parameter
obj = "00000"
msg = "Couldn't create spatial object from lookup value '%s'." % obj
with self.assertRaisesMessage(ValueError, msg):
RasterModel.objects.filter(geom__intersects=obj)
def test_db_function_errors(self):
"""
Errors are raised when using DB functions with raster content.
"""
point = GEOSGeometry("SRID=3086;POINT (-697024.9213808845 683729.1705516104)")
rast = GDALRaster(json.loads(JSON_RASTER))
msg = "Distance function requires a geometric argument in position 2."
with self.assertRaisesMessage(TypeError, msg):
RasterModel.objects.annotate(distance_from_point=Distance("geom", rast))
with self.assertRaisesMessage(TypeError, msg):
RasterModel.objects.annotate(
distance_from_point=Distance("rastprojected", rast)
)
msg = (
"Distance function requires a GeometryField in position 1, got RasterField."
)
with self.assertRaisesMessage(TypeError, msg):
RasterModel.objects.annotate(
distance_from_point=Distance("rastprojected", point)
).count()
def test_lhs_with_index_rhs_without_index(self):
with CaptureQueriesContext(connection) as queries:
RasterModel.objects.filter(
rast__0__contains=json.loads(JSON_RASTER)
).exists()
# It's easier to check the indexes in the generated SQL than to write
# tests that cover all index combinations.
self.assertRegex(queries[-1]["sql"], r"WHERE ST_Contains\([^)]*, 1, [^)]*, 1\)")
| RasterFieldTest |
python | getsentry__sentry | src/sentry/types/grouphash_metadata.py | {
"start": 3945,
"end": 4296
} | class ____(TypedDict):
"""
Data gathered when grouping errors generated by Django templates
"""
# The name of the template with the invalid template variable
template_name: NotRequired[str]
# The text of the line in the template containing the invalid variable
template_context_line: NotRequired[str]
| TemplateHashingMetadata |
python | encode__django-rest-framework | tests/browsable_api/test_browsable_nested_api.py | {
"start": 508,
"end": 793
} | class ____(ListCreateAPIView):
renderer_classes = (BrowsableAPIRenderer, )
serializer_class = NestedSerializerTestSerializer
queryset = [{'nested': {'one': 1, 'two': 2}}]
urlpatterns = [
path('api/', NestedSerializersView.as_view(), name='api'),
]
| NestedSerializersView |
python | ansible__ansible | lib/ansible/module_utils/facts/system/env.py | {
"start": 839,
"end": 1261
} | class ____(BaseFactCollector):
name = 'env'
_fact_ids = set() # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
env_facts = {}
env_facts['env'] = {}
for k, v in os.environ.items():
env_facts['env'][k] = v
return env_facts
def __getattr__(importable_name):
return _no_six.deprecate(importable_name, __name__, "iteritems")
| EnvFactCollector |
python | kamyu104__LeetCode-Solutions | Python/count-complete-subarrays-in-an-array.py | {
"start": 93,
"end": 638
} | class ____(object):
def countCompleteSubarrays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums_set = set(nums)
result = left = 0
cnt = collections.Counter()
for right in xrange(len(nums)):
cnt[nums[right]] += 1
while len(cnt) == len(nums_set):
cnt[nums[left]] -= 1
if cnt[nums[left]] == 0:
del cnt[nums[left]]
left += 1
result += left
return result
| Solution |
python | encode__httpx | httpx/_decoders.py | {
"start": 4728,
"end": 6299
} | class ____(ContentDecoder):
"""
Handle 'zstd' RFC 8878 decoding.
Requires `pip install zstandard`.
Can be installed as a dependency of httpx using `pip install httpx[zstd]`.
"""
# inspired by the ZstdDecoder implementation in urllib3
def __init__(self) -> None:
if zstandard is None: # pragma: no cover
raise ImportError(
"Using 'ZStandardDecoder', ..."
"Make sure to install httpx using `pip install httpx[zstd]`."
) from None
self.decompressor = zstandard.ZstdDecompressor().decompressobj()
self.seen_data = False
def decode(self, data: bytes) -> bytes:
assert zstandard is not None
self.seen_data = True
output = io.BytesIO()
try:
output.write(self.decompressor.decompress(data))
while self.decompressor.eof and self.decompressor.unused_data:
unused_data = self.decompressor.unused_data
self.decompressor = zstandard.ZstdDecompressor().decompressobj()
output.write(self.decompressor.decompress(unused_data))
except zstandard.ZstdError as exc:
raise DecodingError(str(exc)) from exc
return output.getvalue()
def flush(self) -> bytes:
if not self.seen_data:
return b""
ret = self.decompressor.flush() # note: this is a no-op
if not self.decompressor.eof:
raise DecodingError("Zstandard data is incomplete") # pragma: no cover
return bytes(ret)
| ZStandardDecoder |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 37592,
"end": 37908
} | class ____(Blockwise):
_parameters = ["frame", "position"]
operation = operator.getitem
_preserves_partitioning_information = True
@functools.cached_property
def _meta(self):
return self.frame._meta[self.position]
def _divisions(self):
return self.frame.divisions
| AlignGetitem |
python | python__mypy | mypy/nodes.py | {
"start": 66580,
"end": 66930
} | class ____(Expression):
"""Complex literal"""
__slots__ = ("value",)
__match_args__ = ("value",)
value: complex
def __init__(self, value: complex) -> None:
super().__init__()
self.value = value
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_complex_expr(self)
| ComplexExpr |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 23659,
"end": 26916
} | class ____(NonStrictDataModel):
"""
:param metric: Metric name
:type metric: str
:param metric_hash: Metric name hash. Used instead of the metric name when
categorizing last metrics events in task objects.
:type metric_hash: str
:param variant: Variant name
:type variant: str
:param variant_hash: Variant name hash. Used instead of the variant name when
categorizing last metrics events in task objects.
:type variant_hash: str
"""
_schema = {
"properties": {
"metric": {"description": "Metric name", "type": ["string", "null"]},
"metric_hash": {
"description": "Metric name hash. Used instead of the metric name when categorizing\n last metrics events in task objects.",
"type": ["string", "null"],
},
"variant": {"description": "Variant name", "type": ["string", "null"]},
"variant_hash": {
"description": "Variant name hash. Used instead of the variant name when categorizing\n last metrics events in task objects.",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
metric: Optional[str] = None,
metric_hash: Optional[str] = None,
variant: Optional[str] = None,
variant_hash: Optional[str] = None,
**kwargs: Any
) -> None:
super(MetricVariantResult, self).__init__(**kwargs)
self.metric = metric
self.metric_hash = metric_hash
self.variant = variant
self.variant_hash = variant_hash
@schema_property("metric")
def metric(self) -> Optional[str]:
return self._property_metric
@metric.setter
def metric(self, value: Optional[str]) -> None:
if value is None:
self._property_metric = None
return
self.assert_isinstance(value, "metric", six.string_types)
self._property_metric = value
@schema_property("metric_hash")
def metric_hash(self) -> Optional[str]:
return self._property_metric_hash
@metric_hash.setter
def metric_hash(self, value: Optional[str]) -> None:
if value is None:
self._property_metric_hash = None
return
self.assert_isinstance(value, "metric_hash", six.string_types)
self._property_metric_hash = value
@schema_property("variant")
def variant(self) -> Optional[str]:
return self._property_variant
@variant.setter
def variant(self, value: Optional[str]) -> None:
if value is None:
self._property_variant = None
return
self.assert_isinstance(value, "variant", six.string_types)
self._property_variant = value
@schema_property("variant_hash")
def variant_hash(self) -> Optional[str]:
return self._property_variant_hash
@variant_hash.setter
def variant_hash(self, value: Optional[str]) -> None:
if value is None:
self._property_variant_hash = None
return
self.assert_isinstance(value, "variant_hash", six.string_types)
self._property_variant_hash = value
| MetricVariantResult |
python | jmcnamara__XlsxWriter | xlsxwriter/workbook.py | {
"start": 1617,
"end": 64615
} | class ____(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Workbook file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
chartsheet_class = Chartsheet
worksheet_class = Worksheet
def __init__(
self,
filename: Optional[Union[str, IO[AnyStr], os.PathLike]] = None,
options: Optional[Dict[str, Any]] = None,
) -> None:
"""
Constructor.
"""
if options is None:
options = {}
super().__init__()
self.filename = filename
self.tmpdir = options.get("tmpdir", None)
self.date_1904 = options.get("date_1904", False)
self.strings_to_numbers = options.get("strings_to_numbers", False)
self.strings_to_formulas = options.get("strings_to_formulas", True)
self.strings_to_urls = options.get("strings_to_urls", True)
self.nan_inf_to_errors = options.get("nan_inf_to_errors", False)
self.default_date_format = options.get("default_date_format", None)
self.constant_memory = options.get("constant_memory", False)
self.in_memory = options.get("in_memory", False)
self.excel2003_style = options.get("excel2003_style", False)
self.excel_2023_theme = options.get("excel_2023_theme", False)
self.remove_timezone = options.get("remove_timezone", False)
self.use_future_functions = options.get("use_future_functions", False)
self.default_row_height = options.get("default_row_height", 20)
self.default_col_width = options.get("default_column_width", 64)
self.default_format_properties = options.get(
"default_format_properties", {"font_name": "Calibri", "font_size": 11}
)
self.max_url_length = options.get("max_url_length", 2079)
if self.max_url_length < 255:
self.max_url_length = 2079
if options.get("use_zip64"):
self.allow_zip64 = True
else:
self.allow_zip64 = False
self.worksheet_meta = WorksheetMeta()
self.selected = 0
self.fileclosed = 0
self.filehandle = None
self.internal_fh = 0
self.sheet_name = "Sheet"
self.chart_name = "Chart"
self.sheetname_count = 0
self.chartname_count = 0
self.worksheets_objs = []
self.charts = []
self.drawings = []
self.sheetnames = {}
self.formats = []
self.xf_formats = []
self.xf_format_indices = {}
self.dxf_formats = []
self.dxf_format_indices = {}
self.palette = []
self.font_count = 0
self.num_formats = []
self.defined_names = []
self.named_ranges = []
self.custom_colors = []
self.doc_properties = {}
self.custom_properties = []
self.createtime = datetime.now(timezone.utc)
self.num_vml_files = 0
self.num_comment_files = 0
self.x_window = 240
self.y_window = 15
self.window_width = 16095
self.window_height = 9660
self.tab_ratio = 600
self.str_table = SharedStringTable()
self.vba_project = None
self.vba_project_is_stream = False
self.vba_project_signature = None
self.vba_project_signature_is_stream = False
self.vba_codename = None
self.image_types = {}
self.images = []
self.border_count = 0
self.fill_count = 0
self.drawing_count = 0
self.calc_mode = "auto"
self.calc_on_load = True
self.calc_id = 124519
self.has_comments = False
self.read_only = 0
self.has_metadata = False
self.has_embedded_images = False
self.has_dynamic_functions = False
self.has_embedded_descriptions = False
self.embedded_images = EmbeddedImages()
self.feature_property_bags = set()
self.default_theme_version: str = "124226"
self.theme_xml: str = THEME_XML_2007
# We can't do 'constant_memory' mode while doing 'in_memory' mode.
if self.in_memory:
self.constant_memory = False
# Add the default cell format.
if self.excel_2023_theme:
format_properties = {
"font_name": "Aptos Narrow",
"font_size": 11,
"font_scheme": "minor",
}
self.default_format_properties = format_properties.copy()
self.default_col_width = 64
self.default_row_height = 20
self.default_theme_version = "202300"
self.theme_xml: str = THEME_XML_2023
format_properties["xf_index"] = 0
self.add_format(format_properties)
elif self.excel2003_style:
# This is a deprecated workaround for Excel 2003 style default format.
format_properties = {
"font_name": "Arial",
"font_size": 10,
"font_family": 0,
"font_scheme": False,
"theme": -1,
}
format_properties["xf_index"] = 0
self.add_format(format_properties)
self.default_format_properties = {
"font_name": "Arial",
"font_size": 10,
"font_scheme": False,
"theme": -1,
}
else:
format_properties = self.default_format_properties.copy()
format_properties["xf_index"] = 0
self.add_format(format_properties)
# Store the theme font name to cell format properties.
self.default_format_properties["theme_font_name"] = (
self.default_format_properties.get("font_name", "Calibri")
)
# Add a default URL format.
format_properties = self.default_format_properties.copy()
format_properties["hyperlink"] = True
format_properties["font_scheme"] = "none"
self.default_url_format = self.add_format(format_properties)
# Add the default date format.
if self.default_date_format is not None:
self.default_date_format = self.add_format(
{"num_format": self.default_date_format}
)
(self.max_digit_width, self.cell_padding, self.max_col_width) = (
self._default_column_metrics(self.default_col_width)
)
def __enter__(self):
"""Return self object to use with "with" statement."""
return self
def __exit__(self, type, value, traceback) -> None:
# pylint: disable=redefined-builtin
"""Close workbook when exiting "with" statement."""
self.close()
def add_worksheet(
self, name: Optional[str] = None, worksheet_class=None
) -> Worksheet:
"""
Add a new worksheet to the Excel workbook.
Args:
name: The worksheet name. Defaults to 'Sheet1', etc.
Returns:
Reference to a worksheet object.
"""
if worksheet_class is None:
worksheet_class = self.worksheet_class
return self._add_sheet(name, worksheet_class=worksheet_class)
def add_chartsheet(
self, name: Optional[str] = None, chartsheet_class=None
) -> Chartsheet:
"""
Add a new chartsheet to the Excel workbook.
Args:
name: The chartsheet name. Defaults to 'Sheet1', etc.
Returns:
Reference to a chartsheet object.
"""
if chartsheet_class is None:
chartsheet_class = self.chartsheet_class
return self._add_sheet(name, worksheet_class=chartsheet_class)
def add_format(self, properties=None) -> Format:
"""
Add a new Format to the Excel Workbook.
Args:
properties: The format properties.
Returns:
Reference to a Format object.
"""
format_properties = self.default_format_properties.copy()
if properties:
format_properties.update(properties)
xf_format = Format(
format_properties, self.xf_format_indices, self.dxf_format_indices
)
# Store the format reference.
self.formats.append(xf_format)
return xf_format
def add_chart(self, options: Dict[str, Any]) -> Optional[
Union[
ChartArea,
ChartBar,
ChartColumn,
ChartDoughnut,
ChartLine,
ChartPie,
ChartRadar,
ChartScatter,
ChartStock,
]
]:
"""
Create a chart object.
Args:
options: The chart type and subtype options.
Returns:
Reference to a Chart object.
"""
# Type must be specified so we can create the required chart instance.
chart_type = options.get("type")
if chart_type is None:
warn("Chart type must be defined in add_chart()")
return None
if chart_type == "area":
chart = ChartArea(options)
elif chart_type == "bar":
chart = ChartBar(options)
elif chart_type == "column":
chart = ChartColumn(options)
elif chart_type == "doughnut":
chart = ChartDoughnut()
elif chart_type == "line":
chart = ChartLine(options)
elif chart_type == "pie":
chart = ChartPie()
elif chart_type == "radar":
chart = ChartRadar(options)
elif chart_type == "scatter":
chart = ChartScatter(options)
elif chart_type == "stock":
chart = ChartStock()
else:
warn(f"Unknown chart type '{chart_type}' in add_chart()")
return None
# Set the embedded chart name if present.
if "name" in options:
chart.chart_name = options["name"]
chart.embedded = True
chart.date_1904 = self.date_1904
chart.remove_timezone = self.remove_timezone
self.charts.append(chart)
return chart
def add_vba_project(self, vba_project: str, is_stream: bool = False) -> int:
"""
Add a vbaProject binary to the Excel workbook.
Args:
vba_project: The vbaProject binary file name.
is_stream: vba_project is an in memory byte stream.
Returns:
0 on success.
"""
if not is_stream and not os.path.exists(vba_project):
warn(f"VBA project binary file '{vba_project}' not found.")
return -1
if self.vba_codename is None:
self.vba_codename = "ThisWorkbook"
self.vba_project = vba_project
self.vba_project_is_stream = is_stream
return 0
def add_signed_vba_project(
self,
vba_project: str,
signature: str,
project_is_stream: bool = False,
signature_is_stream: bool = False,
) -> Literal[0, -1]:
"""
Add a vbaProject binary and a vbaProjectSignature binary to the
Excel workbook.
Args:
vba_project: The vbaProject binary file name.
signature: The vbaProjectSignature binary file name.
project_is_stream: vba_project is an in memory byte stream.
signature_is_stream: signature is an in memory byte stream.
Returns:
0 on success.
"""
if self.add_vba_project(vba_project, project_is_stream) == -1:
return -1
if not signature_is_stream and not os.path.exists(signature):
warn(f"VBA project signature binary file '{signature}' not found.")
return -1
self.vba_project_signature = signature
self.vba_project_signature_is_stream = signature_is_stream
return 0
def use_custom_theme(self, theme: Union[str, os.PathLike, IO[AnyStr]]) -> None:
"""
Add a custom theme to the Excel workbook.
Args:
theme: The custom theme as a file path (string or PathLike),
or in-memory string as a StringIO object.
Raises:
IOError: If the file cannot be read.
ThemeFileError: If the theme file is invalid or unsupported.
ValueError: If the theme parameter type is not supported.
"""
theme_xml = ""
if isinstance(theme, (str, os.PathLike)):
theme_xml = self._read_theme_from_file(theme)
elif isinstance(theme, StringIO):
theme_xml = theme.getvalue()
else:
raise ValueError(
"Theme must be a file path (string or PathLike), or StringIO object."
)
# Simple check to see if the file is text/XML.
if not theme_xml.startswith("<?xml") or "<a:theme" not in theme_xml:
raise ThemeFileError(f"Invalid XML theme file: '{theme}'.")
# Check for Excel 2007 theme files that contain images as fills. These
# aren't currently supported.
if "<a:blipFill>" in theme_xml:
raise ThemeFileError(
"Theme file contains image fills which aren't currently "
f"supported: '{theme}'."
)
self.theme_xml = theme_xml
self.default_theme_version = ""
def close(self) -> None:
"""
Call finalization code and close file.
Args:
None.
Returns:
Nothing.
"""
# pylint: disable=raise-missing-from
if not self.fileclosed:
try:
self._store_workbook()
except IOError as e:
raise FileCreateError(e)
except LargeZipFile:
raise FileSizeError(
"Filesize would require ZIP64 extensions. "
"Use workbook.use_zip64()."
)
self.fileclosed = True
# Ensure all constant_memory temp files are closed.
if self.constant_memory:
for worksheet in self.worksheets():
worksheet._opt_close()
else:
warn("Calling close() on already closed file.")
def set_size(self, width: int, height: int) -> None:
"""
Set the size of a workbook window.
Args:
width: Width of the window in pixels.
height: Height of the window in pixels.
Returns:
Nothing.
"""
# Convert the width/height to twips at 96 dpi.
if width:
self.window_width = int(width * 1440 / 96)
else:
self.window_width = 16095
if height:
self.window_height = int(height * 1440 / 96)
else:
self.window_height = 9660
def set_tab_ratio(self, tab_ratio: Optional[Union[int, float]] = None) -> None:
"""
Set the ratio between worksheet tabs and the horizontal slider.
Args:
tab_ratio: The tab ratio, 0 <= tab_ratio <= 100
Returns:
Nothing.
"""
if tab_ratio is None:
return
if tab_ratio < 0 or tab_ratio > 100:
warn(f"Tab ratio '{tab_ratio}' outside: 0 <= tab_ratio <= 100")
else:
self.tab_ratio = int(tab_ratio * 10)
def set_properties(self, properties) -> None:
"""
Set the document properties such as Title, Author etc.
Args:
properties: Dictionary of document properties.
Returns:
Nothing.
"""
self.doc_properties = properties
def set_custom_property(
self,
name: str,
value: Union[bool, datetime, int, float, Decimal, Fraction, Any],
property_type: Optional[
Literal["bool", "date", "number", "number_int", "text"]
] = None,
) -> Literal[0, -1]:
"""
Set a custom document property.
Args:
name: The name of the custom property.
value: The value of the custom property.
property_type: The type of the custom property. Optional.
Returns:
0 on success.
"""
if name is None or value is None:
warn(
"The name and value parameters must be non-None in "
"set_custom_property()"
)
return -1
if property_type is None:
# Determine the property type from the Python type.
if isinstance(value, bool):
property_type = "bool"
elif isinstance(value, datetime):
property_type = "date"
elif isinstance(value, int):
property_type = "number_int"
elif isinstance(value, (float, int, Decimal, Fraction)):
property_type = "number"
else:
property_type = "text"
# Convert non-string values to strings to have a single data type.
if property_type == "bool":
value = str(value).lower()
if property_type == "date":
value = value.strftime("%Y-%m-%dT%H:%M:%SZ")
if property_type in ("number", "number_int"):
value = str(value)
if property_type == "text" and len(value) > 255:
warn(
f"Length of 'value' parameter exceeds Excel's limit of 255 "
f"characters in set_custom_property(): '{value}'"
)
if len(name) > 255:
warn(
f"Length of 'name' parameter exceeds Excel's limit of 255 "
f"characters in set_custom_property(): '{name}'"
)
self.custom_properties.append((name, value, property_type))
return 0
def set_calc_mode(
self, mode: Literal["manual", "auto_except_tables", "auto"], calc_id=None
) -> None:
"""
Set the Excel calculation mode for the workbook.
Args:
mode: String containing one of:
* manual
* auto_except_tables
* auto
Returns:
Nothing.
"""
self.calc_mode = mode
if mode == "manual":
self.calc_on_load = False
elif mode == "auto_except_tables":
self.calc_mode = "autoNoTable"
# Leave undocumented for now. Rarely required.
if calc_id:
self.calc_id = calc_id
def define_name(self, name: str, formula: str) -> Literal[0, -1]:
# Create a defined name in Excel. We handle global/workbook level
# names and local/worksheet names.
"""
Create a defined name in the workbook.
Args:
name: The defined name.
formula: The cell or range that the defined name refers to.
Returns:
0 on success.
"""
sheet_index = None
sheetname = ""
# Remove the = sign from the formula if it exists.
if formula.startswith("="):
formula = formula.lstrip("=")
# Local defined names are formatted like "Sheet1!name".
sheet_parts = re.compile(r"^([^!]+)!([^!]+)$")
match = sheet_parts.match(name)
if match:
sheetname = match.group(1)
name = match.group(2)
sheet_index = self._get_sheet_index(sheetname)
# Warn if the sheet index wasn't found.
if sheet_index is None:
warn(f"Unknown sheet name '{sheetname}' in defined_name()")
return -1
else:
# Use -1 to indicate global names.
sheet_index = -1
# Warn if the defined name contains invalid chars as defined by Excel.
if not re.match(r"^[\w\\][\w\\.]*$", name, re.UNICODE) or re.match(
r"^\d", name
):
warn(f"Invalid Excel characters in defined_name(): '{name}'")
return -1
# Warn if the defined name looks like a cell name.
if re.match(r"^[a-zA-Z][a-zA-Z]?[a-dA-D]?\d+$", name):
warn(f"Name looks like a cell name in defined_name(): '{name}'")
return -1
# Warn if the name looks like a R1C1 cell reference.
if re.match(r"^[rcRC]$", name) or re.match(r"^[rcRC]\d+[rcRC]\d+$", name):
warn(f"Invalid name '{name}' like a RC cell ref in defined_name()")
return -1
self.defined_names.append([name, sheet_index, formula, False])
return 0
def worksheets(self) -> List[Worksheet]:
"""
Return a list of the worksheet objects in the workbook.
Args:
None.
Returns:
A list of worksheet objects.
"""
return self.worksheets_objs
def get_worksheet_by_name(self, name: str) -> Optional[Worksheet]:
"""
Return a worksheet object in the workbook using the sheetname.
Args:
name: The name of the worksheet.
Returns:
A worksheet object or None.
"""
return self.sheetnames.get(name)
def get_default_url_format(self) -> Format:
"""
Get the default url format used when a user defined format isn't
specified with write_url(). The format is the hyperlink style defined
by Excel for the default theme.
Args:
None.
Returns:
A format object.
"""
return self.default_url_format
def use_zip64(self) -> None:
"""
Allow ZIP64 extensions when writing xlsx file zip container.
Args:
None.
Returns:
Nothing.
"""
self.allow_zip64 = True
def set_vba_name(self, name: Optional[str] = None) -> None:
"""
Set the VBA name for the workbook. By default the workbook is referred
to as ThisWorkbook in VBA.
Args:
name: The VBA name for the workbook.
Returns:
Nothing.
"""
if name is not None:
self.vba_codename = name
else:
self.vba_codename = "ThisWorkbook"
def read_only_recommended(self) -> None:
"""
Set the Excel "Read-only recommended" option when saving a file.
Args:
None.
Returns:
Nothing.
"""
self.read_only = 2
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self) -> None:
# Assemble and write the XML file.
# Prepare format object for passing to Style.pm.
self._prepare_format_properties()
# Write the XML declaration.
self._xml_declaration()
# Write the workbook element.
self._write_workbook()
# Write the fileVersion element.
self._write_file_version()
# Write the fileSharing element.
self._write_file_sharing()
# Write the workbookPr element.
self._write_workbook_pr()
# Write the bookViews element.
self._write_book_views()
# Write the sheets element.
self._write_sheets()
# Write the workbook defined names.
self._write_defined_names()
# Write the calcPr element.
self._write_calc_pr()
# Close the workbook tag.
self._xml_end_tag("workbook")
# Close the file.
self._xml_close()
def _store_workbook(self) -> None:
# pylint: disable=consider-using-with
# Create the xlsx/zip file.
try:
xlsx_file = ZipFile(
self.filename,
"w",
compression=ZIP_DEFLATED,
allowZip64=self.allow_zip64,
)
except IOError as e:
raise e
# Assemble worksheets into a workbook.
packager = self._get_packager()
# Add a default worksheet if non have been added.
if not self.worksheets():
self.add_worksheet()
# Ensure that at least one worksheet has been selected.
if self.worksheet_meta.activesheet == 0:
self.worksheets_objs[0].selected = 1
self.worksheets_objs[0].hidden = 0
# Set the active sheet.
for sheet in self.worksheets():
if sheet.index == self.worksheet_meta.activesheet:
sheet.active = 1
# Set the sheet vba_codename the workbook has a vbaProject binary.
if self.vba_project:
for sheet in self.worksheets():
if sheet.vba_codename is None:
sheet.set_vba_name()
# Convert the SST strings data structure.
self._prepare_sst_string_data()
# Prepare the worksheet VML elements such as comments and buttons.
self._prepare_vml()
# Set the defined names for the worksheets such as Print Titles.
self._prepare_defined_names()
# Prepare the drawings, charts and images.
self._prepare_drawings()
# Add cached data to charts.
self._add_chart_data()
# Prepare the worksheet tables.
self._prepare_tables()
# Prepare the metadata file links.
self._prepare_metadata()
# Package the workbook.
packager._add_workbook(self)
packager._set_tmpdir(self.tmpdir)
packager._set_in_memory(self.in_memory)
xml_files = packager._create_package()
# Free up the Packager object.
packager = None
# Add XML sub-files to the Zip file with their Excel filename.
for file_id, file_data in enumerate(xml_files):
os_filename, xml_filename, is_binary = file_data
if self.in_memory:
# Set sub-file timestamp to Excel's timestamp of 1/1/1980.
zipinfo = ZipInfo(xml_filename, (1980, 1, 1, 0, 0, 0))
# Copy compression type from parent ZipFile.
zipinfo.compress_type = xlsx_file.compression
if is_binary:
xlsx_file.writestr(zipinfo, os_filename.getvalue())
else:
xlsx_file.writestr(zipinfo, os_filename.getvalue().encode("utf-8"))
else:
# The sub-files are tempfiles on disk, i.e, not in memory.
# Set sub-file timestamp to 31/1/1980 due to portability
# issues setting it to Excel's timestamp of 1/1/1980.
timestamp = time.mktime((1980, 1, 31, 0, 0, 0, 0, 0, -1))
os.utime(os_filename, (timestamp, timestamp))
try:
xlsx_file.write(os_filename, xml_filename)
os.remove(os_filename)
except LargeZipFile as e:
# Close open temp files on zipfile.LargeZipFile exception.
for i in range(file_id, len(xml_files) - 1):
os.remove(xml_files[i][0])
raise e
xlsx_file.close()
def _add_sheet(self, name, worksheet_class=None):
# Utility for shared code in add_worksheet() and add_chartsheet().
if worksheet_class:
worksheet = worksheet_class()
else:
worksheet = self.worksheet_class()
sheet_index = len(self.worksheets_objs)
name = self._check_sheetname(name, isinstance(worksheet, Chartsheet))
# Initialization data to pass to the worksheet.
init_data = {
"name": name,
"index": sheet_index,
"str_table": self.str_table,
"worksheet_meta": self.worksheet_meta,
"constant_memory": self.constant_memory,
"tmpdir": self.tmpdir,
"date_1904": self.date_1904,
"strings_to_numbers": self.strings_to_numbers,
"strings_to_formulas": self.strings_to_formulas,
"strings_to_urls": self.strings_to_urls,
"nan_inf_to_errors": self.nan_inf_to_errors,
"default_date_format": self.default_date_format,
"default_url_format": self.default_url_format,
"workbook_add_format": self.add_format,
"excel2003_style": self.excel2003_style,
"remove_timezone": self.remove_timezone,
"max_url_length": self.max_url_length,
"use_future_functions": self.use_future_functions,
"embedded_images": self.embedded_images,
"default_row_height": self.default_row_height,
"default_col_width": self.default_col_width,
"max_digit_width": self.max_digit_width,
"cell_padding": self.cell_padding,
"max_col_width": self.max_col_width,
}
worksheet._initialize(init_data)
self.worksheets_objs.append(worksheet)
self.sheetnames[name] = worksheet
return worksheet
def _check_sheetname(self, sheetname, is_chartsheet=False):
    """Validate a worksheet name and return it (or a generated default).

    Checks the Excel constraints: length <= 31, no []:*?/\\ characters,
    no leading/trailing apostrophe, and case-insensitive uniqueness
    within the workbook. Raises InvalidWorksheetName or
    DuplicateWorksheetName on violation.
    """
    invalid_char = re.compile(r"[\[\]:*?/\\]")

    # Increment the Sheet/Chart number used for default sheet names below.
    # Note: the counter advances even when an explicit name is supplied.
    if is_chartsheet:
        self.chartname_count += 1
    else:
        self.sheetname_count += 1

    # Supply default Sheet/Chart sheetname if none has been defined.
    if sheetname is None or sheetname == "":
        if is_chartsheet:
            sheetname = self.chart_name + str(self.chartname_count)
        else:
            sheetname = self.sheet_name + str(self.sheetname_count)

    # Check that sheet sheetname is <= 31. Excel limit.
    if len(sheetname) > 31:
        raise InvalidWorksheetName(
            f"Excel worksheet name '{sheetname}' must be <= 31 chars."
        )

    # Check that sheetname doesn't contain any invalid characters.
    if invalid_char.search(sheetname):
        raise InvalidWorksheetName(
            f"Invalid Excel character '[]:*?/\\' in sheetname '{sheetname}'."
        )

    # Check that sheetname doesn't start or end with an apostrophe.
    if sheetname.startswith("'") or sheetname.endswith("'"):
        raise InvalidWorksheetName(
            f'Sheet name cannot start or end with an apostrophe "{sheetname}".'
        )

    # Check that the worksheet name doesn't already exist since this is a
    # fatal Excel error. The check must be case insensitive like Excel.
    for worksheet in self.worksheets():
        if sheetname.lower() == worksheet.name.lower():
            raise DuplicateWorksheetName(
                f"Sheetname '{sheetname}', with case ignored, is already in use."
            )

    return sheetname
def _prepare_format_properties(self) -> None:
    """Prepare all Format properties prior to passing them to styles.py.

    The ordering matters: formats must be separated into XF/DXF lists
    before the per-element (font/number/border/fill) indices are set.
    """
    # Separate format objects into XF and DXF formats.
    self._prepare_formats()

    # Set the font index for the format objects.
    self._prepare_fonts()

    # Set the number format index for the format objects.
    self._prepare_num_formats()

    # Set the border index for the format objects.
    self._prepare_borders()

    # Set the fill index for the format objects.
    self._prepare_fills()
def _prepare_formats(self) -> None:
# Iterate through the XF Format objects and separate them into
# XF and DXF formats. The XF and DF formats then need to be sorted
# back into index order rather than creation order.
xf_formats = []
dxf_formats = []
# Sort into XF and DXF formats.
for xf_format in self.formats:
if xf_format.xf_index is not None:
xf_formats.append(xf_format)
if xf_format.dxf_index is not None:
dxf_formats.append(xf_format)
# Pre-extend the format lists.
self.xf_formats = [None] * len(xf_formats)
self.dxf_formats = [None] * len(dxf_formats)
# Rearrange formats into index order.
for xf_format in xf_formats:
index = xf_format.xf_index
self.xf_formats[index] = xf_format
for dxf_format in dxf_formats:
index = dxf_format.dxf_index
self.dxf_formats[index] = dxf_format
def _set_default_xf_indices(self) -> None:
    """Set the default XF index for each format. Only used for testing."""
    formats = list(self.formats)

    # Delete the default url format.
    del formats[1]

    # Skip the default date format if set.
    if self.default_date_format is not None:
        del formats[1]

    # Set the remaining formats (assigns each format its XF index).
    for xf_format in formats:
        xf_format._get_xf_index()
def _prepare_fonts(self) -> None:
    """Assign a font index to each XF format and count unique fonts.

    Formats sharing an identical font key reuse the first index; only
    the first occurrence is flagged with has_font so the font is
    written once in styles.xml.
    """
    fonts = {}
    index = 0

    for xf_format in self.xf_formats:
        key = xf_format._get_font_key()
        if key in fonts:
            # Font has already been used.
            xf_format.font_index = fonts[key]
            xf_format.has_font = False
        else:
            # This is a new font.
            fonts[key] = index
            xf_format.font_index = index
            xf_format.has_font = True
            index += 1

    self.font_count = index

    # For DXF formats we only need to check if the properties have changed.
    for xf_format in self.dxf_formats:
        # The only font properties that can change for a DXF format are:
        # color, bold, italic, underline and strikethrough.
        if (
            xf_format.font_color
            or xf_format.bold
            or xf_format.italic
            or xf_format.underline
            or xf_format.font_strikeout
        ):
            xf_format.has_dxf_font = True
def _prepare_num_formats(self) -> None:
    """Assign number-format indices to XF/DXF formats.

    Built-in formats keep their built-in index; user-defined format
    strings are assigned sequential indices starting at 164 (the first
    user-defined slot in Excel).
    """
    unique_num_formats = {}
    num_formats = []
    index = 164  # User defined records in Excel start from index 0xA4.

    for xf_format in self.xf_formats + self.dxf_formats:
        num_format = xf_format.num_format

        # Check if num_format is an index to a built-in number format.
        if not isinstance(num_format, str):
            num_format = int(num_format)

            # Number format '0' is indexed as 1 in Excel.
            if num_format == 0:
                num_format = 1

            xf_format.num_format_index = num_format
            continue

        if num_format == "0":
            # Number format '0' is indexed as 1 in Excel.
            xf_format.num_format_index = 1
            continue

        if num_format == "General":
            # The 'General' format has an number format index of 0.
            xf_format.num_format_index = 0
            continue

        if num_format in unique_num_formats:
            # Number xf_format has already been used.
            xf_format.num_format_index = unique_num_formats[num_format]
        else:
            # Add a new number xf_format.
            unique_num_formats[num_format] = index
            xf_format.num_format_index = index
            index += 1

            # Only store the format string for XF formats (not DXF formats).
            # NOTE(review): `if xf_format.xf_index:` also excludes index 0;
            # presumably index 0 is the default format and never reaches
            # here with a user-defined string — confirm.
            if xf_format.xf_index:
                num_formats.append(num_format)

    self.num_formats = num_formats
def _prepare_borders(self) -> None:
    """Assign a border index to each XF format and count unique borders.

    Identical border keys share one index; only the first occurrence
    sets has_border so the border is written once in styles.xml.
    """
    borders = {}
    index = 0

    for xf_format in self.xf_formats:
        key = xf_format._get_border_key()
        if key in borders:
            # Border has already been used.
            xf_format.border_index = borders[key]
            xf_format.has_border = False
        else:
            # This is a new border.
            borders[key] = index
            xf_format.border_index = index
            xf_format.has_border = True
            index += 1

    self.border_count = index

    # For DXF formats we only need to check if the properties have changed.
    # Any char other than '0', 'None' components or ':' separators in the
    # key means a non-default border property is set.
    has_border = re.compile(r"[^0None:]")

    for xf_format in self.dxf_formats:
        key = xf_format._get_border_key()
        if has_border.search(key):
            xf_format.has_dxf_border = True
def _prepare_fills(self) -> None:
    """Assign a fill index to each XF format and count unique fills.

    The user defined fill properties start from 2 since there are 2
    default fills: patternType="none" and patternType="gray125".
    Also applies Excel's solid-fill color-swap special cases.
    """
    fills = {}
    index = 2  # Start from 2. See above.

    # Add the default fills.
    fills["0:None:None"] = 0
    fills["17:None:None"] = 1

    # Store the DXF colors separately since them may be reversed below.
    for xf_format in self.dxf_formats:
        if xf_format.pattern or xf_format.bg_color or xf_format.fg_color:
            xf_format.has_dxf_fill = True
            xf_format.dxf_bg_color = xf_format.bg_color
            xf_format.dxf_fg_color = xf_format.fg_color

    for xf_format in self.xf_formats:
        # The following logical statements jointly take care of special
        # cases in relation to cell colors and patterns:
        # 1. For a solid fill (_pattern == 1) Excel reverses the role of
        #    foreground and background colors, and
        # 2. If the user specifies a foreground or background color
        #    without a pattern they probably wanted a solid fill, so we fill
        #    in the defaults.
        if xf_format.pattern == 1 and xf_format.bg_color and xf_format.fg_color:
            tmp = xf_format.fg_color
            xf_format.fg_color = xf_format.bg_color
            xf_format.bg_color = tmp

        if xf_format.pattern <= 1 and xf_format.bg_color and not xf_format.fg_color:
            xf_format.fg_color = xf_format.bg_color
            xf_format.bg_color = None
            xf_format.pattern = 1

        if xf_format.pattern <= 1 and not xf_format.bg_color and xf_format.fg_color:
            xf_format.pattern = 1

        key = xf_format._get_fill_key()

        if key in fills:
            # Fill has already been used.
            xf_format.fill_index = fills[key]
            xf_format.has_fill = False
        else:
            # This is a new fill.
            fills[key] = index
            xf_format.fill_index = index
            xf_format.has_fill = True
            index += 1

    self.fill_count = index
def _has_feature_property_bags(self):
# Check for any format properties that require a feature bag. Currently
# this only applies to checkboxes.
if not self.feature_property_bags:
for xf_format in self.formats:
if xf_format.checkbox:
self.feature_property_bags.add("XFComplements")
if xf_format.dxf_index is not None and xf_format.checkbox:
self.feature_property_bags.add("DXFComplements")
return self.feature_property_bags
def _prepare_defined_names(self) -> None:
    """Collect worksheet defined names (autofilter, print area, print
    titles) in addition to any user defined names.

    Stores the defined names for the Workbook.xml and the named ranges
    for App.xml.
    """
    defined_names = self.defined_names

    for sheet in self.worksheets():
        # Check for autofilter settings.
        if sheet.autofilter_area:
            hidden = 1
            sheet_range = sheet.autofilter_area
            # Store the defined names.
            defined_names.append(
                ["_xlnm._FilterDatabase", sheet.index, sheet_range, hidden]
            )

        # Check for Print Area settings.
        if sheet.print_area_range:
            hidden = 0
            sheet_range = sheet.print_area_range
            # Store the defined names.
            defined_names.append(
                ["_xlnm.Print_Area", sheet.index, sheet_range, hidden]
            )

        # Check for repeat rows/cols referred to as Print Titles.
        if sheet.repeat_col_range or sheet.repeat_row_range:
            hidden = 0
            sheet_range = ""
            if sheet.repeat_col_range and sheet.repeat_row_range:
                sheet_range = sheet.repeat_col_range + "," + sheet.repeat_row_range
            else:
                # Only one of the two is set; string concat picks it up.
                sheet_range = sheet.repeat_col_range + sheet.repeat_row_range
            # Store the defined names.
            defined_names.append(
                ["_xlnm.Print_Titles", sheet.index, sheet_range, hidden]
            )

    defined_names = self._sort_defined_names(defined_names)
    self.defined_names = defined_names
    self.named_ranges = self._extract_named_ranges(defined_names)
def _sort_defined_names(self, names):
# Sort the list of list of internal and user defined names in
# the same order as used by Excel.
# Add a normalize name string to each list for sorting.
for name_list in names:
(defined_name, _, sheet_name, _) = name_list
# Normalize the defined name by removing any leading '_xmln.'
# from internal names and lowercasing the string.
defined_name = defined_name.replace("_xlnm.", "").lower()
# Normalize the sheetname by removing the leading quote and
# lowercasing the string.
sheet_name = sheet_name.lstrip("'").lower()
name_list.append(defined_name + "::" + sheet_name)
# Sort based on the normalized key.
names.sort(key=operator.itemgetter(4))
# Remove the extra key used for sorting.
for name_list in names:
name_list.pop()
return names
def _prepare_drawings(self) -> None:
    """Iterate through the worksheets and set up chart and image drawings.

    Assigns drawing ids and relationship ref ids, de-duplicating images
    by digest so identical images share one ref id and are stored once.
    """
    chart_ref_id = 0
    ref_id = 0
    drawing_id = 0
    image_ids = {}
    header_image_ids = {}
    background_ids = {}

    # Store the image types for any embedded images.
    for image in self.embedded_images.images:
        image_extension = image._image_extension
        self.image_types[image_extension] = True

        if image.description is not None:
            self.has_embedded_descriptions = True

    # Worksheet image ref ids continue after the embedded-image ids.
    image_ref_id = len(self.embedded_images.images)

    for sheet in self.worksheets():
        chart_count = len(sheet.charts)
        image_count = len(sheet.images)
        shape_count = len(sheet.shapes)

        header_image_count = len(sheet.header_images)
        footer_image_count = len(sheet.footer_images)
        has_background = sheet.background_image
        has_drawing = False

        if not (
            chart_count
            or image_count
            or shape_count
            or header_image_count
            or footer_image_count
            or has_background
        ):
            continue

        # Don't increase the drawing_id header/footer images.
        if chart_count or image_count or shape_count:
            drawing_id += 1
            has_drawing = True

        # Prepare the background images.
        if sheet.background_image:
            image = sheet.background_image

            image_extension = image._image_extension
            image_digest = image._digest

            self.image_types[image_extension] = True

            if image_digest in background_ids:
                ref_id = background_ids[image_digest]
            else:
                image_ref_id += 1
                ref_id = image_ref_id
                background_ids[image_digest] = image_ref_id
                self.images.append(image)

            sheet._prepare_background(ref_id, image_extension)

        # Prepare the worksheet images.
        for index in range(image_count):
            image = sheet.images[index]
            image_extension = image._image_extension
            image_digest = image._digest

            self.image_types[image_extension] = True

            if image_digest in image_ids:
                ref_id = image_ids[image_digest]
            else:
                image_ref_id += 1
                ref_id = image_ref_id
                image_ids[image_digest] = image_ref_id
                self.images.append(image)

            sheet._prepare_image(
                image,
                ref_id,
                drawing_id,
            )

        # Prepare the worksheet charts.
        for index in range(chart_count):
            chart_ref_id += 1
            sheet._prepare_chart(index, chart_ref_id, drawing_id)

        # Prepare the worksheet shapes.
        for index in range(shape_count):
            sheet._prepare_shape(index, drawing_id)

        # Prepare the header images.
        for image in sheet.header_images:
            image_extension = image._image_extension
            image_digest = image._digest

            self.image_types[image_extension] = True

            if image_digest in header_image_ids:
                ref_id = header_image_ids[image_digest]
            else:
                image_ref_id += 1
                ref_id = image_ref_id
                header_image_ids[image_digest] = image_ref_id
                self.images.append(image)

            sheet._prepare_header_image(ref_id, image)

        # Prepare the footer images. Footer images share the header image
        # id dict and preparation path.
        for image in sheet.footer_images:
            image_extension = image._image_extension
            image_digest = image._digest

            self.image_types[image_extension] = True

            if image_digest in header_image_ids:
                ref_id = header_image_ids[image_digest]
            else:
                image_ref_id += 1
                ref_id = image_ref_id
                header_image_ids[image_digest] = image_ref_id
                self.images.append(image)

            sheet._prepare_header_image(ref_id, image)

        if has_drawing:
            drawing = sheet.drawing
            self.drawings.append(drawing)

    # Remove charts that were created but not inserted into worksheets.
    for chart in self.charts[:]:
        if chart.id == -1:
            self.charts.remove(chart)

    # Sort the workbook charts references into the order that the were
    # written to the worksheets above.
    self.charts = sorted(self.charts, key=lambda chart: chart.id)

    self.drawing_count = drawing_id
def _extract_named_ranges(self, defined_names):
# Extract the named ranges from the sorted list of defined names.
# These are used in the App.xml file.
named_ranges = []
for defined_name in defined_names:
name = defined_name[0]
index = defined_name[1]
sheet_range = defined_name[2]
# Skip autoFilter ranges.
if name == "_xlnm._FilterDatabase":
continue
# We are only interested in defined names with ranges.
if "!" in sheet_range:
sheet_name, _ = sheet_range.split("!", 1)
# Match Print_Area and Print_Titles xlnm types.
if name.startswith("_xlnm."):
xlnm_type = name.replace("_xlnm.", "")
name = sheet_name + "!" + xlnm_type
elif index != -1:
name = sheet_name + "!" + name
named_ranges.append(name)
return named_ranges
def _get_sheet_index(self, sheetname):
# Convert a sheet name to its index. Return None otherwise.
sheetname = sheetname.strip("'")
if sheetname in self.sheetnames:
return self.sheetnames[sheetname].index
return None
def _prepare_vml(self) -> None:
    """Iterate through the worksheets and set up the VML objects.

    VML is used for cell comments and header/footer images. Tracks the
    number of VML and comment files needed by the package.
    """
    comment_id = 0
    vml_drawing_id = 0
    vml_data_id = 1
    vml_header_id = 0
    vml_shape_id = 1024
    vml_files = 0
    comment_files = 0

    for sheet in self.worksheets():
        if not sheet.has_vml and not sheet.has_header_vml:
            continue
        vml_files += 1

        if sheet.has_vml:
            if sheet.has_comments:
                comment_files += 1
                comment_id += 1
                self.has_comments = True

            vml_drawing_id += 1

            count = sheet._prepare_vml_objects(
                vml_data_id, vml_shape_id, vml_drawing_id, comment_id
            )

            # Each VML should start with a shape id incremented by 1024.
            vml_data_id += 1 * int((1024 + count) / 1024)
            vml_shape_id += 1024 * int((1024 + count) / 1024)

        if sheet.has_header_vml:
            vml_header_id += 1
            vml_drawing_id += 1
            sheet._prepare_header_vml_objects(vml_header_id, vml_drawing_id)

    self.num_vml_files = vml_files
    self.num_comment_files = comment_files
def _prepare_tables(self) -> None:
    """Set the table ids for the worksheet tables.

    Ids are sequential across the whole workbook; `seen` is shared
    between sheets so duplicate table names can be detected.
    """
    table_id = 0
    seen = {}

    for sheet in self.worksheets():
        table_count = len(sheet.tables)

        if not table_count:
            continue

        sheet._prepare_tables(table_id + 1, seen)
        table_id += table_count
def _prepare_metadata(self) -> None:
# Set the metadata rel link.
self.has_embedded_images = self.embedded_images.has_images()
self.has_metadata = self.has_embedded_images
for sheet in self.worksheets():
if sheet.has_dynamic_arrays:
self.has_metadata = True
self.has_dynamic_functions = True
def _add_chart_data(self) -> None:
    """Add "cached" data to charts to provide the numCache and strCache
    data for series and title/axis ranges.

    Resolves each chart range formula against the worksheet data,
    memoizing results in `seen_ranges` so each range is read once.
    """
    worksheets = {}
    seen_ranges = {}
    charts = []

    # Map worksheet names to worksheet objects.
    for worksheet in self.worksheets():
        worksheets[worksheet.name] = worksheet

    # Build a list of the worksheet charts including any combined charts.
    for chart in self.charts:
        charts.append(chart)
        if chart.combined:
            charts.append(chart.combined)

    for chart in charts:
        for c_range in chart.formula_ids.keys():
            r_id = chart.formula_ids[c_range]

            # Skip if the series has user defined data.
            if chart.formula_data[r_id] is not None:
                if c_range not in seen_ranges or seen_ranges[c_range] is None:
                    data = chart.formula_data[r_id]
                    seen_ranges[c_range] = data
                continue

            # Check to see if the data is already cached locally.
            if c_range in seen_ranges:
                chart.formula_data[r_id] = seen_ranges[c_range]
                continue

            # Convert the range formula to a sheet name and cell range.
            (sheetname, cells) = self._get_chart_range(c_range)

            # Skip if we couldn't parse the formula.
            if sheetname is None:
                continue

            # Handle non-contiguous ranges like:
            # (Sheet1!$A$1:$A$2,Sheet1!$A$4:$A$5).
            # We don't try to parse them. We just return an empty list.
            if sheetname.startswith("("):
                chart.formula_data[r_id] = []
                seen_ranges[c_range] = []
                continue

            # Warn if the name is unknown since it indicates a user error
            # in a chart series formula.
            if sheetname not in worksheets:
                warn(
                    f"Unknown worksheet reference '{sheetname}' in range "
                    f"'{c_range}' passed to add_series()"
                )
                chart.formula_data[r_id] = []
                seen_ranges[c_range] = []
                continue

            # Find the worksheet object based on the sheet name.
            worksheet = worksheets[sheetname]

            # Get the data from the worksheet table.
            data = worksheet._get_range_data(*cells)

            # Add the data to the chart.
            chart.formula_data[r_id] = data

            # Store range data locally to avoid lookup if seen again.
            seen_ranges[c_range] = data
def _get_chart_range(self, c_range):
    """Convert a range formula such as Sheet1!$B$1:$B$5 into a sheet name
    and cell range such as ('Sheet1', [0, 1, 4, 1]).

    Returns (None, None) if the formula can't be parsed or spans more
    than one row AND more than one column (only 1D ranges are handled).
    """
    # Split the range formula into sheetname and cells at the last '!'.
    pos = c_range.rfind("!")
    if pos > 0:
        sheetname = c_range[:pos]
        cells = c_range[pos + 1 :]
    else:
        return None, None

    # Split the cell range into 2 cells or else use single cell for both.
    if cells.find(":") > 0:
        (cell_1, cell_2) = cells.split(":", 1)
    else:
        (cell_1, cell_2) = (cells, cells)

    # Remove leading/trailing quotes and convert escaped quotes to single.
    sheetname = sheetname.strip("'")
    sheetname = sheetname.replace("''", "'")

    try:
        # Get the row, col values from the Excel ranges. We do this in a
        # try block for ranges that can't be parsed such as defined names.
        (row_start, col_start) = xl_cell_to_rowcol(cell_1)
        (row_end, col_end) = xl_cell_to_rowcol(cell_2)
    except AttributeError:
        return None, None

    # We only handle 1D ranges.
    if row_start != row_end and col_start != col_end:
        return None, None

    return sheetname, [row_start, col_start, row_end, col_end]
def _prepare_sst_string_data(self) -> None:
    """Convert the shared string table data from a dict to a list."""
    self.str_table._sort_string_data()
def _get_packager(self):
    """Return an instance of the Packager class to create the xlsx package.

    This allows the default packager to be over-ridden by subclasses.
    """
    return Packager()
def _default_column_metrics(self, width: int) -> Tuple[int, int, int]:
# Get default font metrics for a default column width.
#
# This function returns the font metrics (max_digit_width, padding,
# max_col_width) based on the column pixel width for a default font.
#
# To add support for additional fonts and sizes please open a GitHub request
# with an empty sample workbook with one worksheet.
if width == 56:
metrics = (6, 5, 1533)
elif width == 64:
metrics = (7, 5, 1790)
elif width == 72:
metrics = (8, 5, 2043)
elif width == 80:
metrics = (9, 7, 2300)
elif width == 96:
metrics = (11, 7, 2810)
elif width == 104:
metrics = (12, 7, 3065)
elif width == 120:
metrics = (13, 9, 3323)
else:
warn(f"Unsupported default_column_width '{width}'. Using 64 pixels.")
metrics = (7, 5, 1790)
return metrics
def _read_theme_from_file(self, path: Union[str, os.PathLike]) -> str:
    """Read theme XML from either a zip file (thmx/xlsx) or a text file.

    Tries the zip container first (looking for theme1.xml at the thmx
    and xlsx locations); on BadZipFile falls back to reading the path
    as plain UTF-8 text. Raises ThemeFileError if the zip contains no
    theme, or IOError if the plain-text fallback fails.
    """
    try:
        # Try to read as a thmx/xlsx zip file first.
        with zipfile.ZipFile(path, "r") as archive:
            possible_paths = [
                "theme/theme/theme1.xml",  # thmx file.
                "xl/theme/theme1.xml",  # xlsx file.
            ]

            for theme_path in possible_paths:
                try:
                    with archive.open(theme_path) as theme_file:
                        theme_xml = theme_file.read().decode("utf-8")
                        return theme_xml
                except KeyError:
                    # Member not in the archive; try the next location.
                    continue

            raise ThemeFileError(f"No theme1.xml found in file: '{path}'.")

    except zipfile.BadZipFile:
        try:
            # Try reading as a text file if zipfile failed.
            with open(path, "r", encoding="utf-8") as f:
                return f.read()
        except IOError as e:
            raise IOError(f"Could not read file '{path}': {e}.") from e
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_workbook(self) -> None:
    """Write the <workbook> root element with its namespaces."""
    schema = "http://schemas.openxmlformats.org"
    xmlns = schema + "/spreadsheetml/2006/main"
    xmlns_r = schema + "/officeDocument/2006/relationships"

    attributes = [
        ("xmlns", xmlns),
        ("xmlns:r", xmlns_r),
    ]

    self._xml_start_tag("workbook", attributes)
def _write_file_version(self) -> None:
    """Write the <fileVersion> element."""
    app_name = "xl"
    last_edited = 4
    lowest_edited = 4
    rup_build = 4505

    attributes = [
        ("appName", app_name),
        ("lastEdited", last_edited),
        ("lowestEdited", lowest_edited),
        ("rupBuild", rup_build),
    ]

    # VBA workbooks carry a fixed codeName GUID.
    if self.vba_project:
        attributes.append(("codeName", "{37E998C4-C9E5-D4B9-71C8-EB1FF731991C}"))

    self._xml_empty_tag("fileVersion", attributes)
def _write_file_sharing(self) -> None:
    """Write the <fileSharing> element (only for read-only-recommended)."""
    if self.read_only == 0:
        return

    attributes = [("readOnlyRecommended", 1)]

    self._xml_empty_tag("fileSharing", attributes)
def _write_workbook_pr(self) -> None:
    """Write the <workbookPr> element (codeName, 1904 dates, theme)."""
    attributes = []

    if self.vba_codename:
        attributes.append(("codeName", self.vba_codename))
    if self.date_1904:
        attributes.append(("date1904", 1))
    if self.default_theme_version:
        attributes.append(("defaultThemeVersion", self.default_theme_version))

    self._xml_empty_tag("workbookPr", attributes)
def _write_book_views(self) -> None:
    """Write the <bookViews> element wrapping a single <workbookView>."""
    self._xml_start_tag("bookViews")
    self._write_workbook_view()
    self._xml_end_tag("bookViews")
def _write_workbook_view(self) -> None:
    """Write the <workbookView> element (window geometry and active tab)."""
    attributes = [
        ("xWindow", self.x_window),
        ("yWindow", self.y_window),
        ("windowWidth", self.window_width),
        ("windowHeight", self.window_height),
    ]

    # Store the tabRatio attribute when it isn't the default.
    if self.tab_ratio != 600:
        attributes.append(("tabRatio", self.tab_ratio))

    # Store the firstSheet attribute when it isn't the default.
    # The XML attribute is 1-based, the internal index is 0-based.
    if self.worksheet_meta.firstsheet > 0:
        firstsheet = self.worksheet_meta.firstsheet + 1
        attributes.append(("firstSheet", firstsheet))

    # Store the activeTab attribute when it isn't the first sheet.
    if self.worksheet_meta.activesheet > 0:
        attributes.append(("activeTab", self.worksheet_meta.activesheet))

    self._xml_empty_tag("workbookView", attributes)
def _write_sheets(self) -> None:
    """Write the <sheets> element with one <sheet> per worksheet."""
    self._xml_start_tag("sheets")

    # Sheet ids are 1-based and follow worksheet order.
    id_num = 1
    for worksheet in self.worksheets():
        self._write_sheet(worksheet.name, id_num, worksheet.hidden)
        id_num += 1

    self._xml_end_tag("sheets")
def _write_sheet(self, name, sheet_id, hidden) -> None:
    """Write a single <sheet> element.

    `hidden` is 0 = visible, 1 = hidden, 2 = very hidden.
    """
    attributes = [
        ("name", name),
        ("sheetId", sheet_id),
    ]

    if hidden == 1:
        attributes.append(("state", "hidden"))
    elif hidden == 2:
        attributes.append(("state", "veryHidden"))

    attributes.append(("r:id", "rId" + str(sheet_id)))

    self._xml_empty_tag("sheet", attributes)
def _write_calc_pr(self) -> None:
    """Write the <calcPr> element (calculation mode and flags)."""
    attributes = [("calcId", self.calc_id)]

    if self.calc_mode == "manual":
        attributes.append(("calcMode", self.calc_mode))
        # Manual mode also disables recalculation on save.
        attributes.append(("calcOnSave", "0"))
    elif self.calc_mode == "autoNoTable":
        attributes.append(("calcMode", self.calc_mode))

    if self.calc_on_load:
        attributes.append(("fullCalcOnLoad", "1"))

    self._xml_empty_tag("calcPr", attributes)
def _write_defined_names(self) -> None:
    """Write the <definedNames> element; omitted when there are none."""
    if not self.defined_names:
        return

    self._xml_start_tag("definedNames")

    for defined_name in self.defined_names:
        self._write_defined_name(defined_name)

    self._xml_end_tag("definedNames")
def _write_defined_name(self, defined_name) -> None:
    """Write a single <definedName> element.

    `defined_name` is a [name, sheet_id, range, hidden] list; a
    sheet_id of -1 means a workbook-global (not sheet-local) name.
    """
    name = defined_name[0]
    sheet_id = defined_name[1]
    sheet_range = defined_name[2]
    hidden = defined_name[3]

    attributes = [("name", name)]

    if sheet_id != -1:
        attributes.append(("localSheetId", sheet_id))
    if hidden:
        attributes.append(("hidden", 1))

    self._xml_data_element("definedName", sheet_range, attributes)
# A metadata class to share data between worksheets.
| Workbook |
python | realpython__materials | python-namedtuple/subclass.py | {
"start": 155,
"end": 532
} | class ____(BasePerson):
"""A namedtuple subclass to hold a person's data."""
__slots__ = ()
def __str__(self):
return f"Name: {self.name}, age: {self.age} years old."
@property
def age(self):
return (date.today() - self.birthdate).days // 365
print(Person.__doc__)
jane = Person("Jane", date(1996, 3, 5))
print(jane.age)
print(jane)
| Person |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 35922,
"end": 37030
} | class ____(ChainedAssetSelection):
depth: Optional[int]
include_self: bool
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
selection = self.child.resolve_inner(asset_graph, allow_missing=allow_missing)
return operator.sub(
(
selection
| fetch_connected(
selection, asset_graph.asset_dep_graph, direction="downstream", depth=self.depth
)
),
selection if not self.include_self else set(),
)
def to_selection_str(self) -> str:
if self.depth is None:
base = f"{self.child.operand_to_selection_str()}+"
elif self.depth == 0:
base = self.child.operand_to_selection_str()
else:
base = f"{self.child.operand_to_selection_str()}{'+'}{self.depth}"
if self.include_self:
return base
else:
return f"{base} and not {self.child.operand_to_selection_str()}"
@whitelist_for_serdes
@record
| DownstreamAssetSelection |
python | huggingface__transformers | src/transformers/models/auto/configuration_auto.py | {
"start": 38179,
"end": 40390
} | class ____(OrderedDict[str, type[PreTrainedConfig]]):
"""
A dictionary that lazily load its values when they are requested.
"""
def __init__(self, mapping) -> None:
self._mapping = mapping
self._extra_content = {}
self._modules = {}
def __getitem__(self, key: str) -> type[PreTrainedConfig]:
if key in self._extra_content:
return self._extra_content[key]
if key not in self._mapping:
raise KeyError(key)
value = self._mapping[key]
module_name = model_type_to_module_name(key)
if module_name not in self._modules:
self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
if hasattr(self._modules[module_name], value):
return getattr(self._modules[module_name], value)
# Some of the mappings have entries model_type -> config of another model type. In that case we try to grab the
# object at the top level.
transformers_module = importlib.import_module("transformers")
return getattr(transformers_module, value)
def keys(self) -> list[str]:
return list(self._mapping.keys()) + list(self._extra_content.keys())
def values(self) -> list[type[PreTrainedConfig]]:
return [self[k] for k in self._mapping] + list(self._extra_content.values())
def items(self) -> list[tuple[str, type[PreTrainedConfig]]]:
return [(k, self[k]) for k in self._mapping] + list(self._extra_content.items())
def __iter__(self) -> Iterator[str]:
return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))
def __contains__(self, item: object) -> bool:
return item in self._mapping or item in self._extra_content
def register(self, key: str, value: type[PreTrainedConfig], exist_ok=False) -> None:
"""
Register a new configuration in this mapping.
"""
if key in self._mapping and not exist_ok:
raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
self._extra_content[key] = value
CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES)
| _LazyConfigMapping |
python | pytorch__pytorch | test/functorch/test_vmap.py | {
"start": 49170,
"end": 51978
} | class ____:
class TestVmapBase(TestCase):
def __init__(self, method_name="runTest"):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is None:
return
if not should_allow_vmap_fallback_usage(test_method):
setattr(
self,
method_name,
self._wrap_method_with_vmap_fallback_check(test_method),
)
def _wrap_method_with_vmap_fallback_check(self, method):
# msg = (
# 'Expected the test to not invoke the vmap fallback path, i.e., '
# 'all of the operators being tested in this test should have batching '
# 'rules implemented. If you are intentionally testing something to '
# 'do with the fallback path, use allowVmapFallbackUsage. Otherwise, '
# 'please make sure that batching rules are implemented for the '
# 'operator(s) being tested.'
# )
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
with EnableVmapFallbackWarnings():
method(*args, **kwargs)
# for captured_warning in wa:
# self.assertNotRegex(str(captured_warning.message), FALLBACK_REGEX, msg)
return types.MethodType(wrapper, self)
@allowVmapFallbackUsage
def test_vmap_fallback_check_ok(self):
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
vmap(op_using_fallback)(torch.rand(3))
@unittest.expectedFailure
def test_vmap_fallback_check(self):
@self._wrap_method_with_vmap_fallback_check
def no_fallback(self):
pass
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
@self._wrap_method_with_vmap_fallback_check
def uses_fallback(self):
vmap(op_using_fallback)(torch.rand(3))
no_fallback(self)
with self.assertRaises(AssertionError):
uses_fallback(self)
def _make_case(op, input_getter=TensorFactory.randn):
return (op, input_getter)
@markDynamoStrictTest
| Namespace |
python | scipy__scipy | scipy/linalg/tests/test_lapack.py | {
"start": 33535,
"end": 56255
} | class ____:
@pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
def test_hetrd_with_zero_dim_array(self, complex_dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd = get_lapack_funcs('hetrd', (A,))
assert_raises(ValueError, hetrd, A)
@pytest.mark.parametrize('real_dtype,complex_dtype',
zip(REAL_DTYPES, COMPLEX_DTYPES))
@pytest.mark.parametrize('n', (1, 3))
def test_hetrd(self, n, real_dtype, complex_dtype):
A = np.zeros((n, n), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# test query lwork
for x in [0, 1]:
_, info = hetrd_lwork(n, lower=x)
assert_equal(info, 0)
# lwork returns complex which segfaults hetrd call (gh-10388)
# use the safe and recommended option
lwork = _compute_lwork(hetrd_lwork, n)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
# Make matrix fully Hermitian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
# For <s,d>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
rng = np.random.default_rng(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rng.random((n, n))).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rng.random((n, n)) + rng.random((n, n))*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
# The error is at most 1-fold
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
def test_sygst():
rng = np.random.default_rng(1234)
for ind, dtype in enumerate(REAL_DTYPES):
# DTYPES = <s,d> sygst
n = 10
potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
'syevd', 'sygvd'),
dtype=dtype)
A = rng.random((n, n)).astype(dtype)
A = (A + A.T)/2
# B must be positive definite
B = rng.random((n, n)).astype(dtype)
B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (sygvd)
eig_gvd, _, info = sygvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = sygst(A, b)
assert_(info == 0)
eig, _, info = syevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1.2e-4)
def test_hegst():
rng = np.random.default_rng(1234)
for ind, dtype in enumerate(COMPLEX_DTYPES):
# DTYPES = <c,z> hegst
n = 10
potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
'heevd', 'hegvd'),
dtype=dtype)
A = rng.random((n, n)).astype(dtype) + 1j * rng.random((n, n)).astype(dtype)
A = (A + A.conj().T)/2
# B must be positive definite
B = rng.random((n, n)).astype(dtype) + 1j * rng.random((n, n)).astype(dtype)
B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (hegvd)
eig_gvd, _, info = hegvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = hegst(A, b)
assert_(info == 0)
eig, _, info = heevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_tzrzf():
"""
This test performs an RZ decomposition in which an m x n upper trapezoidal
array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
and Z is unitary.
"""
rng = np.random.RandomState(1234)
m, n = 10, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork = _compute_lwork(tzrzf_lw, m, n)
if ind < 2:
A = triu(rng.rand(m, n).astype(dtype))
else:
A = triu((rng.rand(m, n) + rng.rand(m, n)*1j).astype(dtype))
# assert wrong shape arg, f2py returns generic error
assert_raises(Exception, tzrzf, A.T)
rz, tau, info = tzrzf(A, lwork=lwork)
# Check success
assert_(info == 0)
# Get Z manually for comparison
R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
Id = np.eye(n, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
Z = reduce(np.dot, ref)
assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
atol=10*np.spacing(dtype(1.0).real), rtol=0.)
def test_tfsm():
"""
Test for solving a linear system with the coefficient matrix is a
triangular array stored in Full Packed (RFP) format.
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = triu(rng.rand(n, n) + rng.rand(n, n)*1j + eye(n)).astype(dtype)
trans = 'C'
else:
A = triu(rng.rand(n, n) + eye(n)).astype(dtype)
trans = 'T'
trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
dtype=dtype)
Afp, _ = trttf(A)
B = rng.rand(n, 2).astype(dtype)
soln = tfsm(-1, Afp, B)
assert_array_almost_equal(soln, solve(-A, B),
decimal=4 if ind % 2 == 0 else 6)
soln = tfsm(-1, Afp, B, trans=trans)
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Make A, unit diagonal
A[np.arange(n), np.arange(n)] = dtype(1.)
soln = tfsm(-1, Afp, B, trans=trans, diag='U')
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Change side
B2 = rng.rand(3, n).astype(dtype)
soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
decimal=4 if ind % 2 == 0 else 6)
def test_ormrz_unmrz():
"""
This test performs a matrix multiplication with an arbitrary m x n matrix C
and a unitary matrix Q without explicitly forming the array. The array data
is encoded in the rectangular part of A which is obtained from ?TZRZF. Q
size is inferred by m, n, side keywords.
"""
rng = np.random.RandomState(1234)
qm, qn, cn = 10, 15, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
if ind < 2:
A = triu(rng.random((qm, qn)).astype(dtype))
C = rng.random((cn, cn)).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
dtype=dtype)
else:
A = triu((rng.random((qm, qn)) + rng.random((qm, qn))*1j).astype(dtype))
C = (rng.random((cn, cn)) + rng.random((cn, cn))*1j).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
dtype=dtype)
lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
rz, tau, info = tzrzf(A, lwork=lwork_rz)
# Get Q manually for comparison
V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
Id = np.eye(qn, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
Q = reduce(np.dot, ref)
# Now that we have Q, we can test whether lapack results agree with
# each case of CQ, CQ^H, QC, and QC^H
trans = 'T' if ind < 2 else 'C'
tol = 10*np.spacing(dtype(1.0).real)
cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
rtol=0.)
def test_tfttr_trttf():
"""
Test conversion routines between the Rectangular Full Packed (RFP) format
and Standard Triangular Array (TR)
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rng.rand(n, n) + rng.rand(n, n)*1j).astype(dtype)
transr = 'C'
else:
A_full = (rng.rand(n, n)).astype(dtype)
transr = 'T'
trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
A_tf_U, info = trttf(A_full)
assert_(info == 0)
A_tf_L, info = trttf(A_full, uplo='L')
assert_(info == 0)
A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
assert_(info == 0)
A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
assert_(info == 0)
# Create the RFP array manually (n is even!)
A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T
assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_U_T,
A_tf_U_m.conj().T.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L_T,
A_tf_L_m.conj().T.reshape(-1, order='F'))
# Get the original array from RFP
A_tr_U, info = tfttr(n, A_tf_U)
assert_(info == 0)
A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
assert_(info == 0)
A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
assert_(info == 0)
A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_U_T, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
assert_array_almost_equal(A_tr_L_T, tril(A_full))
def test_tpttr_trttp():
"""
Test conversion routines between the Rectangular Full Packed (RFP) format
and Standard Triangular Array (TR)
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rng.rand(n, n) + rng.rand(n, n)*1j).astype(dtype)
else:
A_full = (rng.rand(n, n)).astype(dtype)
trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
A_tp_U, info = trttp(A_full)
assert_(info == 0)
A_tp_L, info = trttp(A_full, uplo='L')
assert_(info == 0)
# Create the TP array manually
inds = tril_indices(n)
A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_U_m[:] = (triu(A_full).T)[inds]
inds = triu_indices(n)
A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_L_m[:] = (tril(A_full).T)[inds]
assert_array_almost_equal(A_tp_U, A_tp_U_m)
assert_array_almost_equal(A_tp_L, A_tp_L_m)
# Get the original array from TP
A_tr_U, info = tpttr(n, A_tp_U)
assert_(info == 0)
A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
def test_pftrf():
"""
Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rng.rand(n, n) + rng.rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rng.rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
Achol_rfp, info = pftrf(n, Afp)
assert_(info == 0)
A_chol_r, _ = tfttr(n, Achol_rfp)
Achol = cholesky(A)
assert_array_almost_equal(A_chol_r, Achol)
def test_pftri():
"""
Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array to find its inverse
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rng.rand(n, n) + rng.rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rng.rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
A_inv_rfp, info = pftri(n, A_chol_rfp)
assert_(info == 0)
A_inv_r, _ = tfttr(n, A_inv_rfp)
Ainv = inv(A)
assert_array_almost_equal(A_inv_r, triu(Ainv),
decimal=4 if ind % 2 == 0 else 6)
def test_pftrs():
"""
Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array and solve a linear system
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rng.rand(n, n) + rng.rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rng.rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
B = ones((n, 3), dtype=dtype)
Bf1 = ones((n+2, 3), dtype=dtype)
Bf2 = ones((n-2, 3), dtype=dtype)
pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
# larger B arrays shouldn't segfault
soln, info = pftrs(n, A_chol_rfp, Bf1)
assert_(info == 0)
assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
soln, info = pftrs(n, A_chol_rfp, B)
assert_(info == 0)
assert_array_almost_equal(solve(A, B), soln,
decimal=4 if ind % 2 == 0 else 6)
def test_sfrk_hfrk():
"""
Test for performing a symmetric rank-k operation for matrix in RFP format.
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rng.rand(n, n) + rng.rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rng.rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
prefix = 's'if ind < 2 else 'h'
trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', f'{prefix}frk'),
dtype=dtype)
Afp, _ = trttf(A)
C = rng.rand(n, 2).astype(dtype)
Afp_out = shfrk(n, 2, -1, C, 2, Afp)
A_out, _ = tfttr(n, Afp_out)
assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
decimal=4 if ind % 2 == 0 else 6)
def test_syconv():
"""
Test for going back and forth between the returned format of he/sytrf to
L and D factors/permutations.
"""
rng = np.random.RandomState(1234)
for ind, dtype in enumerate(DTYPES):
n = 10
if ind > 1:
A = (rng.randint(-30, 30, (n, n)) +
rng.randint(-30, 30, (n, n))*1j).astype(dtype)
A = A + A.conj().T
else:
A = rng.randint(-30, 30, (n, n)).astype(dtype)
A = A + A.T + n*eye(n)
tol = 100*np.spacing(dtype(1.0).real)
syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
'sytrf_lwork'), dtype=dtype)
lw = _compute_lwork(trf_lwork, n, lower=1)
L, D, perm = ldl(A, lower=1, hermitian=False)
lw = _compute_lwork(trf_lwork, n, lower=1)
ldu, ipiv, info = trf(A, lower=1, lwork=lw)
a, e, info = syconv(ldu, ipiv, lower=1)
assert_allclose(tril(a, -1,), tril(L[perm, :], -1), atol=tol, rtol=0.)
# Test also upper
U, D, perm = ldl(A, lower=0, hermitian=False)
ldu, ipiv, info = trf(A, lower=0)
a, e, info = syconv(ldu, ipiv, lower=0)
assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
| TestHetrd |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/redefined_slots_in_subclass.py | {
"start": 210,
"end": 259
} | class ____(Parent):
__slots__ = ("c", "a")
| Child |
python | celery__celery | t/unit/utils/test_deprecated.py | {
"start": 86,
"end": 1591
} | class ____:
@patch('celery.utils.deprecated.warn')
def test_deprecated(self, warn):
class X:
_foo = None
@deprecated.Property(deprecation='1.2')
def foo(self):
return self._foo
@foo.setter
def foo(self, value):
self._foo = value
@foo.deleter
def foo(self):
self._foo = None
assert X.foo
assert X.foo.__set__(None, 1)
assert X.foo.__delete__(None)
x = X()
x.foo = 10
warn.assert_called_with(
stacklevel=3, deprecation='1.2', alternative=None,
description='foo', removal=None,
)
warn.reset_mock()
assert x.foo == 10
warn.assert_called_with(
stacklevel=3, deprecation='1.2', alternative=None,
description='foo', removal=None,
)
warn.reset_mock()
del (x.foo)
warn.assert_called_with(
stacklevel=3, deprecation='1.2', alternative=None,
description='foo', removal=None,
)
assert x._foo is None
def test_deprecated_no_setter_or_deleter(self):
class X:
@deprecated.Property(deprecation='1.2')
def foo(self):
pass
assert X.foo
x = X()
with pytest.raises(AttributeError):
x.foo = 10
with pytest.raises(AttributeError):
del (x.foo)
| test_deprecated_property |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/models.py | {
"start": 835,
"end": 6013
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ModelsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return ModelsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ModelsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return ModelsWithStreamingResponse(self)
def retrieve(
self,
model_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> BetaModelInfo:
"""
Get a specific model.
The Models API response can be used to determine information about a specific
model or resolve a model alias to a model ID.
Args:
model_id: Model identifier or alias.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not model_id:
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
extra_headers = {
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
**(extra_headers or {}),
}
return self._get(
f"/v1/models/{model_id}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=BetaModelInfo,
)
def list(
self,
*,
after_id: str | Omit = omit,
before_id: str | Omit = omit,
limit: int | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncPage[BetaModelInfo]:
"""
List available models.
The Models API response can be used to determine which models are available for
use in the API. More recently released models are listed first.
Args:
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately after this object.
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately before this object.
limit: Number of items to return per page.
Defaults to `20`. Ranges from `1` to `1000`.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
**(extra_headers or {}),
}
return self._get_api_list(
"/v1/models?beta=true",
page=SyncPage[BetaModelInfo],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after_id": after_id,
"before_id": before_id,
"limit": limit,
},
model_list_params.ModelListParams,
),
),
model=BetaModelInfo,
)
| Models |
python | pandas-dev__pandas | pandas/tests/extension/base/casting.py | {
"start": 180,
"end": 3254
} | class ____:
"""Casting to and from ExtensionDtypes"""
def test_astype_object_series(self, all_data):
ser = pd.Series(all_data, name="A")
result = ser.astype(object)
assert result.dtype == np.dtype(object)
if hasattr(result._mgr, "blocks"):
blk = result._mgr.blocks[0]
assert isinstance(blk, NumpyBlock)
assert blk.is_object
assert isinstance(result._mgr.array, np.ndarray)
assert result._mgr.array.dtype == np.dtype(object)
def test_astype_object_frame(self, all_data):
df = pd.DataFrame({"A": all_data})
result = df.astype(object)
if hasattr(result._mgr, "blocks"):
blk = result._mgr.blocks[0]
assert isinstance(blk, NumpyBlock), type(blk)
assert blk.is_object
arr = result._mgr.blocks[0].values
assert isinstance(arr, np.ndarray)
assert arr.dtype == np.dtype(object)
# check that we can compare the dtypes
comp = result.dtypes == df.dtypes
assert not comp.any()
def test_tolist(self, data):
result = pd.Series(data).tolist()
expected = list(data)
assert result == expected
def test_astype_str(self, data):
result = pd.Series(data[:2]).astype(str)
expected = pd.Series([str(x) for x in data[:2]], dtype=str)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"nullable_string_dtype",
[
"string[python]",
pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
],
)
def test_astype_string(self, data, nullable_string_dtype):
# GH-33465, GH#45326 as of 2.0 we decode bytes instead of calling str(obj)
def as_str(x):
if isinstance(x, bytes):
return x.decode()
elif x is data.dtype.na_value:
return x
else:
return str(x)
result = pd.Series(data[:5]).astype(nullable_string_dtype)
expected = pd.Series(
[as_str(x) for x in data[:5]],
dtype=nullable_string_dtype,
)
tm.assert_series_equal(result, expected)
def test_to_numpy(self, data):
expected = np.asarray(data)
result = data.to_numpy()
tm.assert_equal(result, expected)
result = pd.Series(data).to_numpy()
tm.assert_equal(result, expected)
def test_astype_empty_dataframe(self, dtype):
# https://github.com/pandas-dev/pandas/issues/33113
df = pd.DataFrame()
result = df.astype(dtype)
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize("copy", [True, False])
def test_astype_own_type(self, data, copy):
# ensure that astype returns the original object for equal dtype and copy=False
# https://github.com/pandas-dev/pandas/issues/28488
result = data.astype(data.dtype, copy=copy)
assert (result is data) is (not copy)
tm.assert_extension_array_equal(result, data)
| BaseCastingTests |
python | doocs__leetcode | solution/2800-2899/2848.Points That Intersect With Cars/Solution2.py | {
"start": 0,
"end": 376
} | class ____:
def numberOfPoints(self, nums: List[List[int]]) -> int:
d = defaultdict(int)
for start, end in nums:
d[start] += 1
d[end + 1] -= 1
ans = s = last = 0
for cur, v in sorted(d.items()):
if s > 0:
ans += cur - last
s += v
last = cur
return ans
| Solution |
python | fastapi__sqlmodel | sqlmodel/main.py | {
"start": 28223,
"end": 37749
} | class ____(BaseModel, metaclass=SQLModelMetaclass, registry=default_registry):
# SQLAlchemy needs to set weakref(s), Pydantic will set the other slots values
__slots__ = ("__weakref__",)
__tablename__: ClassVar[Union[str, Callable[..., str]]]
__sqlmodel_relationships__: ClassVar[Dict[str, RelationshipProperty[Any]]]
__name__: ClassVar[str]
metadata: ClassVar[MetaData]
__allow_unmapped__ = True # https://docs.sqlalchemy.org/en/20/changelog/migration_20.html#migration-20-step-six
if IS_PYDANTIC_V2:
model_config = SQLModelConfig(from_attributes=True)
else:
class Config:
orm_mode = True
def __new__(cls, *args: Any, **kwargs: Any) -> Any:
new_object = super().__new__(cls)
# SQLAlchemy doesn't call __init__ on the base class when querying from DB
# Ref: https://docs.sqlalchemy.org/en/14/orm/constructors.html
# Set __fields_set__ here, that would have been set when calling __init__
# in the Pydantic model so that when SQLAlchemy sets attributes that are
# added (e.g. when querying from DB) to the __fields_set__, this already exists
init_pydantic_private_attrs(new_object)
return new_object
def __init__(__pydantic_self__, **data: Any) -> None:
# Uses something other than `self` the first arg to allow "self" as a
# settable attribute
# SQLAlchemy does very dark black magic and modifies the __init__ method in
# sqlalchemy.orm.instrumentation._generate_init()
# so, to make SQLAlchemy work, it's needed to explicitly call __init__ to
# trigger all the SQLAlchemy logic, it doesn't work using cls.__new__, setting
# attributes obj.__dict__, etc. The __init__ method has to be called. But
# there are cases where calling all the default logic is not ideal, e.g.
# when calling Model.model_validate(), as the validation is done outside
# of instance creation.
# At the same time, __init__ is what users would normally call, by creating
# a new instance, which should have validation and all the default logic.
# So, to be able to set up the internal SQLAlchemy logic alone without
# executing the rest, and support things like Model.model_validate(), we
# use a contextvar to know if we should execute everything.
if finish_init.get():
sqlmodel_init(self=__pydantic_self__, data=data)
def __setattr__(self, name: str, value: Any) -> None:
if name in {"_sa_instance_state"}:
self.__dict__[name] = value
return
else:
# Set in SQLAlchemy, before Pydantic to trigger events and updates
if is_table_model_class(self.__class__) and is_instrumented(self, name): # type: ignore[no-untyped-call]
set_attribute(self, name, value)
# Set in Pydantic model to trigger possible validation changes, only for
# non relationship values
if name not in self.__sqlmodel_relationships__:
super().__setattr__(name, value)
def __repr_args__(self) -> Sequence[Tuple[Optional[str], Any]]:
# Don't show SQLAlchemy private attributes
return [
(k, v)
for k, v in super().__repr_args__()
if not (isinstance(k, str) and k.startswith("_sa_"))
]
@declared_attr # type: ignore
def __tablename__(cls) -> str:
return cls.__name__.lower()
@classmethod
def model_validate( # type: ignore[override]
cls: Type[_TSQLModel],
obj: Any,
*,
strict: Union[bool, None] = None,
from_attributes: Union[bool, None] = None,
context: Union[Dict[str, Any], None] = None,
update: Union[Dict[str, Any], None] = None,
) -> _TSQLModel:
return sqlmodel_validate(
cls=cls,
obj=obj,
strict=strict,
from_attributes=from_attributes,
context=context,
update=update,
)
def model_dump(
self,
*,
mode: Union[Literal["json", "python"], str] = "python",
include: Union[IncEx, None] = None,
exclude: Union[IncEx, None] = None,
context: Union[Any, None] = None, # v2.7
by_alias: Union[bool, None] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
exclude_computed_fields: bool = False, # v2.12
round_trip: bool = False,
warnings: Union[bool, Literal["none", "warn", "error"]] = True,
fallback: Union[Callable[[Any], Any], None] = None, # v2.11
serialize_as_any: bool = False, # v2.7
) -> Dict[str, Any]:
if PYDANTIC_MINOR_VERSION < (2, 11):
by_alias = by_alias or False
extra_kwargs: Dict[str, Any] = {}
if PYDANTIC_MINOR_VERSION >= (2, 7):
extra_kwargs["context"] = context
extra_kwargs["serialize_as_any"] = serialize_as_any
if PYDANTIC_MINOR_VERSION >= (2, 11):
extra_kwargs["fallback"] = fallback
if PYDANTIC_MINOR_VERSION >= (2, 12):
extra_kwargs["exclude_computed_fields"] = exclude_computed_fields
if IS_PYDANTIC_V2:
return super().model_dump(
mode=mode,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
round_trip=round_trip,
warnings=warnings,
**extra_kwargs,
)
else:
return super().dict(
include=include,
exclude=exclude,
by_alias=by_alias or False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
@deprecated(
"""
🚨 `obj.dict()` was deprecated in SQLModel 0.0.14, you should
instead use `obj.model_dump()`.
"""
)
def dict(
self,
*,
include: Union[IncEx, None] = None,
exclude: Union[IncEx, None] = None,
by_alias: bool = False,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> Dict[str, Any]:
return self.model_dump(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
@classmethod
@deprecated(
"""
🚨 `obj.from_orm(data)` was deprecated in SQLModel 0.0.14, you should
instead use `obj.model_validate(data)`.
"""
)
def from_orm(
cls: Type[_TSQLModel], obj: Any, update: Optional[Dict[str, Any]] = None
) -> _TSQLModel:
return cls.model_validate(obj, update=update)
@classmethod
@deprecated(
"""
🚨 `obj.parse_obj(data)` was deprecated in SQLModel 0.0.14, you should
instead use `obj.model_validate(data)`.
"""
)
def parse_obj(
cls: Type[_TSQLModel], obj: Any, update: Optional[Dict[str, Any]] = None
) -> _TSQLModel:
if not IS_PYDANTIC_V2:
obj = cls._enforce_dict_if_root(obj) # type: ignore[attr-defined] # noqa
return cls.model_validate(obj, update=update)
# From Pydantic, override to only show keys from fields, omit SQLAlchemy attributes
@deprecated(
"""
🚨 You should not access `obj._calculate_keys()` directly.
It is only useful for Pydantic v1.X, you should probably upgrade to
Pydantic v2.X.
""",
category=None,
)
def _calculate_keys(
self,
include: Optional[Mapping[Union[int, str], Any]],
exclude: Optional[Mapping[Union[int, str], Any]],
exclude_unset: bool,
update: Optional[Dict[str, Any]] = None,
) -> Optional[AbstractSet[str]]:
return _calculate_keys(
self,
include=include,
exclude=exclude,
exclude_unset=exclude_unset,
update=update,
)
def sqlmodel_update(
self: _TSQLModel,
obj: Union[Dict[str, Any], BaseModel],
*,
update: Union[Dict[str, Any], None] = None,
) -> _TSQLModel:
use_update = (update or {}).copy()
if isinstance(obj, dict):
for key, value in {**obj, **use_update}.items():
if key in get_model_fields(self):
setattr(self, key, value)
elif isinstance(obj, BaseModel):
for key in get_model_fields(obj):
if key in use_update:
value = use_update.pop(key)
else:
value = getattr(obj, key)
setattr(self, key, value)
for remaining_key in use_update:
if remaining_key in get_model_fields(self):
value = use_update.pop(remaining_key)
setattr(self, remaining_key, value)
else:
raise ValueError(
"Can't use sqlmodel_update() with something that "
f"is not a dict or SQLModel or Pydantic model: {obj}"
)
return self
| SQLModel |
python | pandas-dev__pandas | asv_bench/benchmarks/algos/isin.py | {
"start": 3809,
"end": 4297
} | class ____:
params = [
[np.float64, np.int64, np.uint64, np.object_],
[
1_000,
2_000,
8_000,
100_000,
1_000_000,
],
]
param_names = ["dtype", "size"]
def setup(self, dtype, size):
self.series = Series(np.arange(size)).astype(dtype)
self.values = np.arange(size).astype(dtype)
def time_isin(self, dtype, size):
self.series.isin(self.values)
| IsinWithArangeSorted |
python | urllib3__urllib3 | test/test_connectionpool.py | {
"start": 1405,
"end": 22778
} | class ____:
"""
Tests in this suite should exercise the ConnectionPool functionality
without actually making any network requests or connections.
"""
@pytest.mark.parametrize(
"a, b",
[
("http://google.com/", "/"),
("http://google.com/", "http://google.com/"),
("http://google.com/", "http://google.com"),
("http://google.com/", "http://google.com/abra/cadabra"),
("http://google.com:42/", "http://google.com:42/abracadabra"),
# Test comparison using default ports
("http://google.com:80/", "http://google.com/abracadabra"),
("http://google.com/", "http://google.com:80/abracadabra"),
("https://google.com:443/", "https://google.com/abracadabra"),
("https://google.com/", "https://google.com:443/abracadabra"),
(
"http://[2607:f8b0:4005:805::200e%25eth0]/",
"http://[2607:f8b0:4005:805::200e%eth0]/",
),
(
"https://[2607:f8b0:4005:805::200e%25eth0]:443/",
"https://[2607:f8b0:4005:805::200e%eth0]:443/",
),
("http://[::1]/", "http://[::1]"),
(
"http://[2001:558:fc00:200:f816:3eff:fef9:b954%lo]/",
"http://[2001:558:fc00:200:f816:3eff:fef9:b954%25lo]",
),
],
)
def test_same_host(self, a: str, b: str) -> None:
with connection_from_url(a) as c:
assert c.is_same_host(b)
@pytest.mark.parametrize(
"a, b",
[
("https://google.com/", "http://google.com/"),
("http://google.com/", "https://google.com/"),
("http://yahoo.com/", "http://google.com/"),
("http://google.com:42", "https://google.com/abracadabra"),
("http://google.com", "https://google.net/"),
# Test comparison with default ports
("http://google.com:42", "http://google.com"),
("https://google.com:42", "https://google.com"),
("http://google.com:443", "http://google.com"),
("https://google.com:80", "https://google.com"),
("http://google.com:443", "https://google.com"),
("https://google.com:80", "http://google.com"),
("https://google.com:443", "http://google.com"),
("http://google.com:80", "https://google.com"),
# Zone identifiers are unique connection end points and should
# never be equivalent.
("http://[dead::beef]", "https://[dead::beef%en5]/"),
],
)
def test_not_same_host(self, a: str, b: str) -> None:
with connection_from_url(a) as c:
assert not c.is_same_host(b)
with connection_from_url(b) as c:
assert not c.is_same_host(a)
@pytest.mark.parametrize(
"a, b",
[
("google.com", "/"),
("google.com", "http://google.com/"),
("google.com", "http://google.com"),
("google.com", "http://google.com/abra/cadabra"),
# Test comparison using default ports
("google.com", "http://google.com:80/abracadabra"),
],
)
def test_same_host_no_port_http(self, a: str, b: str) -> None:
# This test was introduced in #801 to deal with the fact that urllib3
# never initializes ConnectionPool objects with port=None.
with HTTPConnectionPool(a) as c:
assert c.is_same_host(b)
@pytest.mark.parametrize(
"a, b",
[
("google.com", "/"),
("google.com", "https://google.com/"),
("google.com", "https://google.com"),
("google.com", "https://google.com/abra/cadabra"),
# Test comparison using default ports
("google.com", "https://google.com:443/abracadabra"),
],
)
def test_same_host_no_port_https(self, a: str, b: str) -> None:
# This test was introduced in #801 to deal with the fact that urllib3
# never initializes ConnectionPool objects with port=None.
with HTTPSConnectionPool(a) as c:
assert c.is_same_host(b)
@pytest.mark.parametrize(
"a, b",
[
("google.com", "https://google.com/"),
("yahoo.com", "http://google.com/"),
("google.com", "https://google.net/"),
("google.com", "http://google.com./"),
],
)
def test_not_same_host_no_port_http(self, a: str, b: str) -> None:
with HTTPConnectionPool(a) as c:
assert not c.is_same_host(b)
with HTTPConnectionPool(b) as c:
assert not c.is_same_host(a)
@pytest.mark.parametrize(
"a, b",
[
("google.com", "http://google.com/"),
("yahoo.com", "https://google.com/"),
("google.com", "https://google.net/"),
("google.com", "https://google.com./"),
],
)
def test_not_same_host_no_port_https(self, a: str, b: str) -> None:
with HTTPSConnectionPool(a) as c:
assert not c.is_same_host(b)
with HTTPSConnectionPool(b) as c:
assert not c.is_same_host(a)
@pytest.mark.parametrize(
"a, b",
[
("%2Fvar%2Frun%2Fdocker.sock", "http+unix://%2Fvar%2Frun%2Fdocker.sock"),
("%2Fvar%2Frun%2Fdocker.sock", "http+unix://%2Fvar%2Frun%2Fdocker.sock/"),
(
"%2Fvar%2Frun%2Fdocker.sock",
"http+unix://%2Fvar%2Frun%2Fdocker.sock/abracadabra",
),
("%2Ftmp%2FTEST.sock", "http+unix://%2Ftmp%2FTEST.sock"),
("%2Ftmp%2FTEST.sock", "http+unix://%2Ftmp%2FTEST.sock/"),
("%2Ftmp%2FTEST.sock", "http+unix://%2Ftmp%2FTEST.sock/abracadabra"),
],
)
def test_same_host_custom_protocol(self, a: str, b: str) -> None:
with HTTPUnixConnectionPool(a) as c:
assert c.is_same_host(b)
@pytest.mark.parametrize(
"a, b",
[
("%2Ftmp%2Ftest.sock", "http+unix://%2Ftmp%2FTEST.sock"),
("%2Ftmp%2Ftest.sock", "http+unix://%2Ftmp%2FTEST.sock/"),
("%2Ftmp%2Ftest.sock", "http+unix://%2Ftmp%2FTEST.sock/abracadabra"),
("%2Fvar%2Frun%2Fdocker.sock", "http+unix://%2Ftmp%2FTEST.sock"),
],
)
def test_not_same_host_custom_protocol(self, a: str, b: str) -> None:
with HTTPUnixConnectionPool(a) as c:
assert not c.is_same_host(b)
def test_max_connections(self) -> None:
with HTTPConnectionPool(host="localhost", maxsize=1, block=True) as pool:
pool._get_conn(timeout=SHORT_TIMEOUT)
with pytest.raises(EmptyPoolError):
pool._get_conn(timeout=SHORT_TIMEOUT)
with pytest.raises(EmptyPoolError):
pool.request("GET", "/", pool_timeout=SHORT_TIMEOUT)
assert pool.num_connections == 1
def test_put_conn_when_pool_is_full_nonblocking(
self, caplog: pytest.LogCaptureFixture
) -> None:
"""
If maxsize = n and we _put_conn n + 1 conns, the n + 1th conn will
get closed and will not get added to the pool.
"""
with HTTPConnectionPool(host="localhost", maxsize=1, block=False) as pool:
conn1 = pool._get_conn()
# pool.pool is empty because we popped the one None that pool.pool was initialized with
# but this pool._get_conn call will not raise EmptyPoolError because block is False
conn2 = pool._get_conn()
with patch.object(conn1, "close") as conn1_close:
with patch.object(conn2, "close") as conn2_close:
pool._put_conn(conn1)
pool._put_conn(conn2)
assert conn1_close.called is False
assert conn2_close.called is True
assert conn1 == pool._get_conn()
assert conn2 != pool._get_conn()
assert pool.num_connections == 3
assert "Connection pool is full, discarding connection" in caplog.text
assert "Connection pool size: 1" in caplog.text
def test_put_conn_when_pool_is_full_blocking(self) -> None:
"""
If maxsize = n and we _put_conn n + 1 conns, the n + 1th conn will
cause a FullPoolError.
"""
with HTTPConnectionPool(host="localhost", maxsize=1, block=True) as pool:
conn1 = pool._get_conn()
conn2 = pool._new_conn()
with patch.object(conn1, "close") as conn1_close:
with patch.object(conn2, "close") as conn2_close:
pool._put_conn(conn1)
with pytest.raises(FullPoolError):
pool._put_conn(conn2)
assert conn1_close.called is False
assert conn2_close.called is True
assert conn1 == pool._get_conn()
def test_put_conn_closed_pool(self) -> None:
with HTTPConnectionPool(host="localhost", maxsize=1, block=True) as pool:
conn1 = pool._get_conn()
with patch.object(conn1, "close") as conn1_close:
pool.close()
assert pool.pool is None
# Accessing pool.pool will raise AttributeError, which will get
# caught and will close conn1
pool._put_conn(conn1)
assert conn1_close.called is True
def test_exception_str(self) -> None:
assert (
str(EmptyPoolError(HTTPConnectionPool(host="localhost"), "Test."))
== "HTTPConnectionPool(host='localhost', port=None): Test."
)
def test_retry_exception_str(self) -> None:
assert (
str(MaxRetryError(HTTPConnectionPool(host="localhost"), "Test.", None))
== "HTTPConnectionPool(host='localhost', port=None): "
"Max retries exceeded with url: Test. (Caused by None)"
)
err = SocketError("Test")
# using err.__class__ here, as socket.error is an alias for OSError
# since Py3.3 and gets printed as this
assert (
str(MaxRetryError(HTTPConnectionPool(host="localhost"), "Test.", err))
== "HTTPConnectionPool(host='localhost', port=None): "
"Max retries exceeded with url: Test. "
"(Caused by %r)" % err
)
def test_pool_size(self) -> None:
POOL_SIZE = 1
with HTTPConnectionPool(
host="localhost", maxsize=POOL_SIZE, block=True
) as pool:
def _test(
exception: type[BaseException],
expect: type[BaseException],
reason: type[BaseException] | None = None,
) -> None:
with patch.object(pool, "_make_request", side_effect=exception()):
with pytest.raises(expect) as excinfo:
pool.request("GET", "/")
if reason is not None:
assert isinstance(excinfo.value.reason, reason) # type: ignore[attr-defined]
assert pool.pool is not None
assert pool.pool.qsize() == POOL_SIZE
# Make sure that all of the exceptions return the connection
# to the pool
_test(BaseSSLError, MaxRetryError, SSLError)
_test(CertificateError, MaxRetryError, SSLError)
# The pool should never be empty, and with these two exceptions
# being raised, a retry will be triggered, but that retry will
# fail, eventually raising MaxRetryError, not EmptyPoolError
# See: https://github.com/urllib3/urllib3/issues/76
with patch.object(pool, "_make_request", side_effect=HTTPException()):
with pytest.raises(MaxRetryError):
pool.request("GET", "/", retries=1, pool_timeout=SHORT_TIMEOUT)
assert pool.pool is not None
assert pool.pool.qsize() == POOL_SIZE
def test_empty_does_not_put_conn(self) -> None:
"""Do not put None back in the pool if the pool was empty"""
with HTTPConnectionPool(host="localhost", maxsize=1, block=True) as pool:
with patch.object(
pool, "_get_conn", side_effect=EmptyPoolError(pool, "Pool is empty")
):
with patch.object(
pool,
"_put_conn",
side_effect=AssertionError("Unexpected _put_conn"),
):
with pytest.raises(EmptyPoolError):
pool.request("GET", "/")
def test_assert_same_host(self) -> None:
with connection_from_url("http://google.com:80") as c:
with pytest.raises(HostChangedError):
c.request("GET", "http://yahoo.com:80", assert_same_host=True)
def test_pool_close(self) -> None:
pool = connection_from_url("http://google.com:80")
# Populate with some connections
conn1 = pool._get_conn()
conn2 = pool._get_conn()
conn3 = pool._get_conn()
pool._put_conn(conn1)
pool._put_conn(conn2)
old_pool_queue = pool.pool
pool.close()
assert pool.pool is None
with pytest.raises(ClosedPoolError):
pool._get_conn()
pool._put_conn(conn3)
with pytest.raises(ClosedPoolError):
pool._get_conn()
with pytest.raises(Empty):
assert old_pool_queue is not None
old_pool_queue.get(block=False)
def test_pool_close_twice(self) -> None:
pool = connection_from_url("http://google.com:80")
# Populate with some connections
conn1 = pool._get_conn()
conn2 = pool._get_conn()
pool._put_conn(conn1)
pool._put_conn(conn2)
pool.close()
assert pool.pool is None
try:
pool.close()
except AttributeError:
pytest.fail("Pool of the ConnectionPool is None and has no attribute get.")
def test_pool_timeouts(self) -> None:
with HTTPConnectionPool(host="localhost") as pool:
conn = pool._new_conn()
assert conn.__class__ == HTTPConnection
assert pool.timeout.__class__ == Timeout
assert pool.timeout._read == _DEFAULT_TIMEOUT
assert pool.timeout._connect == _DEFAULT_TIMEOUT
assert pool.timeout.total is None
pool = HTTPConnectionPool(host="localhost", timeout=SHORT_TIMEOUT)
assert pool.timeout._read == SHORT_TIMEOUT
assert pool.timeout._connect == SHORT_TIMEOUT
assert pool.timeout.total is None
def test_no_host(self) -> None:
with pytest.raises(LocationValueError):
HTTPConnectionPool(None) # type: ignore[arg-type]
def test_contextmanager(self) -> None:
with connection_from_url("http://google.com:80") as pool:
# Populate with some connections
conn1 = pool._get_conn()
conn2 = pool._get_conn()
conn3 = pool._get_conn()
pool._put_conn(conn1)
pool._put_conn(conn2)
old_pool_queue = pool.pool
assert pool.pool is None
with pytest.raises(ClosedPoolError):
pool._get_conn()
pool._put_conn(conn3)
with pytest.raises(ClosedPoolError):
pool._get_conn()
with pytest.raises(Empty):
assert old_pool_queue is not None
old_pool_queue.get(block=False)
def test_url_from_pool(self) -> None:
with connection_from_url("http://google.com:80") as pool:
path = "path?query=foo"
assert f"http://google.com:80/{path}" == _url_from_pool(pool, path)
def test_ca_certs_default_cert_required(self) -> None:
with connection_from_url("https://google.com:80", ca_certs=DEFAULT_CA) as pool:
conn = pool._get_conn()
assert conn.cert_reqs == ssl.CERT_REQUIRED # type: ignore[attr-defined]
def test_cleanup_on_extreme_connection_error(self) -> None:
"""
This test validates that we clean up properly even on exceptions that
we'd not otherwise catch, i.e. those that inherit from BaseException
like KeyboardInterrupt or gevent.Timeout. See #805 for more details.
"""
class RealBad(BaseException):
pass
def kaboom(*args: typing.Any, **kwargs: typing.Any) -> None:
raise RealBad()
with connection_from_url("http://localhost:80") as c:
with patch.object(c, "_make_request", kaboom):
assert c.pool is not None
initial_pool_size = c.pool.qsize()
try:
# We need to release_conn this way or we'd put it away
# regardless.
c.urlopen("GET", "/", release_conn=False)
except RealBad:
pass
new_pool_size = c.pool.qsize()
assert initial_pool_size == new_pool_size
def test_release_conn_param_is_respected_after_http_error_retry(self) -> None:
"""For successful ```urlopen(release_conn=False)```,
the connection isn't released, even after a retry.
This is a regression test for issue #651 [1], where the connection
would be released if the initial request failed, even if a retry
succeeded.
[1] <https://github.com/urllib3/urllib3/issues/651>
"""
class _raise_once_make_request_function:
"""Callable that can mimic `_make_request()`.
Raises the given exception on its first call, but returns a
successful response on subsequent calls.
"""
def __init__(
self, ex: type[BaseException], pool: HTTPConnectionPool
) -> None:
super().__init__()
self._ex: type[BaseException] | None = ex
self._pool = pool
def __call__(
self,
conn: HTTPConnection,
method: str,
url: str,
*args: typing.Any,
retries: Retry,
**kwargs: typing.Any,
) -> HTTPResponse:
if self._ex:
ex, self._ex = self._ex, None
raise ex()
httplib_response = httplib.HTTPResponse(MockSock) # type: ignore[arg-type]
httplib_response.fp = MockChunkedEncodingResponse([b"f", b"o", b"o"]) # type: ignore[assignment]
httplib_response.headers = httplib_response.msg = httplib.HTTPMessage()
response_conn: HTTPConnection | None = kwargs.get("response_conn")
response = HTTPResponse(
body=httplib_response,
headers=httplib_response.headers, # type: ignore[arg-type]
status=httplib_response.status,
version=httplib_response.version,
reason=httplib_response.reason,
original_response=httplib_response,
retries=retries,
request_method=method,
request_url=url,
preload_content=False,
connection=response_conn,
pool=self._pool,
)
return response
def _test(exception: type[BaseException]) -> None:
with HTTPConnectionPool(host="localhost", maxsize=1, block=True) as pool:
# Verify that the request succeeds after two attempts, and that the
# connection is left on the response object, instead of being
# released back into the pool.
with patch.object(
pool,
"_make_request",
_raise_once_make_request_function(exception, pool),
):
response = pool.urlopen(
"GET",
"/",
retries=1,
release_conn=False,
preload_content=False,
chunked=True,
)
assert pool.pool is not None
assert pool.pool.qsize() == 0
assert pool.num_connections == 2
assert response.connection is not None
response.release_conn()
assert pool.pool.qsize() == 1
assert response.connection is None
# Run the test case for all the retriable exceptions.
_test(TimeoutError)
_test(HTTPException)
_test(SocketError)
_test(ProtocolError)
def test_read_timeout_0_does_not_raise_bad_status_line_error(self) -> None:
with HTTPConnectionPool(host="localhost", maxsize=1) as pool:
conn = Mock(spec=HTTPConnection)
# Needed to tell the pool that the connection is alive.
conn.is_closed = False
with patch.object(Timeout, "read_timeout", 0):
timeout = Timeout(1, 1, 1)
with pytest.raises(ReadTimeoutError):
pool._make_request(conn, "", "", timeout=timeout)
| TestConnectionPool |
python | streamlit__streamlit | lib/tests/streamlit/delta_generator_singletons_test.py | {
"start": 950,
"end": 2196
} | class ____(unittest.TestCase):
def test_get_last_dg_added_to_context_stack(self):
last_dg_added_to_context_stack = get_last_dg_added_to_context_stack()
assert last_dg_added_to_context_stack is None
sidebar = st.sidebar
with sidebar:
last_dg_added_to_context_stack = get_last_dg_added_to_context_stack()
assert sidebar == last_dg_added_to_context_stack
last_dg_added_to_context_stack = get_last_dg_added_to_context_stack()
assert sidebar != last_dg_added_to_context_stack
def test_context_dg_stack(self):
dg_stack = context_dg_stack.get()
assert get_default_dg_stack_value() == dg_stack
assert len(dg_stack) == 1
new_dg = DeltaGenerator(
root_container=RootContainer.MAIN,
parent=get_dg_singleton_instance().main_dg,
)
token = context_dg_stack.set((*context_dg_stack.get(), new_dg))
# get the updated dg_stack for current context
dg_stack = context_dg_stack.get()
assert len(dg_stack) == 2
# reset for the other tests
context_dg_stack.reset(token)
dg_stack = context_dg_stack.get()
assert len(dg_stack) == 1
| DeltaGeneratorSingletonsTest |
python | numba__numba | numba/tests/test_struct_ref.py | {
"start": 711,
"end": 1594
} | class ____(structref.StructRefProxy):
def __new__(cls, values, counter):
# Define this method to customize the constructor.
# The default takes `*args`. Customizing allow the use of keyword-arg.
# The impl of the method calls `StructRefProxy.__new__`
return structref.StructRefProxy.__new__(cls, values, counter)
# The below defines wrappers for attributes and methods manually
@property
def values(self):
return get_values(self)
@values.setter
def values(self, val):
return set_values(self, val)
@property
def counter(self):
return get_counter(self)
def testme(self, arg):
return self.values * arg + self.counter
@property
def prop(self):
return self.values, self.counter
def __hash__(self):
return compute_fields(self)
@structref.register
| MyStruct |
python | plotly__plotly.py | plotly/graph_objs/barpolar/_legendgrouptitle.py | {
"start": 233,
"end": 2946
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "barpolar"
_path_str = "barpolar.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.barpolar.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.barpolar.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.barpolar.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | django__django | tests/fixtures/tests.py | {
"start": 1731,
"end": 3787
} | class ____:
def _dumpdata_assert(
self,
args,
output,
format="json",
filename=None,
natural_foreign_keys=False,
natural_primary_keys=False,
use_base_manager=False,
exclude_list=[],
primary_keys="",
):
new_io = StringIO()
filename = filename and os.path.join(tempfile.gettempdir(), filename)
management.call_command(
"dumpdata",
*args,
format=format,
stdout=new_io,
stderr=new_io,
output=filename,
use_natural_foreign_keys=natural_foreign_keys,
use_natural_primary_keys=natural_primary_keys,
use_base_manager=use_base_manager,
exclude=exclude_list,
primary_keys=primary_keys,
)
if filename:
file_root, file_ext = os.path.splitext(filename)
compression_formats = {
".bz2": (open, file_root),
".gz": (gzip.open, filename),
".lzma": (open, file_root),
".xz": (open, file_root),
".zip": (open, file_root),
}
if HAS_BZ2:
compression_formats[".bz2"] = (bz2.open, filename)
if HAS_LZMA:
compression_formats[".lzma"] = (lzma.open, filename)
compression_formats[".xz"] = (lzma.open, filename)
try:
open_method, file_path = compression_formats[file_ext]
except KeyError:
open_method, file_path = open, filename
with open_method(file_path, "rt") as f:
command_output = f.read()
os.remove(file_path)
else:
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
| DumpDataAssertMixin |
python | modin-project__modin | modin/core/dataframe/algebra/default2pandas/list.py | {
"start": 898,
"end": 1327
} | class ____(SeriesDefault):
"""Builder for default-to-pandas methods which is executed under list accessor."""
@classmethod
def frame_wrapper(cls, df):
"""
Get list accessor of the passed frame.
Parameters
----------
df : pandas.DataFrame
Returns
-------
pandas.core.arrays.arrow.ListAccessor
"""
return df.squeeze(axis=1).list
| ListDefault |
python | kubernetes-client__python | kubernetes/client/models/v1_policy_rules_with_subjects.py | {
"start": 383,
"end": 6921
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'non_resource_rules': 'list[V1NonResourcePolicyRule]',
'resource_rules': 'list[V1ResourcePolicyRule]',
'subjects': 'list[FlowcontrolV1Subject]'
}
attribute_map = {
'non_resource_rules': 'nonResourceRules',
'resource_rules': 'resourceRules',
'subjects': 'subjects'
}
def __init__(self, non_resource_rules=None, resource_rules=None, subjects=None, local_vars_configuration=None): # noqa: E501
"""V1PolicyRulesWithSubjects - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._non_resource_rules = None
self._resource_rules = None
self._subjects = None
self.discriminator = None
if non_resource_rules is not None:
self.non_resource_rules = non_resource_rules
if resource_rules is not None:
self.resource_rules = resource_rules
self.subjects = subjects
@property
def non_resource_rules(self):
"""Gets the non_resource_rules of this V1PolicyRulesWithSubjects. # noqa: E501
`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL. # noqa: E501
:return: The non_resource_rules of this V1PolicyRulesWithSubjects. # noqa: E501
:rtype: list[V1NonResourcePolicyRule]
"""
return self._non_resource_rules
@non_resource_rules.setter
def non_resource_rules(self, non_resource_rules):
"""Sets the non_resource_rules of this V1PolicyRulesWithSubjects.
`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL. # noqa: E501
:param non_resource_rules: The non_resource_rules of this V1PolicyRulesWithSubjects. # noqa: E501
:type: list[V1NonResourcePolicyRule]
"""
self._non_resource_rules = non_resource_rules
@property
def resource_rules(self):
"""Gets the resource_rules of this V1PolicyRulesWithSubjects. # noqa: E501
`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty. # noqa: E501
:return: The resource_rules of this V1PolicyRulesWithSubjects. # noqa: E501
:rtype: list[V1ResourcePolicyRule]
"""
return self._resource_rules
@resource_rules.setter
def resource_rules(self, resource_rules):
"""Sets the resource_rules of this V1PolicyRulesWithSubjects.
`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty. # noqa: E501
:param resource_rules: The resource_rules of this V1PolicyRulesWithSubjects. # noqa: E501
:type: list[V1ResourcePolicyRule]
"""
self._resource_rules = resource_rules
@property
def subjects(self):
"""Gets the subjects of this V1PolicyRulesWithSubjects. # noqa: E501
subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required. # noqa: E501
:return: The subjects of this V1PolicyRulesWithSubjects. # noqa: E501
:rtype: list[FlowcontrolV1Subject]
"""
return self._subjects
@subjects.setter
def subjects(self, subjects):
"""Sets the subjects of this V1PolicyRulesWithSubjects.
subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required. # noqa: E501
:param subjects: The subjects of this V1PolicyRulesWithSubjects. # noqa: E501
:type: list[FlowcontrolV1Subject]
"""
if self.local_vars_configuration.client_side_validation and subjects is None: # noqa: E501
raise ValueError("Invalid value for `subjects`, must not be `None`") # noqa: E501
self._subjects = subjects
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PolicyRulesWithSubjects):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PolicyRulesWithSubjects):
return True
return self.to_dict() != other.to_dict()
| V1PolicyRulesWithSubjects |
python | pypa__warehouse | warehouse/accounts/models.py | {
"start": 10319,
"end": 11011
} | class ____(db.Model):
__tablename__ = "user_terms_of_service_engagements"
__table_args__ = (
Index(
"user_terms_of_service_engagements_user_id_revision_idx",
"user_id",
"revision",
),
)
__repr__ = make_repr("user_id")
user_id: Mapped[UUID] = mapped_column(
ForeignKey("users.id", onupdate="CASCADE", ondelete="CASCADE"),
)
revision: Mapped[str]
created: Mapped[datetime.datetime] = mapped_column(TZDateTime)
engagement: Mapped[TermsOfServiceEngagement]
user: Mapped[User] = orm.relationship(
lazy=True, back_populates="terms_of_service_engagements"
)
| UserTermsOfServiceEngagement |
python | facebook__pyre-check | client/tests/find_directories_test.py | {
"start": 19121,
"end": 21165
} | class ____(testslide.TestCase):
def assert_typeshed_roots(
self, relative_directories: Iterable[str], expected_roots: Iterable[str]
) -> None:
self.maxDiff = None
with tempfile.TemporaryDirectory() as root:
root_path = Path(root)
ensure_directories_exists(root_path, relative_directories)
self.assertListEqual(
find_typeshed_search_paths(root_path),
[root_path / subdirectory for subdirectory in expected_roots],
)
def test_find_typeshed_search_paths__no_third_party(self) -> None:
self.assert_typeshed_roots(
relative_directories=["stdlib"],
expected_roots=["stdlib"],
)
def test_find_typeshed_search_paths__empty_third_party(self) -> None:
self.assert_typeshed_roots(
relative_directories=["stdlib", "stubs"],
expected_roots=["stdlib"],
)
def test_find_typeshed_search_paths__with_standard_stubs(self) -> None:
self.assert_typeshed_roots(
relative_directories=["stdlib", "stubs/foo/foo", "stubs/bar/bar"],
expected_roots=[
"stdlib",
"stubs/bar",
"stubs/foo",
],
)
def test_find_typeshed_search_paths__with_combined_stubs(self) -> None:
self.assert_typeshed_roots(
relative_directories=["stdlib", "combined_stubs/foo", "combined_stubs/bar"],
expected_roots=[
"stdlib",
"combined_stubs",
],
)
def test_find_typeshed_search_paths__with_both_stubs(self) -> None:
self.assert_typeshed_roots(
relative_directories=[
"stdlib",
"stubs/foo/foo",
"stubs/bar/bar",
"combined_stubs/foo",
"combined_stubs/bar",
],
expected_roots=[
"stdlib",
"combined_stubs",
],
)
| FindTypeshedTest |
python | gevent__gevent | src/gevent/_config.py | {
"start": 8867,
"end": 9407
} | class ____(_PositiveValueMixin):
_MULTIPLES = {
# All keys must be the same size.
'kb': 1024,
'mb': 1024 * 1024,
'gb': 1024 * 1024 * 1024,
}
_SUFFIX_SIZE = 2
def _convert(self, value):
if not value or not isinstance(value, str):
return value
value = value.lower()
for s, m in self._MULTIPLES.items():
if value[-self._SUFFIX_SIZE:] == s:
return int(value[:-self._SUFFIX_SIZE]) * m
return int(value)
| ByteCountSettingMixin |
python | google__flatbuffers | tests/flatc/flatc_kotlin_tests.py | {
"start": 624,
"end": 1333
} | class ____:
def EnumValAttributes(self):
flatc(["--kotlin", "enum_val_attributes.fbs"])
subject = assert_file_exists("ValAttributes.kt")
assert_file_doesnt_contains(
subject, 'val names : Array<String> = arrayOf("Val1", "Val2", "Val3")'
)
assert_file_doesnt_contains(subject, "fun name(e: Int) : String = names[e]")
def EnumValAttributes_ReflectNames(self):
flatc(["--kotlin", "--reflect-names", "enum_val_attributes.fbs"])
subject = assert_file_exists("ValAttributes.kt")
assert_file_contains(
subject, 'val names : Array<String> = arrayOf("Val1", "Val2", "Val3")'
)
assert_file_contains(subject, "fun name(e: Int) : String = names[e]")
| KotlinTests |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/explorer.py | {
"start": 4222,
"end": 5782
} | class ____(QStyledItemDelegate):
def __init__(self, parent):
super().__init__(parent)
self._project_dir = ""
def set_project_dir(self, project_dir):
self._project_dir = project_dir
def initStyleOption(self, option, index):
"""
To change the item icon when expanding a folder.
From https://stackoverflow.com/a/48531349/438386
"""
super().initStyleOption(option, index)
if isinstance(option, QStyleOptionViewItem):
model = index.model()
if isinstance(model, QSortFilterProxyModel):
# This is necessary for Projects because it has a proxy model
is_dir = model.sourceModel().isDir(model.mapToSource(index))
else:
is_dir = model.isDir(index)
if is_dir:
# This is necessary because Projects has a root directory and
# we want to set a different icon for it.
if isinstance(model, QSortFilterProxyModel):
dir_path = model.sourceModel().filePath(
model.mapToSource(index)
)
else:
dir_path = None
if dir_path == self._project_dir:
option.icon = ima.icon("project_spyder")
elif (option.state & QStyle.State_Open):
option.icon = ima.icon("DirOpenIcon")
# ---- Widgets
# ----------------------------------------------------------------------------
| DirViewItemDelegate |
python | PyCQA__pylint | pylint/pyreverse/plantuml_printer.py | {
"start": 501,
"end": 3578
} | class ____(Printer):
"""Printer for PlantUML diagrams."""
DEFAULT_COLOR = "black"
NODES: dict[NodeType, str] = {
NodeType.CLASS: "class",
NodeType.PACKAGE: "package",
}
ARROWS: dict[EdgeType, str] = {
EdgeType.INHERITS: "--|>",
EdgeType.ASSOCIATION: "-->",
EdgeType.COMPOSITION: "--*",
EdgeType.AGGREGATION: "--o",
EdgeType.USES: "-->",
EdgeType.TYPE_DEPENDENCY: "..>",
}
def _open_graph(self) -> None:
"""Emit the header lines."""
self.emit("@startuml " + self.title)
if not self.use_automatic_namespace:
self.emit("set namespaceSeparator none")
if self.layout:
if self.layout is Layout.LEFT_TO_RIGHT:
self.emit("left to right direction")
elif self.layout is Layout.TOP_TO_BOTTOM:
self.emit("top to bottom direction")
else:
raise ValueError(
f"Unsupported layout {self.layout}. PlantUmlPrinter only "
"supports left to right and top to bottom layout."
)
def emit_node(
self,
name: str,
type_: NodeType,
properties: NodeProperties | None = None,
) -> None:
"""Create a new node.
Nodes can be classes, packages, participants etc.
"""
if properties is None:
properties = NodeProperties(label=name)
nodetype = self.NODES[type_]
if properties.color and properties.color != self.DEFAULT_COLOR:
color = f" #{properties.color.lstrip('#')}"
else:
color = ""
body = []
if properties.attrs:
body.extend(properties.attrs)
if properties.methods:
for func in properties.methods:
args = self._get_method_arguments(func)
line = "{abstract}" if func.is_abstract() else ""
line += f"{func.name}({', '.join(args)})"
if func.returns:
line += " -> " + get_annotation_label(func.returns)
body.append(line)
label = properties.label if properties.label is not None else name
if properties.fontcolor and properties.fontcolor != self.DEFAULT_COLOR:
label = f"<color:{properties.fontcolor}>{label}</color>"
self.emit(f'{nodetype} "{label}" as {name}{color} {{')
self._inc_indent()
for line in body:
self.emit(line)
self._dec_indent()
self.emit("}")
def emit_edge(
self,
from_node: str,
to_node: str,
type_: EdgeType,
label: str | None = None,
) -> None:
"""Create an edge from one node to another to display relationships."""
edge = f"{from_node} {self.ARROWS[type_]} {to_node}"
if label:
edge += f" : {label}"
self.emit(edge)
def _close_graph(self) -> None:
"""Emit the lines needed to properly close the graph."""
self.emit("@enduml")
| PlantUmlPrinter |
python | pypa__hatch | tests/backend/builders/test_binary.py | {
"start": 4836,
"end": 7226
} | class ____:
def test_default_no_source(self, isolation):
config = {"project": {"name": "My.App", "version": "0.1.0"}}
builder = BinaryBuilder(str(isolation), config=config)
assert builder.config.python_version == builder.config.python_version == builder.config.SUPPORTED_VERSIONS[0]
def test_default_explicit_source(self, isolation):
config = {"project": {"name": "My.App", "version": "0.1.0"}}
builder = BinaryBuilder(str(isolation), config=config)
with EnvVars({"PYAPP_DISTRIBUTION_SOURCE": "url"}):
assert builder.config.python_version == builder.config.python_version == ""
def test_set(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
},
"tool": {"hatch": {"build": {"targets": {"binary": {"python-version": "4.0"}}}}},
}
builder = BinaryBuilder(str(isolation), config=config)
assert builder.config.python_version == "4.0"
def test_not_string(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
},
"tool": {"hatch": {"build": {"targets": {"binary": {"python-version": 9000}}}}},
}
builder = BinaryBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.binary.python-version` must be a string"):
_ = builder.config.python_version
def test_compatibility(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
"requires-python": "<3.11",
},
}
builder = BinaryBuilder(str(isolation), config=config)
assert builder.config.python_version == "3.10"
def test_incompatible(self, isolation):
config = {
"project": {
"name": "My.App",
"version": "0.1.0",
"requires-python": ">9000",
},
}
builder = BinaryBuilder(str(isolation), config=config)
with pytest.raises(
ValueError, match="Field `project.requires-python` is incompatible with the known distributions"
):
_ = builder.config.python_version
| TestPythonVersion |
python | pytorch__pytorch | test/distributed/test_c10d_ucc.py | {
"start": 37239,
"end": 38432
} | class ____(
test_c10d_common.ProcessGroupWithDispatchedCollectivesTests
):
@skip_but_pass_in_sandcastle("Fails on M60")
@requires_ucc()
@skip_if_lt_x_gpu(1)
def test_collectives(self):
# includes reduce, broadcast, all_reduce, all_gather, reduce_scatter, barrier, all_to_all, scatter
self._test_collectives(backend="ucc")
@skip_but_pass_in_sandcastle("Fails on M60")
@requires_ucc()
@skip_if_lt_x_gpu(1)
def test_allgather_base(self):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
"ucc",
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda"
tensor = torch.ones(10, 10, device=torch.device(device))
output_tensor = torch.zeros(10, 10, device=torch.device(device))
dist.all_gather_into_tensor(output_tensor, tensor)
self.assertEqual(output_tensor, tensor)
if __name__ == "__main__":
assert not torch.cuda._initialized, (
"test_distributed must not have initialized CUDA context on main process"
)
run_tests()
| UccProcessGroupWithDispatchedCollectivesTests |
python | kamyu104__LeetCode-Solutions | Python/match-substring-after-replacement.py | {
"start": 953,
"end": 1437
} | class ____(object):
def matchReplacement(self, s, sub, mappings):
"""
:type s: str
:type sub: str
:type mappings: List[List[str]]
:rtype: bool
"""
def check(i):
return all(sub[j] == s[i+j] or (sub[j], s[i+j]) in lookup for j in xrange(len(sub)))
lookup = set()
for a, b in mappings:
lookup.add((a, b))
return any(check(i) for i in xrange(len(s)-len(sub)+1))
| Solution2 |
python | apache__airflow | providers/apache/hive/src/airflow/providers/apache/hive/hooks/hive.py | {
"start": 2042,
"end": 20992
} | class ____(BaseHook):
"""
Simple wrapper around the hive CLI.
It also supports the ``beeline``
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters by passing ``hive_cli_params``
space separated list of parameters to add to the hive command.
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:param hive_cli_params: Space separated list of hive command parameters to add to the
hive command.
:param proxy_user: Run HQL code as this user.
"""
conn_name_attr = "hive_cli_conn_id"
default_conn_name = "hive_cli_default"
conn_type = "hive_cli"
hook_name = "Hive Client Wrapper"
def __init__(
self,
hive_cli_conn_id: str = default_conn_name,
mapred_queue: str | None = None,
mapred_queue_priority: str | None = None,
mapred_job_name: str | None = None,
hive_cli_params: str = "",
auth: str | None = None,
proxy_user: str | None = None,
) -> None:
super().__init__()
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params: str = hive_cli_params
self.use_beeline: bool = conn.extra_dejson.get("use_beeline", False)
self.auth = auth
self.conn = conn
self.sub_process: Any = None
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
f"Invalid Mapred Queue Priority. Valid values are: {', '.join(HIVE_QUEUE_PRIORITIES)}"
)
self.mapred_queue = mapred_queue or conf.get("hive", "default_hive_mapred_queue")
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
self.proxy_user = proxy_user
self.high_availability = self.conn.extra_dejson.get("high_availability", False)
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to Hive Client Wrapper connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, StringField
return {
"use_beeline": BooleanField(lazy_gettext("Use Beeline"), default=True),
"proxy_user": StringField(lazy_gettext("Proxy User"), widget=BS3TextFieldWidget(), default=""),
"principal": StringField(
lazy_gettext("Principal"), widget=BS3TextFieldWidget(), default="hive/_HOST@EXAMPLE.COM"
),
"high_availability": BooleanField(lazy_gettext("High Availability mode"), default=False),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom UI field behaviour for Hive Client Wrapper connection."""
return {
"hidden_fields": ["extra"],
"relabeling": {},
}
def _get_proxy_user(self) -> str:
"""Set the proper proxy_user value in case the user overwrite the default."""
conn = self.conn
if self.proxy_user is not None:
return f"hive.server2.proxy.user={self.proxy_user}"
proxy_user_value: str = conn.extra_dejson.get("proxy_user", "")
if proxy_user_value != "":
return f"hive.server2.proxy.user={proxy_user_value}"
return ""
def _prepare_cli_cmd(self) -> list[Any]:
"""Create the command list from available information."""
conn = self.conn
hive_bin = "hive"
cmd_extra = []
if self.use_beeline:
hive_bin = "beeline"
self._validate_beeline_parameters(conn)
if self.high_availability:
jdbc_url = f"jdbc:hive2://{conn.host}/{conn.schema}"
self.log.info("High Availability selected, setting JDBC url as %s", jdbc_url)
else:
jdbc_url = f"jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
self.log.info("High Availability not selected, setting JDBC url as %s", jdbc_url)
if conf.get("core", "security") == "kerberos":
template = conn.extra_dejson.get("principal", "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(utils.get_components(template))
proxy_user = self._get_proxy_user()
if ";" in template:
raise RuntimeError("The principal should not contain the ';' character")
if ";" in proxy_user:
raise RuntimeError("The proxy_user should not contain the ';' character")
jdbc_url += f";principal={template};{proxy_user}"
if self.high_availability:
if not jdbc_url.endswith(";"):
jdbc_url += ";"
jdbc_url += "serviceDiscoveryMode=zooKeeper;ssl=true;zooKeeperNamespace=hiveserver2"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = f'"{jdbc_url}"'
cmd_extra += ["-u", jdbc_url]
if conn.login:
cmd_extra += ["-n", conn.login]
if conn.password:
cmd_extra += ["-p", conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin, *cmd_extra, *hive_params_list]
def _validate_beeline_parameters(self, conn):
if self.high_availability:
if ";" in conn.schema:
raise ValueError(
f"The schema used in beeline command ({conn.schema}) should not contain ';' character)"
)
return
if ":" in conn.host or "/" in conn.host or ";" in conn.host:
raise ValueError(
f"The host used in beeline command ({conn.host}) should not contain ':/;' characters)"
)
try:
int_port = int(conn.port)
if not 0 < int_port <= 65535:
raise ValueError(
f"The port used in beeline command ({conn.port}) should be in range 0-65535)"
)
except (ValueError, TypeError) as e:
raise ValueError(
f"The port used in beeline command ({conn.port}) should be a valid integer: {e})"
)
if ";" in conn.schema:
raise ValueError(
f"The schema used in beeline command ({conn.schema}) should not contain ';' character)"
)
@staticmethod
def _prepare_hiveconf(d: dict[Any, Any]) -> list[Any]:
"""
Prepare a list of hiveconf params from a dictionary of key value pairs.
:param d:
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(zip(["-hiveconf"] * len(d), [f"{k}={v}" for k, v in d.items()]))
def run_cli(
self,
hql: str,
schema: str | None = None,
verbose: bool = True,
hive_conf: dict[Any, Any] | None = None,
) -> Any:
"""
Run an hql statement using the hive cli.
If hive_conf is specified it should be a dict and the entries
will be set as key/value pairs in HiveConf.
:param hql: an hql (hive query language) statement to run with hive cli
:param schema: Name of hive schema (database) to use
:param verbose: Provides additional logging. Defaults to True.
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema or ""
invalid_chars_list = re.findall(r"[^a-z0-9_]", schema)
if invalid_chars_list:
invalid_chars = "".join(invalid_chars_list)
raise RuntimeError(f"The schema `{schema}` contains invalid characters: {invalid_chars}")
if schema:
hql = f"USE {schema};\n{hql}"
with TemporaryDirectory(prefix="airflow_hiveop_") as tmp_dir, NamedTemporaryFile(dir=tmp_dir) as f:
hql += "\n"
f.write(hql.encode("UTF-8"))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
[
"-hiveconf",
f"mapreduce.job.queuename={self.mapred_queue}",
"-hiveconf",
f"mapred.job.queue.name={self.mapred_queue}",
"-hiveconf",
f"tez.queue.name={self.mapred_queue}",
]
)
if self.mapred_queue_priority:
hive_conf_params.extend(["-hiveconf", f"mapreduce.job.priority={self.mapred_queue_priority}"])
if self.mapred_job_name:
hive_conf_params.extend(["-hiveconf", f"mapred.job.name={self.mapred_job_name}"])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(["-f", f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sub_process: Any = subprocess.Popen(
hive_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmp_dir, close_fds=True
)
self.sub_process = sub_process
stdout = ""
for line_raw in iter(sub_process.stdout.readline, b""):
line = line_raw.decode()
stdout += line
if verbose:
self.log.info(line.strip())
sub_process.wait()
if sub_process.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql: str) -> None:
"""Test an hql statement using the hive cli and EXPLAIN."""
create, insert, other = [], [], []
for query in hql.split(";"): # naive
query_lower = query.lower().strip()
if query_lower.startswith("create table"):
create.append(query)
elif query_lower.startswith(("set ", "add jar ", "create temporary function")):
other.append(query)
elif query_lower.startswith("insert"):
insert.append(query)
other_ = ";".join(other)
for query_set in [create, insert]:
for query_item in query_set:
query_preview = " ".join(query_item.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other_ + "; explain " + query_item
else:
query = "explain " + query_item
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].splitlines()[-2]
self.log.info(message)
error_loc = re.search(r"(\d+):(\d+)", message)
if error_loc:
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.splitlines()))
context = "\n".join(query.splitlines()[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df: pd.DataFrame,
table: str,
field_dict: dict[Any, Any] | None = None,
delimiter: str = ",",
encoding: str = "utf8",
pandas_kwargs: Any = None,
**kwargs: Any,
) -> None:
"""
Load a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:param table: target Hive table, use dot notation to target a
specific database
:param field_dict: mapping from column name to hive data type.
Note that Python dict is ordered so it keeps columns' order.
:param delimiter: field delimiter in the file
:param encoding: str encoding to use when writing DataFrame to file
:param pandas_kwargs: passed to DataFrame.to_csv
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df: pd.DataFrame) -> dict[Any, Any]:
dtype_kind_hive_type = {
"b": "BOOLEAN", # boolean
"i": "BIGINT", # signed integer
"u": "BIGINT", # unsigned integer
"f": "DOUBLE", # floating-point
"c": "STRING", # complex floating-point
"M": "TIMESTAMP", # datetime
"O": "STRING", # object
"S": "STRING", # (byte-)string
"U": "STRING", # Unicode
"V": "STRING", # void
}
order_type = {}
for col, dtype in df.dtypes.items():
order_type[col] = dtype_kind_hive_type[dtype.kind]
return order_type
if pandas_kwargs is None:
pandas_kwargs = {}
with (
TemporaryDirectory(prefix="airflow_hiveop_") as tmp_dir,
NamedTemporaryFile(dir=tmp_dir, mode="w") as f,
):
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(
path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs,
)
f.flush()
return self.load_file(
filepath=f.name, table=table, delimiter=delimiter, field_dict=field_dict, **kwargs
)
def load_file(
self,
filepath: str,
table: str,
delimiter: str = ",",
field_dict: dict[Any, Any] | None = None,
create: bool = True,
overwrite: bool = True,
partition: dict[str, Any] | None = None,
recreate: bool = False,
tblproperties: dict[str, Any] | None = None,
) -> None:
"""
Load a local file into Hive.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the tables gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:param table: target Hive table, use dot notation to target a
specific database
:param delimiter: field delimiter in the file
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that Python dict is ordered so it keeps columns' order.
:param create: whether to create the table if it doesn't exist
:param overwrite: whether to overwrite the data in table or partition
:param partition: target partition as a dict of partition columns
and values
:param recreate: whether to drop and recreate the table at every
execution
:param tblproperties: TBLPROPERTIES of the hive table being created
"""
hql = ""
if recreate:
hql += f"DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(f"`{k.strip('`')}` {v}" for k, v in field_dict.items())
hql += f"CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(p + " STRING" for p in partition)
hql += f"PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += f"FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(f"'{k}'='{v}'" for k, v in tblproperties.items())
hql += f"TBLPROPERTIES({tprops})\n"
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = f"LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += f"INTO TABLE {table} "
if partition:
pvals = ", ".join(f"{k}='{v}'" for k, v in partition.items())
hql += f"PARTITION ({pvals})"
# Add a newline character as a workaround for https://issues.apache.org/jira/browse/HIVE-10541,
hql += ";\n"
self.log.info(hql)
self.run_cli(hql)
def kill(self) -> None:
"""Kill Hive cli command."""
if hasattr(self, "sub_process"):
if self.sub_process.poll() is None:
print("Killing the Hive job")
self.sub_process.terminate()
time.sleep(60)
self.sub_process.kill()
| HiveCliHook |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/77_class__class__nested.py | {
"start": 0,
"end": 67
} | class ____:
def z(self):
def x():
super()
| Outer |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_translate.py | {
"start": 7840,
"end": 10306
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.translate.TranslationNativeDatasetLink.persist")
@mock.patch("airflow.providers.google.cloud.operators.translate.TranslateHook")
def test_minimal_green_path(self, mock_hook, mock_link_persist):
DS_CREATION_RESULT_SAMPLE = {
"display_name": "",
"example_count": 0,
"name": f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}",
"source_language_code": "",
"target_language_code": "",
"test_example_count": 0,
"train_example_count": 0,
"validate_example_count": 0,
}
sample_operation = mock.MagicMock()
sample_operation.result.return_value = automl_translation.Dataset(DS_CREATION_RESULT_SAMPLE)
mock_hook.return_value.create_dataset.return_value = sample_operation
mock_hook.return_value.wait_for_operation_result.side_effect = lambda operation: operation.result()
mock_hook.return_value.extract_object_id = TranslateHook.extract_object_id
DATASET_DATA = {
"display_name": "sample ds name",
"source_language_code": "es",
"target_language_code": "uk",
}
op = TranslateCreateDatasetOperator(
task_id="task_id",
dataset=DATASET_DATA,
project_id=PROJECT_ID,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
timeout=TIMEOUT_VALUE,
retry=None,
)
mock_ti = mock.MagicMock()
mock_context = {"ti": mock_ti}
result = op.execute(context=mock_context)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_dataset.assert_called_once_with(
dataset=DATASET_DATA,
project_id=PROJECT_ID,
location=LOCATION,
timeout=TIMEOUT_VALUE,
retry=None,
metadata=(),
)
mock_ti.xcom_push.assert_any_call(key="dataset_id", value=DATASET_ID)
mock_link_persist.assert_called_once_with(
context=mock_context,
dataset_id=DATASET_ID,
location=LOCATION,
project_id=PROJECT_ID,
)
assert result == DS_CREATION_RESULT_SAMPLE
| TestTranslateDatasetCreate |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_tests.py | {
"start": 5294,
"end": 9635
} | class ____(TestCase):
def test_deprecated_option_simple(self) -> None:
class Schema(Config):
d = c.Deprecated()
self.get_config(
Schema,
{'d': 'value'},
warnings=dict(
d="The configuration option 'd' has been deprecated and will be removed in a "
"future release."
),
)
def test_deprecated_option_message(self) -> None:
class Schema(Config):
d = c.Deprecated(message='custom message for {} key')
self.get_config(Schema, {'d': 'value'}, warnings={'d': 'custom message for d key'})
def test_deprecated_option_with_type(self) -> None:
class Schema(Config):
d = c.Deprecated(option_type=c.Type(str))
self.get_config(
Schema,
{'d': 'value'},
warnings=dict(
d="The configuration option 'd' has been deprecated and will be removed in a "
"future release."
),
)
def test_deprecated_option_with_invalid_type(self) -> None:
class Schema(Config):
d = c.Deprecated(option_type=c.Type(list))
with self.expect_error(d="Expected type: <class 'list'> but received: <class 'str'>"):
self.get_config(
Schema,
{'d': 'value'},
warnings=dict(
d="The configuration option 'd' has been deprecated and will be removed in a "
"future release."
),
)
def test_removed_option(self) -> None:
class Schema(Config):
d = c.Deprecated(removed=True, moved_to='foo')
with self.expect_error(
d="The configuration option 'd' was removed from MkDocs. Use 'foo' instead.",
):
self.get_config(Schema, {'d': 'value'})
def test_deprecated_option_with_type_undefined(self) -> None:
class Schema(Config):
option = c.Deprecated(option_type=c.Type(str))
self.get_config(Schema, {'option': None})
def test_deprecated_option_move(self) -> None:
class Schema(Config):
new = c.Type(str)
old = c.Deprecated(moved_to='new')
conf = self.get_config(
Schema,
{'old': 'value'},
warnings=dict(
old="The configuration option 'old' has been deprecated and will be removed in a "
"future release. Use 'new' instead."
),
)
self.assertEqual(conf, {'new': 'value', 'old': None})
def test_deprecated_option_move_complex(self) -> None:
class Schema(Config):
foo = c.Type(dict)
old = c.Deprecated(moved_to='foo.bar')
conf = self.get_config(
Schema,
{'old': 'value'},
warnings=dict(
old="The configuration option 'old' has been deprecated and will be removed in a "
"future release. Use 'foo.bar' instead."
),
)
self.assertEqual(conf, {'foo': {'bar': 'value'}, 'old': None})
def test_deprecated_option_move_existing(self) -> None:
class Schema(Config):
foo = c.Type(dict)
old = c.Deprecated(moved_to='foo.bar')
conf = self.get_config(
Schema,
{'old': 'value', 'foo': {'existing': 'existing'}},
warnings=dict(
old="The configuration option 'old' has been deprecated and will be removed in a "
"future release. Use 'foo.bar' instead."
),
)
self.assertEqual(conf, {'foo': {'existing': 'existing', 'bar': 'value'}, 'old': None})
def test_deprecated_option_move_invalid(self) -> None:
class Schema(Config):
foo = c.Type(dict)
old = c.Deprecated(moved_to='foo.bar')
with self.expect_error(foo="Expected type: <class 'dict'> but received: <class 'str'>"):
self.get_config(
Schema,
{'old': 'value', 'foo': 'wrong type'},
warnings=dict(
old="The configuration option 'old' has been deprecated and will be removed in a "
"future release. Use 'foo.bar' instead."
),
)
| DeprecatedTest |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 1370,
"end": 1611
} | class ____(Message):
message = "'from %s import *' used; unable to detect undefined names"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
| ImportStarUsed |
python | pikepdf__pikepdf | src/pikepdf/models/outlines.py | {
"start": 3044,
"end": 3146
} | class ____(Exception):
"""Indicates an error in the outline data structure."""
| OutlineStructureError |
python | huggingface__transformers | src/transformers/models/barthez/tokenization_barthez.py | {
"start": 1101,
"end": 6361
} | class ____(TokenizersBackend):
"""
Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a "fast" BARThez tokenizer. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
vocab_file (`str`, *optional*):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
vocab_file=None,
vocab=None,
add_prefix_space=True,
**kwargs,
):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.add_prefix_space = add_prefix_space
self.vocab_file = vocab_file
if vocab is not None:
self._vocab = vocab
else:
self._vocab = [
(str(pad_token), 0.0),
(str(unk_token), 0.0),
(str(cls_token), 0.0),
(str(sep_token), 0.0),
(str(mask_token), 0.0),
]
self._tokenizer = Tokenizer(Unigram(self._vocab, unk_id=3, byte_fallback=False))
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Replace("\n", " "),
normalizers.Replace("\r", " "),
normalizers.Replace("\t", " "),
normalizers.Replace(Regex(r" {2,}"), " "),
normalizers.NFC(),
normalizers.Strip(left=False, right=True),
]
)
prepend_scheme = "always" if add_prefix_space else "never"
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
__all__ = ["BarthezTokenizer"]
| BarthezTokenizer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 22713,
"end": 22834
} | class ____(_OracleNumericCommon):
def get_dbapi_type(self, dbapi):
return dbapi.NATIVE_FLOAT
| _OracleBinaryFloat |
python | dask__distributed | distributed/dashboard/components/worker.py | {
"start": 6950,
"end": 8109
} | class ____(DashboardComponent):
def __init__(self, worker, **kwargs):
self.worker = worker
self.source = ColumnDataSource({"x": [], "in": [], "out": []})
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
fig = figure(
title="Communication History",
x_axis_type="datetime",
y_range=[-0.1, worker.state.transfer_incoming_count_limit + 0.5],
height=150,
tools="",
x_range=x_range,
**kwargs,
)
fig.line(source=self.source, x="x", y="in", color="red")
fig.line(source=self.source, x="x", y="out", color="blue")
fig.add_tools(
ResetTool(), PanTool(dimensions="width"), WheelZoomTool(dimensions="width")
)
self.root = fig
@without_property_validation
@log_errors
def update(self):
self.source.stream(
{
"x": [time() * 1000],
"out": [len(self.worker._comms)],
"in": [self.worker.state.transfer_incoming_count],
},
10000,
)
| CommunicatingTimeSeries |
python | doocs__leetcode | solution/3200-3299/3222.Find the Winning Player in Coin Game/Solution.py | {
"start": 0,
"end": 187
} | class ____:
def losingPlayer(self, x: int, y: int) -> str:
k = min(x // 2, y // 8)
x -= k * 2
y -= k * 8
return "Alice" if x and y >= 4 else "Bob"
| Solution |
python | numba__llvmlite | llvmlite/tests/test_ir.py | {
"start": 95255,
"end": 109433
} | class ____(TestBase):
def has_logical_equality(self, ty):
while isinstance(ty, ir.PointerType):
ty = ty.pointee
return not isinstance(ty, ir.LabelType)
def assorted_types(self):
"""
A bunch of mutually unequal types
"""
# Avoid polluting the namespace
context = ir.Context()
types = [
ir.LabelType(), ir.VoidType(),
ir.FunctionType(int1, (int8, int8)), ir.FunctionType(int1, (int8,)),
ir.FunctionType(int1, (int8,), var_arg=True),
ir.FunctionType(int8, (int8,)),
int1, int8, int32, flt, dbl,
ir.ArrayType(flt, 5), ir.ArrayType(dbl, 5), ir.ArrayType(dbl, 4),
ir.LiteralStructType((int1, int8)), ir.LiteralStructType((int8,
int1)),
context.get_identified_type("MyType1"),
context.get_identified_type("MyType2"),
]
types += [ir.PointerType(tp) for tp in types
if not isinstance(tp, (ir.VoidType, ir.LabelType))]
return types
def test_pickling(self):
types = self.assorted_types()
for ty in types:
newty = self.assert_pickle_correctly(ty)
if self.has_logical_equality(ty):
self.assertEqual(newty, ty)
def test_comparisons(self):
types = self.assorted_types()
for a, b in itertools.product(types, types):
if a is not b:
self.assertFalse(a == b, (a, b))
self.assertTrue(a != b, (a, b))
# We assume copy.copy() works fine here...
for tp in types:
other = copy.copy(tp)
if self.has_logical_equality(tp):
self.assertTrue(tp == other, (tp, other))
self.assertFalse(tp != other, (tp, other))
else:
self.assertFalse(tp == other, (tp, other))
self.assertTrue(tp != other, (tp, other))
def test_ptr_comparisons(self):
# Create instances of:
# * Opaque pointers.
# * Typed pointers of i1's.
# * Typed pointers of i8's.
# The choice of types for the typed pointers are not consequential -
# they just need to differ. Each pointer class has two instances, one
# in address space 0, another in address space 1.
ptrs = {
'op_a0': ir.PointerType(),
'op_a1': ir.PointerType(addrspace=1),
'tp_i1_a0': ir.PointerType(int1),
'tp_i1_a1': ir.PointerType(int1, addrspace=1),
'tp_i8_a0': ir.PointerType(int8),
'tp_i8_a1': ir.PointerType(int8, addrspace=1),
}
def assert_eq(ptr1, ptr2):
self.assertTrue(ptr1 == ptr2, (ptr1, ptr2))
self.assertTrue(ptr2 == ptr1, (ptr2, ptr1))
self.assertFalse(ptr1 != ptr2, (ptr1, ptr2))
self.assertFalse(ptr2 != ptr1, (ptr2, ptr1))
def assert_ne(ptr1, ptr2):
self.assertFalse(ptr1 == ptr2, (ptr1, ptr2))
self.assertFalse(ptr2 == ptr1, (ptr2, ptr1))
self.assertTrue(ptr1 != ptr2, (ptr1, ptr2))
self.assertTrue(ptr2 != ptr1, (ptr2, ptr1))
for ptr in ptrs.values():
# Compare the pointers against any non-pointer type.
for other in self.assorted_types():
if not isinstance(other, ir.PointerType):
assert_ne(ptr, other)
# Compare the pointers against themselves.
assert_eq(ptr, ptr)
# Compare the pointers against each other.
# Opaque pointers are always equal, unless their address space differs.
# Typed pointers always differ, unless their pointee type and address
# space match.
assert_ne(ptrs['op_a0'], ptrs['op_a1'])
assert_eq(ptrs['op_a0'], ptrs['tp_i1_a0'])
assert_ne(ptrs['op_a0'], ptrs['tp_i1_a1'])
assert_eq(ptrs['op_a0'], ptrs['tp_i8_a0'])
assert_ne(ptrs['op_a0'], ptrs['tp_i8_a1'])
assert_ne(ptrs['op_a1'], ptrs['tp_i1_a0'])
assert_eq(ptrs['op_a1'], ptrs['tp_i1_a1'])
assert_ne(ptrs['op_a1'], ptrs['tp_i8_a0'])
assert_eq(ptrs['op_a1'], ptrs['tp_i8_a1'])
assert_ne(ptrs['tp_i1_a0'], ptrs['tp_i1_a1'])
assert_ne(ptrs['tp_i1_a0'], ptrs['tp_i8_a0'])
assert_ne(ptrs['tp_i1_a0'], ptrs['tp_i8_a1'])
assert_ne(ptrs['tp_i1_a1'], ptrs['tp_i8_a0'])
assert_ne(ptrs['tp_i1_a1'], ptrs['tp_i8_a1'])
assert_ne(ptrs['tp_i8_a0'], ptrs['tp_i8_a1'])
def test_pointers(self):
# Basic opaque pointers.
ptr = ir.PointerType()
ptr2 = ir.PointerType(addrspace=2)
self.assertTrue(ptr.is_opaque)
self.assertTrue(ptr2.is_opaque)
self.assertEqual(str(ptr), "ptr")
self.assertEqual(str(ptr2), "ptr addrspace(2)")
# Pointers of opaque pointers (necessarily opaque pointers).
ptr_ptr = ptr.as_pointer()
ptr2_ptr = ptr2.as_pointer()
ptr2_ptr3 = ptr2.as_pointer(addrspace=3)
self.assertTrue(ptr_ptr.is_opaque)
self.assertTrue(ptr2_ptr.is_opaque)
self.assertTrue(ptr2_ptr3.is_opaque)
self.assertEqual(str(ptr_ptr), "ptr")
self.assertEqual(str(ptr2_ptr), "ptr")
self.assertEqual(str(ptr2_ptr3), "ptr addrspace(3)")
# Basic typed pointers.
tptr = ir.IntType(32).as_pointer()
tptr2 = ir.IntType(32).as_pointer(addrspace=2)
self.assertTrue(not tptr.is_opaque)
self.assertTrue(not tptr2.is_opaque)
if ir_layer_typed_pointers_enabled:
self.assertEqual(str(tptr), "i32*")
self.assertEqual(str(tptr2), "i32 addrspace(2)*")
else:
self.assertEqual(str(tptr), "ptr")
self.assertEqual(str(tptr2), "ptr addrspace(2)")
# Pointers of typed pointers (necessarily typed pointers).
tptr_ptr = tptr.as_pointer()
tptr2_ptr = tptr2.as_pointer()
tptr2_ptr3 = tptr2.as_pointer(addrspace=3)
self.assertTrue(not tptr_ptr.is_opaque)
self.assertTrue(not tptr2_ptr.is_opaque)
self.assertTrue(not tptr2_ptr3.is_opaque)
if ir_layer_typed_pointers_enabled:
self.assertEqual(str(tptr_ptr), "i32**")
self.assertEqual(str(tptr2_ptr), "i32 addrspace(2)**")
self.assertEqual(str(tptr2_ptr3), "i32 addrspace(2)* addrspace(3)*")
else:
self.assertEqual(str(tptr_ptr), "ptr")
self.assertEqual(str(tptr2_ptr), "ptr")
self.assertEqual(str(tptr2_ptr3), "ptr addrspace(3)")
def test_ptr_intrinsic_name(self):
self.assertEqual(ir.PointerType().intrinsic_name, 'p0')
self.assertEqual(ir.PointerType(addrspace=1).intrinsic_name, 'p1')
if not ir_layer_typed_pointers_enabled:
self.assertEqual(ir.PointerType(int1).intrinsic_name, 'p0')
self.assertEqual(ir.PointerType(int1, 1).intrinsic_name, 'p1')
else:
self.assertEqual(ir.PointerType(int1).intrinsic_name, 'p0i1')
self.assertEqual(ir.PointerType(int1, 1).intrinsic_name, 'p1i1')
def test_str(self):
"""
Test the string representation of types.
"""
self.assertEqual(str(int1), 'i1')
self.assertEqual(str(ir.IntType(29)), 'i29')
self.assertEqual(str(flt), 'float')
self.assertEqual(str(dbl), 'double')
self.assertEqual(str(ir.VoidType()), 'void')
self.assertEqual(str(ir.FunctionType(int1, ())), 'i1 ()')
self.assertEqual(str(ir.FunctionType(int1, (flt,))), 'i1 (float)')
self.assertEqual(str(ir.FunctionType(int1, (flt, dbl))),
'i1 (float, double)')
self.assertEqual(str(ir.FunctionType(int1, (), var_arg=True)),
'i1 (...)')
self.assertEqual(str(ir.FunctionType(int1, (flt,), var_arg=True)),
'i1 (float, ...)')
self.assertEqual(str(ir.FunctionType(int1, (flt, dbl), var_arg=True)),
'i1 (float, double, ...)')
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(ir.PointerType(int32)), 'ptr')
self.assertEqual(str(ir.PointerType(ir.PointerType(int32))), 'ptr')
else:
self.assertEqual(str(ir.PointerType(int32)), 'i32*')
self.assertEqual(str(ir.PointerType(ir.PointerType(int32))),
'i32**')
self.assertEqual(str(ir.ArrayType(int1, 5)), '[5 x i1]')
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(ir.ArrayType(ir.PointerType(int1), 5)),
'[5 x ptr]')
self.assertEqual(str(ir.PointerType(ir.ArrayType(int1, 5))), 'ptr')
else:
self.assertEqual(str(ir.ArrayType(ir.PointerType(int1), 5)),
'[5 x i1*]')
self.assertEqual(str(ir.PointerType(ir.ArrayType(int1, 5))),
'[5 x i1]*')
self.assertEqual(str(ir.LiteralStructType((int1,))), '{i1}')
self.assertEqual(str(ir.LiteralStructType((int1, flt))), '{i1, float}')
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(ir.LiteralStructType((
ir.PointerType(int1), ir.LiteralStructType((int32, int8))))),
'{ptr, {i32, i8}}')
else:
self.assertEqual(str(ir.LiteralStructType((
ir.PointerType(int1), ir.LiteralStructType((int32, int8))))),
'{i1*, {i32, i8}}')
self.assertEqual(str(ir.LiteralStructType((int1,), packed=True)),
'<{i1}>')
self.assertEqual(str(ir.LiteralStructType((int1, flt), packed=True)),
'<{i1, float}>')
# Avoid polluting the namespace
context = ir.Context()
mytype = context.get_identified_type("MyType")
self.assertEqual(str(mytype), "%\"MyType\"")
mytype1 = context.get_identified_type("MyType\\")
self.assertEqual(str(mytype1), "%\"MyType\\5c\"")
mytype2 = context.get_identified_type("MyType\"")
self.assertEqual(str(mytype2), "%\"MyType\\22\"")
def test_hash(self):
for typ in filter(self.has_logical_equality, self.assorted_types()):
self.assertEqual(hash(typ), hash(copy.copy(typ)))
def test_gep(self):
def check_constant(tp, i, expected):
actual = tp.gep(ir.Constant(int32, i))
self.assertEqual(actual, expected)
def check_index_type(tp):
index = ir.Constant(dbl, 1.0)
with self.assertRaises(TypeError):
tp.gep(index)
tp = ir.PointerType(dbl)
for i in range(5):
check_constant(tp, i, dbl)
check_index_type(tp)
tp = ir.ArrayType(int1, 3)
for i in range(3):
check_constant(tp, i, int1)
check_index_type(tp)
tp = ir.LiteralStructType((dbl, ir.LiteralStructType((int1, int8))))
check_constant(tp, 0, dbl)
check_constant(tp, 1, ir.LiteralStructType((int1, int8)))
with self.assertRaises(IndexError):
tp.gep(ir.Constant(int32, 2))
check_index_type(tp)
context = ir.Context()
tp = ir.IdentifiedStructType(context, "MyType")
tp.set_body(dbl, ir.LiteralStructType((int1, int8)))
check_constant(tp, 0, dbl)
check_constant(tp, 1, ir.LiteralStructType((int1, int8)))
with self.assertRaises(IndexError):
tp.gep(ir.Constant(int32, 2))
check_index_type(tp)
def test_abi_size(self):
td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
def check(tp, expected):
self.assertEqual(tp.get_abi_size(td), expected)
check(int8, 1)
check(int32, 4)
check(int64, 8)
check(ir.ArrayType(int8, 5), 5)
check(ir.ArrayType(int32, 5), 20)
check(ir.LiteralStructType((dbl, flt, flt)), 16)
def test_abi_alignment(self):
td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
def check(tp, expected):
self.assertIn(tp.get_abi_alignment(td), expected)
check(int8, (1, 2, 4))
check(int32, (4,))
check(int64, (8,))
check(ir.ArrayType(int8, 5), (1, 2, 4))
check(ir.ArrayType(int32, 5), (4,))
check(ir.LiteralStructType((dbl, flt, flt)), (8,))
def test_identified_struct(self):
context = ir.Context()
mytype = context.get_identified_type("MyType")
module = ir.Module(context=context)
self.assertTrue(mytype.is_opaque)
self.assert_valid_ir(module)
oldstr = str(module)
mytype.set_body(ir.IntType(32), ir.IntType(64), ir.FloatType())
self.assertFalse(mytype.is_opaque)
self.assert_valid_ir(module)
self.assertNotEqual(oldstr, str(module))
def test_identified_struct_packed(self):
td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
context = ir.Context()
mytype = context.get_identified_type("MyType", True)
module = ir.Module(context=context)
self.assertTrue(mytype.is_opaque)
self.assert_valid_ir(module)
oldstr = str(module)
mytype.set_body(ir.IntType(16), ir.IntType(64), ir.FloatType())
self.assertEqual(mytype.get_element_offset(td, 1, context), 2)
self.assertFalse(mytype.is_opaque)
self.assert_valid_ir(module)
self.assertNotEqual(oldstr, str(module))
def test_target_data_non_default_context(self):
context = ir.Context()
mytype = context.get_identified_type("MyType")
mytype.elements = [ir.IntType(32)]
td = llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
self.assertEqual(mytype.get_abi_size(td, context=context), 4)
def test_vector(self):
vecty = ir.VectorType(ir.IntType(32), 8)
self.assertEqual(str(vecty), "<8 x i32>")
def c32(i):
return ir.Constant(int32, i)
| TestTypes |
python | doocs__leetcode | solution/0800-0899/0873.Length of Longest Fibonacci Subsequence/Solution.py | {
"start": 0,
"end": 549
} | class ____:
def lenLongestFibSubseq(self, arr: List[int]) -> int:
n = len(arr)
f = [[0] * n for _ in range(n)]
d = {x: i for i, x in enumerate(arr)}
for i in range(n):
for j in range(i):
f[i][j] = 2
ans = 0
for i in range(2, n):
for j in range(1, i):
t = arr[i] - arr[j]
if t in d and (k := d[t]) < j:
f[i][j] = max(f[i][j], f[j][k] + 1)
ans = max(ans, f[i][j])
return ans
| Solution |
python | coleifer__peewee | peewee.py | {
"start": 22248,
"end": 23753
} | class ____(Node):
c = _DynamicColumn()
def __init__(self, alias=None):
super(Source, self).__init__()
self._alias = alias
@Node.copy
def alias(self, name):
self._alias = name
def select(self, *columns):
if not columns:
columns = (SQL('*'),)
return Select((self,), columns)
@property
def __star__(self):
return Star(self)
def join(self, dest, join_type=JOIN.INNER, on=None):
return Join(self, dest, join_type, on)
def left_outer_join(self, dest, on=None):
return Join(self, dest, JOIN.LEFT_OUTER, on)
def cte(self, name, recursive=False, columns=None, materialized=None):
return CTE(name, self, recursive=recursive, columns=columns,
materialized=materialized)
def get_sort_key(self, ctx):
if self._alias:
return (self._alias,)
return (ctx.alias_manager[self],)
def apply_alias(self, ctx):
# If we are defining the source, include the "AS alias" declaration. An
# alias is created for the source if one is not already defined.
if ctx.scope == SCOPE_SOURCE:
if self._alias:
ctx.alias_manager[self] = self._alias
ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self]))
return ctx
def apply_column(self, ctx):
if self._alias:
ctx.alias_manager[self] = self._alias
return ctx.sql(Entity(ctx.alias_manager[self]))
| Source |
python | sympy__sympy | sympy/printing/numpy.py | {
"start": 19854,
"end": 21049
} | class ____(NumPyPrinter):
"""
JAX printer which handles vectorized piecewise functions,
logical operators, etc.
"""
_module = "jax.numpy"
_kf = _jax_known_functions
_kc = _jax_known_constants
def __init__(self, settings=None):
super().__init__(settings=settings)
self.printmethod = '_jaxcode'
# These need specific override to allow for the lack of "jax.numpy.reduce"
def _print_And(self, expr):
"Logical And printer"
return "{}({}.asarray([{}]), axis=0)".format(
self._module_format(self._module + ".all"),
self._module_format(self._module),
",".join(self._print(i) for i in expr.args),
)
def _print_Or(self, expr):
"Logical Or printer"
return "{}({}.asarray([{}]), axis=0)".format(
self._module_format(self._module + ".any"),
self._module_format(self._module),
",".join(self._print(i) for i in expr.args),
)
for func in _jax_known_functions:
setattr(JaxPrinter, f'_print_{func}', _print_known_func)
for const in _jax_known_constants:
setattr(JaxPrinter, f'_print_{const}', _print_known_const)
| JaxPrinter |
python | lepture__authlib | authlib/oidc/core/claims.py | {
"start": 9060,
"end": 11521
} | class ____(dict):
"""The standard claims of a UserInfo object. Defined per `Section 5.1`_.
.. _`Section 5.1`: http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
"""
#: registered claims that UserInfo supports
REGISTERED_CLAIMS = [
"sub",
"name",
"given_name",
"family_name",
"middle_name",
"nickname",
"preferred_username",
"profile",
"picture",
"website",
"email",
"email_verified",
"gender",
"birthdate",
"zoneinfo",
"locale",
"phone_number",
"phone_number_verified",
"address",
"updated_at",
]
SCOPES_CLAIMS_MAPPING = {
"openid": ["sub"],
"profile": [
"name",
"family_name",
"given_name",
"middle_name",
"nickname",
"preferred_username",
"profile",
"picture",
"website",
"gender",
"birthdate",
"zoneinfo",
"locale",
"updated_at",
],
"email": ["email", "email_verified"],
"address": ["address"],
"phone": ["phone_number", "phone_number_verified"],
}
def filter(self, scope: str):
"""Return a new UserInfo object containing only the claims matching the scope passed in parameter."""
scope = scope_to_list(scope)
filtered_claims = [
claim
for scope_part in scope
for claim in self.SCOPES_CLAIMS_MAPPING.get(scope_part, [])
]
filtered_items = {
key: val for key, val in self.items() if key in filtered_claims
}
return UserInfo(filtered_items)
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError as error:
if key in self.REGISTERED_CLAIMS:
return self.get(key)
raise error
def get_claim_cls_by_response_type(response_type):
claims_classes = (CodeIDToken, ImplicitIDToken, HybridIDToken)
for claims_cls in claims_classes:
if response_type in claims_cls.RESPONSE_TYPES:
return claims_cls
def _verify_hash(signature, s, alg):
hash_value = create_half_hash(s, alg)
if not hash_value:
return True
return hmac.compare_digest(hash_value, to_bytes(signature))
| UserInfo |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_supervisor.py | {
"start": 94169,
"end": 95687
} | class ____:
class DummyComms:
pass
@pytest.fixture(autouse=True)
def cleanup_supervisor_comms(self):
# Ensure clean state before/after test
if hasattr(task_runner, "SUPERVISOR_COMMS"):
delattr(task_runner, "SUPERVISOR_COMMS")
yield
if hasattr(task_runner, "SUPERVISOR_COMMS"):
delattr(task_runner, "SUPERVISOR_COMMS")
def test_set_supervisor_comms_overrides_and_restores(self):
task_runner.SUPERVISOR_COMMS = self.DummyComms()
original = task_runner.SUPERVISOR_COMMS
replacement = self.DummyComms()
with set_supervisor_comms(replacement):
assert task_runner.SUPERVISOR_COMMS is replacement
assert task_runner.SUPERVISOR_COMMS is original
def test_set_supervisor_comms_sets_temporarily_when_not_set(self):
assert not hasattr(task_runner, "SUPERVISOR_COMMS")
replacement = self.DummyComms()
with set_supervisor_comms(replacement):
assert task_runner.SUPERVISOR_COMMS is replacement
assert not hasattr(task_runner, "SUPERVISOR_COMMS")
def test_set_supervisor_comms_unsets_temporarily_when_not_set(self):
assert not hasattr(task_runner, "SUPERVISOR_COMMS")
# This will delete an attribute that isn't set, and restore it likewise
with set_supervisor_comms(None):
assert not hasattr(task_runner, "SUPERVISOR_COMMS")
assert not hasattr(task_runner, "SUPERVISOR_COMMS")
| TestSetSupervisorComms |
python | sanic-org__sanic | sanic/models/server_types.py | {
"start": 202,
"end": 238
} | class ____:
stopped = False
| Signal |
python | ahupp__python-magic | magic/compat.py | {
"start": 1601,
"end": 3176
} | class ____(Structure):
pass
magic_set._fields_ = []
magic_t = POINTER(magic_set)
_open = _libraries['magic'].magic_open
_open.restype = magic_t
_open.argtypes = [c_int]
_close = _libraries['magic'].magic_close
_close.restype = None
_close.argtypes = [magic_t]
_file = _libraries['magic'].magic_file
_file.restype = c_char_p
_file.argtypes = [magic_t, c_char_p]
_descriptor = _libraries['magic'].magic_descriptor
_descriptor.restype = c_char_p
_descriptor.argtypes = [magic_t, c_int]
_buffer = _libraries['magic'].magic_buffer
_buffer.restype = c_char_p
_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_error = _libraries['magic'].magic_error
_error.restype = c_char_p
_error.argtypes = [magic_t]
_setflags = _libraries['magic'].magic_setflags
_setflags.restype = c_int
_setflags.argtypes = [magic_t, c_int]
_load = _libraries['magic'].magic_load
_load.restype = c_int
_load.argtypes = [magic_t, c_char_p]
_compile = _libraries['magic'].magic_compile
_compile.restype = c_int
_compile.argtypes = [magic_t, c_char_p]
_check = _libraries['magic'].magic_check
_check.restype = c_int
_check.argtypes = [magic_t, c_char_p]
_list = _libraries['magic'].magic_list
_list.restype = c_int
_list.argtypes = [magic_t, c_char_p]
_errno = _libraries['magic'].magic_errno
_errno.restype = c_int
_errno.argtypes = [magic_t]
_getparam = _libraries['magic'].magic_getparam
_getparam.restype = c_int
_getparam.argtypes = [magic_t, c_int, c_void_p]
_setparam = _libraries['magic'].magic_setparam
_setparam.restype = c_int
_setparam.argtypes = [magic_t, c_int, c_void_p]
| magic_set |
python | walkccc__LeetCode | solutions/139. Word Break/139-3.py | {
"start": 0,
"end": 453
} | class ____:
def wordBreak(self, s: str, wordDict: list[str]) -> bool:
n = len(s)
wordSet = set(wordDict)
# dp[i] := True if s[0..i) can be segmented
dp = [True] + [False] * n
for i in range(1, n + 1):
for j in range(i):
# s[0..j) can be segmented and s[j..i) is in `wordSet`, so s[0..i) can
# be segmented.
if dp[j] and s[j:i] in wordSet:
dp[i] = True
break
return dp[n]
| Solution |
python | networkx__networkx | networkx/classes/reportviews.py | {
"start": 24071,
"end": 24122
} | class ____(ABC):
pass
# EdgeDataViews
| EdgeViewABC |
python | encode__django-rest-framework | rest_framework/permissions.py | {
"start": 7460,
"end": 9588
} | class ____(DjangoModelPermissions):
"""
The request is authenticated using Django's object-level permissions.
It requires an object-permissions-enabled backend, such as Django Guardian.
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the object using .has_perms.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def get_required_object_permissions(self, method, model_cls):
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': model_cls._meta.model_name
}
if method not in self.perms_map:
raise exceptions.MethodNotAllowed(method)
return [perm % kwargs for perm in self.perms_map[method]]
def has_object_permission(self, request, view, obj):
# authentication checks have already executed via has_permission
queryset = self._queryset(view)
model_cls = queryset.model
user = request.user
perms = self.get_required_object_permissions(request.method, model_cls)
if not user.has_perms(perms, obj):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404
read_perms = self.get_required_object_permissions('GET', model_cls)
if not user.has_perms(read_perms, obj):
raise Http404
# Has read permissions.
return False
return True
| DjangoObjectPermissions |
python | pytorch__pytorch | test/inductor/test_config.py | {
"start": 409,
"end": 500
} | class ____(torch.nn.Module):
def forward(self, x):
return dummy_fn(x)
| DummyModule |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 5989,
"end": 6080
} | class ____(PydanticTypeError):
msg_template = 'value is not a valid integer'
| IntegerError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 495058,
"end": 495555
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of ClearProjectV2ItemFieldValue"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_v2_item")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_v2_item = sgqlc.types.Field("ProjectV2Item", graphql_name="projectV2Item")
"""The updated item."""
| ClearProjectV2ItemFieldValuePayload |
python | viewflow__viewflow | viewflow/workflow/flow/views/dashboard.py | {
"start": 3213,
"end": 4134
} | class ____(
mixins.StoreRequestPathMixin,
mixins.ProcessViewTemplateNames,
ListModelView,
):
"""List of all tasks of the flow."""
flow_class = None
template_filename = "task_list.html"
columns = ("task_id", "flow_task", "process_summary", "created", "owner")
filterset_class = filters.DashboardTaskListViewFilter
def task_id(self, task):
task_url = task.flow_task.reverse("index", args=[task.process_id, task.pk])
return mark_safe(f'<a href="{task_url}">#{task.process_id}/{task.pk}</a>')
task_id.short_description = _("#")
def process_summary(self, task):
return task.process.coerced.brief
@property
def model(self):
return self.flow_class.task_class
@viewprop
def queryset(self):
queryset = self.model._default_manager.all()
return queryset.filter(process__flow_class=self.flow_class)
| DashboardTaskListView |
python | google__jax | tests/cudnn_fusion_test.py | {
"start": 813,
"end": 2474
} | class ____(jtu.JaxTestCase):
def setUp(self):
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_at_least("8.0")):
self.skipTest("Only works on >= sm80 GPUs")
super().setUp()
@parameterized.parameters(["", "pmap"])
@jtu.run_on_devices("cuda")
def test_cudnn_fusion(self, mode):
if jtu.is_cuda_version_at_least(13, 0):
self.skipTest("cuDNN creates no execution plans on CUDA 13.0.")
batch_size = 2
if mode == "pmap" and jax.device_count() < batch_size:
raise SkipTest("pmap test requires 2 GPUs")
@cudnn_fusion
def comp1(x, y, z):
return jnp.float32(jax.lax.batch_matmul(jnp.bfloat16(x), y)) + z
k = jax.random.key(0)
s = batch_size, 16, 16
x = jnp.int8(jax.random.normal(k, shape=s))
y = jnp.bfloat16(jax.random.normal(k, shape=s))
z = jnp.float32(jax.random.normal(k, shape=s))
fn = jax.pmap(comp1) if mode == "pmap" else comp1
jitted = jax.jit(comp1)
lowered = jitted.lower(x, y, z)
stablehlo = lowered.as_text("stablehlo")
self.assertIn("func.func private @comp1", stablehlo)
self.assertIn("__cudnn$fusion", stablehlo)
hlo = lowered.as_text("hlo")
self.assertIn('custom_call_target="__cudnn$fusion"', hlo)
self.assertIn("called_computations=", hlo)
compiled = lowered.compile({"xla_gpu_cublas_fallback": False})
hlo_after_opt = compiled.as_text()
self.assertIn("kind=kCustom", hlo_after_opt)
self.assertIn("plan_id", hlo_after_opt)
self.assertAllClose(compiled(x, y, z), fn(x, y, z))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| CudnnFusionTest |
python | viewflow__viewflow | viewflow/fields.py | {
"start": 634,
"end": 3845
} | class ____(models.AutoField):
class Key(dict):
"""Dictionary with json-compatible string conversion."""
def __str__(self):
return json.dumps(self)
def __hash__(self):
return hash(tuple(self[key] for key in sorted(self.keys())))
def __init__(self, columns: List[str], **kwargs):
self.columns = columns
super().__init__(primary_key=True, **kwargs)
def contribute_to_class(self, cls, name, private_only=False):
self.set_attributes_from_name(name)
self.model = cls
self.concrete = False
self.editable = False
self.column = self.columns[0] # for default order_by
cls._meta.add_field(self, private=True) # virtual field
cls._meta.setup_pk(self) # acts as pk
if not getattr(cls, self.attname, None):
setattr(cls, self.attname, self)
def delete(inst, using=None, keep_parents=False):
using = using or router.db_for_write(self.model, instance=inst)
signals.pre_delete.send(sender=cls, instance=inst, using=using)
query = cls._default_manager.filter(**self.__get__(inst))
query._raw_delete(using)
for column in self.columns:
setattr(inst, column, None)
signals.post_delete.send(sender=cls, instance=inst, using=using)
cls.delete = delete
def get_prep_value(self, value):
return self.to_python(value)
def to_python(self, value):
if value is None or isinstance(value, CompositeKey.Key):
return value
if isinstance(value, dict):
return value
return CompositeKey.Key(json.loads(value))
def to_json(self, value):
if isinstance(value, datetime.datetime):
result = value.isoformat()
if value.microsecond:
result = result[:23] + result[26:]
if result.endswith("+00:00"):
result = result[:-6] + "Z"
return result
elif isinstance(value, datetime.date):
return value.isoformat()
elif isinstance(value, datetime.time):
if is_aware(value):
raise ValueError("JSON can't represent timezone-aware times.")
result = value.isoformat()
if value.microsecond:
result = result[:12]
return result
elif isinstance(value, datetime.timedelta):
return duration_iso_string(value)
elif isinstance(value, (decimal.Decimal, uuid.UUID, Promise)):
return str(value)
return value
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
return []
def __get__(self, instance, cls=None):
if instance is None:
return self
return CompositeKey.Key(
{
column: self.to_json(
self.model._meta.get_field(column).value_from_object(instance)
)
for column in self.columns
}
)
def __set__(self, instance, value):
"""
I hope it's safe to ignore!
"""
pass
@CompositeKey.register_lookup
| CompositeKey |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 21660,
"end": 21856
} | class ____(HITLDetailRequest):
"""Add the input request part of a Human-in-the-loop response."""
type: Literal["CreateHITLDetailPayload"] = "CreateHITLDetailPayload"
| CreateHITLDetailPayload |
python | pytest-dev__pytest | src/_pytest/threadexception.py | {
"start": 509,
"end": 4953
} | class ____(NamedTuple):
msg: str
cause_msg: str
exc_value: BaseException | None
thread_exceptions: StashKey[collections.deque[ThreadExceptionMeta | BaseException]] = (
StashKey()
)
def collect_thread_exception(config: Config) -> None:
pop_thread_exception = config.stash[thread_exceptions].pop
errors: list[pytest.PytestUnhandledThreadExceptionWarning | RuntimeError] = []
meta = None
hook_error = None
try:
while True:
try:
meta = pop_thread_exception()
except IndexError:
break
if isinstance(meta, BaseException):
hook_error = RuntimeError("Failed to process thread exception")
hook_error.__cause__ = meta
errors.append(hook_error)
continue
msg = meta.msg
try:
warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))
except pytest.PytestUnhandledThreadExceptionWarning as e:
# This except happens when the warning is treated as an error (e.g. `-Werror`).
if meta.exc_value is not None:
# Exceptions have a better way to show the traceback, but
# warnings do not, so hide the traceback from the msg and
# set the cause so the traceback shows up in the right place.
e.args = (meta.cause_msg,)
e.__cause__ = meta.exc_value
errors.append(e)
if len(errors) == 1:
raise errors[0]
if errors:
raise ExceptionGroup("multiple thread exception warnings", errors)
finally:
del errors, meta, hook_error
def cleanup(
*, config: Config, prev_hook: Callable[[threading.ExceptHookArgs], object]
) -> None:
try:
try:
# We don't join threads here, so exceptions raised from any
# threads still running by the time _threading_atexits joins them
# do not get captured (see #13027).
collect_thread_exception(config)
finally:
threading.excepthook = prev_hook
finally:
del config.stash[thread_exceptions]
def thread_exception_hook(
args: threading.ExceptHookArgs,
/,
*,
append: Callable[[ThreadExceptionMeta | BaseException], object],
) -> None:
try:
# we need to compute these strings here as they might change after
# the excepthook finishes and before the metadata object is
# collected by a pytest hook
thread_name = "<unknown>" if args.thread is None else args.thread.name
summary = f"Exception in thread {thread_name}"
traceback_message = "\n\n" + "".join(
traceback.format_exception(
args.exc_type,
args.exc_value,
args.exc_traceback,
)
)
tracemalloc_tb = "\n" + tracemalloc_message(args.thread)
msg = summary + traceback_message + tracemalloc_tb
cause_msg = summary + tracemalloc_tb
append(
ThreadExceptionMeta(
# Compute these strings here as they might change later
msg=msg,
cause_msg=cause_msg,
exc_value=args.exc_value,
)
)
except BaseException as e:
append(e)
# Raising this will cause the exception to be logged twice, once in our
# collect_thread_exception and once by sys.excepthook
# which is fine - this should never happen anyway and if it does
# it should probably be reported as a pytest bug.
raise
def pytest_configure(config: Config) -> None:
prev_hook = threading.excepthook
deque: collections.deque[ThreadExceptionMeta | BaseException] = collections.deque()
config.stash[thread_exceptions] = deque
config.add_cleanup(functools.partial(cleanup, config=config, prev_hook=prev_hook))
threading.excepthook = functools.partial(thread_exception_hook, append=deque.append)
@pytest.hookimpl(trylast=True)
def pytest_runtest_setup(item: Item) -> None:
collect_thread_exception(item.config)
@pytest.hookimpl(trylast=True)
def pytest_runtest_call(item: Item) -> None:
collect_thread_exception(item.config)
@pytest.hookimpl(trylast=True)
def pytest_runtest_teardown(item: Item) -> None:
collect_thread_exception(item.config)
| ThreadExceptionMeta |
python | pytorch__pytorch | torch/testing/_internal/common_fsdp.py | {
"start": 16437,
"end": 17745
} | class ____(NestedWrappedModule):
@staticmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
device_init_mode: DEVICEInitMode,
fsdp_kwargs: Optional[dict[str, Any]] = None,
deterministic: bool = False,
):
"""
Initializes a :class:`NestedWrappedModule` instance, but unlike
:meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this
wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap
policy.
"""
model = super(
AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule
).init(
group=group,
fsdp_init_mode=FSDPInitMode.NO_FSDP,
device_init_mode=device_init_mode,
fsdp_kwargs=fsdp_kwargs,
deterministic=deterministic,
)
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
return model
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
fsdp_kwargs = fsdp_kwargs or {}
fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs)
if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
fsdp_model = fsdp_model.to(DEVICE_TYPE)
return fsdp_model
| AlwaysWrapNestedWrappedModule |
python | miyuchina__mistletoe | mistletoe/contrib/scheme.py | {
"start": 212,
"end": 377
} | class ____(block_token.BlockToken):
def __init__(self, lines):
self.children = span_token.tokenize_inner(''.join([line.strip() for line in lines]))
| Program |
python | astropy__astropy | astropy/cosmology/_src/tests/io/test_mapping.py | {
"start": 9062,
"end": 9427
} | class ____(ToFromDirectTestBase, ToFromMappingTestMixin):
"""Directly test ``to/from_mapping``."""
def setup_class(self):
self.functions = {"to": to_mapping, "from": from_mapping}
@pytest.mark.skip("N/A")
def test_fromformat_subclass_partial_info_mapping(self):
"""This test does not apply to the direct functions."""
| TestToFromMapping |
python | getsentry__sentry | src/sentry/search/events/types.py | {
"start": 1128,
"end": 1932
} | class ____(TypedDict, total=False):
project_id: Sequence[int]
projects: list[Project]
project_objects: list[Project]
start: datetime
end: datetime
environment: NotRequired[str | list[str]]
organization_id: NotRequired[int | None]
use_case_id: NotRequired[str]
team_id: NotRequired[list[int]]
environment_objects: NotRequired[list[Environment]]
statsPeriod: NotRequired[str]
SelectType = Union[AliasedExpression, Column, Function, CurriedFunction]
NormalizedArg = Optional[Union[str, float]]
HistogramParams = namedtuple(
"HistogramParams", ["num_buckets", "bucket_size", "start_offset", "multiplier"]
)
# converter is to convert the aggregate filter to snuba query
Alias = namedtuple("Alias", "converter aggregate resolved_function")
@dataclass
| ParamsType |
python | pypa__warehouse | tests/unit/utils/test_paginate.py | {
"start": 147,
"end": 235
} | class ____:
def __init__(self, options):
self.options = options
| FakeSuggestion |
python | python-attrs__attrs | src/attr/_make.py | {
"start": 101670,
"end": 104141
} | class ____:
"""
Compose many validators to a single one.
"""
_validators = attrib()
def __call__(self, inst, attr, value):
for v in self._validators:
v(inst, attr, value)
def and_(*validators):
"""
A validator that composes multiple validators into one.
When called on a value, it runs all wrapped validators.
Args:
validators (~collections.abc.Iterable[typing.Callable]):
Arbitrary number of validators.
.. versionadded:: 17.1.0
"""
vals = []
for validator in validators:
vals.extend(
validator._validators
if isinstance(validator, _AndValidator)
else [validator]
)
return _AndValidator(tuple(vals))
def pipe(*converters):
"""
A converter that composes multiple converters into one.
When called on a value, it runs all wrapped converters, returning the
*last* value.
Type annotations will be inferred from the wrapped converters', if they
have any.
converters (~collections.abc.Iterable[typing.Callable]):
Arbitrary number of converters.
.. versionadded:: 20.1.0
"""
return_instance = any(isinstance(c, Converter) for c in converters)
if return_instance:
def pipe_converter(val, inst, field):
for c in converters:
val = (
c(val, inst, field) if isinstance(c, Converter) else c(val)
)
return val
else:
def pipe_converter(val):
for c in converters:
val = c(val)
return val
if not converters:
# If the converter list is empty, pipe_converter is the identity.
A = TypeVar("A")
pipe_converter.__annotations__.update({"val": A, "return": A})
else:
# Get parameter type from first converter.
t = _AnnotationExtractor(converters[0]).get_first_param_type()
if t:
pipe_converter.__annotations__["val"] = t
last = converters[-1]
if not PY_3_11_PLUS and isinstance(last, Converter):
last = last.__call__
# Get return type from last converter.
rt = _AnnotationExtractor(last).get_return_type()
if rt:
pipe_converter.__annotations__["return"] = rt
if return_instance:
return Converter(pipe_converter, takes_self=True, takes_field=True)
return pipe_converter
| _AndValidator |
python | huggingface__transformers | tests/models/rembert/test_tokenization_rembert.py | {
"start": 1095,
"end": 2994
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "google/rembert"
tokenizer_class = RemBertTokenizer
pre_trained_model_path = "google/rembert"
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '生活', '的', '真', '谛', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁', '▁', '▁', '▁', '▁', '▁', '▁Hello', '▁', '<s>', '▁hi', '<s>', 'there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'coded', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [1357, 619, 577, 3515, 573, 119091, 623, 820, 18648, 586, 940, 7905, 571, 599, 902, 619, 98696, 780, 572, 573, 6334, 649, 3975, 244511, 1034, 3211, 24624, 3211, 24624, 573, 573, 573, 573, 573, 573, 24624, 573, 3, 1785, 3, 90608, 660, 6802, 15930, 2575, 689, 43272, 592, 185434, 581, 24624, 572, 2878, 1032, 620, 599, 9070, 1032, 620, 60827, 20490, 1865, 781, 734, 9711] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '生活', '的', '真', '谛', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁', '▁', '▁', '▁', '▁', '▁', '▁Hello', '▁', '<s>', '▁hi', '<s>', 'there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'coded', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊 I was born in 92000, and this is falsé. 生活的真谛是 Hi Hello Hi Hello Hello <s> hi<s>there The following string should be properly encoded: Hello. But ird and ปี ird ด Hey how are you doing"
| RemBertTokenizationTest |
python | fastapi__sqlmodel | sqlmodel/sql/_expression_select_cls.py | {
"start": 1121,
"end": 1483
} | class ____(SelectBase[_T]):
inherit_cache = True
# This is not comparable to sqlalchemy.sql.selectable.ScalarSelect, that has a different
# purpose. This is the same as a normal SQLAlchemy Select class where there's only one
# entity, so the result will be converted to a scalar by default. This way writing
# for loops on the results will feel natural.
| Select |
python | huggingface__transformers | tests/quantization/eetq_integration/test_eetq.py | {
"start": 2253,
"end": 6553
} | class ____(unittest.TestCase):
model_name = "facebook/opt-350m"
input_text = "What are we having for dinner?"
max_new_tokens = 9
EXPECTED_OUTPUT = "What are we having for dinner?\nI'm having a steak and a salad"
device_map = "cuda"
# called only once for all test in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
quantization_config = EetqConfig(weights="int8")
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name, device_map=cls.device_map, quantization_config=quantization_config
)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from eetq import EetqLinear
from transformers.integrations import replace_with_eetq_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
quantization_config = EetqConfig(weights="int8")
with init_empty_weights():
model = OPTForCausalLM(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model = replace_with_eetq_linear(model, quantization_config=quantization_config)
nb_eetq_linear = 0
for module in model.modules():
if isinstance(module, EetqLinear):
nb_eetq_linear += 1
self.assertEqual(nb_linears - 1, nb_eetq_linear)
# Try with `modules_to_not_convert`
with init_empty_weights():
model = OPTForCausalLM(config)
quantization_config = EetqConfig(modules_to_not_convert=["fc1"])
model = replace_with_eetq_linear(model, quantization_config=quantization_config)
nb_eetq_linear = 0
for module in model.modules():
if isinstance(module, EetqLinear):
nb_eetq_linear += 1
# 25 corresponds to the lm_head along with 24 fc1 layers.
self.assertEqual(nb_linears - 25, nb_eetq_linear)
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_multi_gpu
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantization_config = EetqConfig()
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name, device_map="auto", quantization_config=quantization_config
)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
| EetqTest |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/llama_index/vector_stores/moorcheh/base.py | {
"start": 864,
"end": 16786
} | class ____(BasePydanticVectorStore):
"""
Moorcheh Vector Store.
In this vector store, embeddings and docs are stored within a Moorcheh namespace.
During query time, the index uses Moorcheh to query for the top k most similar nodes.
Args:
api_key (Optional[str]): API key for Moorcheh.
If not provided, will look for MOORCHEH_API_KEY environment variable.
namespace (str): Namespace name to use for this vector store.
namespace_type (str): Type of namespace - "text" or "vector".
vector_dimension (Optional[int]): Vector dimension for vector namespace.
batch_size (int): Batch size for adding nodes. Defaults to DEFAULT_EMBED_BATCH_SIZE.
**kwargs: Additional arguments to pass to MoorchehClient.
"""
# Default values and capabilities
DEFAULT_NAMESPACE: ClassVar[str] = "llamaindex_default"
DEFAULT_EMBED_BATCH_SIZE: ClassVar[int] = 64 # customize as needed
stores_text: bool = True
flat_metadata: bool = True
api_key: Optional[str]
namespace: Optional[str]
namespace_type: Optional[Literal["text", "vector"]] = None
vector_dimension: Optional[int]
add_sparse_vector: Optional[bool]
ai_model: Optional[str]
batch_size: int
sparse_embedding_model: Optional[BaseSparseEmbedding] = None
def __init__(
self,
api_key: Optional[str] = None,
namespace: Optional[str] = None,
namespace_type: Optional[str] = "text",
vector_dimension: Optional[int] = None,
add_sparse_vector: Optional[bool] = False,
tokenizer: Optional[Callable] = None,
ai_model: Optional[str] = "anthropic.claude-3-7-sonnet-20250219-v1:0",
batch_size: int = 64,
sparse_embedding_model: Optional[BaseSparseEmbedding] = None,
) -> None:
# Initialize store attributes
if add_sparse_vector:
if sparse_embedding_model is not None:
sparse_embedding_model = sparse_embedding_model
elif tokenizer is not None:
sparse_embedding_model = DefaultMoorchehSparseEmbedding(
tokenizer=tokenizer
)
else:
sparse_embedding_model = DefaultMoorchehSparseEmbedding()
else:
sparse_embedding_model = None
super().__init__(
api_key=api_key,
namespace=namespace,
namespace_type=namespace_type,
vector_dimension=vector_dimension,
add_sparse_vector=add_sparse_vector,
batch_size=batch_size,
sparse_embedding_model=sparse_embedding_model,
ai_model=ai_model,
)
# Fallback to env var if API key not provided
if not self.api_key:
self.api_key = os.getenv("MOORCHEH_API_KEY")
if not self.api_key:
raise ValueError("`api_key` is required for Moorcheh client initialization")
if not self.namespace:
raise ValueError(
"`namespace` is required for Moorcheh client initialization"
)
# Initialize Moorcheh client
logger.debug("Initializing MoorchehClient")
self._client = MoorchehClient(api_key=self.api_key)
self.is_embedding_query = False
self._sparse_embedding_model = sparse_embedding_model
self.namespace = namespace
logger.debug("Listing namespaces...")
try:
namespaces_response = self._client.list_namespaces()
namespaces = [
namespace["namespace_name"]
for namespace in namespaces_response.get("namespaces", [])
]
logger.debug("Found namespaces.")
except Exception as e:
logger.debug("Failed to list namespaces: {e}")
raise
# Check if the namespace exists
if self.namespace in namespaces:
logger.debug(
"Namespace '{self.namespace}' already exists. No action required."
)
else:
logger.debug("Namespace '{self.namespace}' not found. Creating it.")
# If the namespace doesn't exist, create it
try:
self._client.create_namespace(
namespace_name=self.namespace,
type=self.namespace_type,
vector_dimension=self.vector_dimension,
)
logger.debug("Namespace '{self.namespace}' created.")
except Exception as e:
logger.debug("Failed to create namespace: {e}")
raise
# _client: MoorchehClient = PrivateAttr()
@property
def client(self) -> MoorchehClient:
"""Return initialized Moorcheh client."""
return self._client
@classmethod
def class_name(cls) -> str:
"""Return class name."""
return "MoorchehVectorStore"
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Moorcheh."""
if not nodes:
return []
if self.namespace_type == "text":
return self._add_text_nodes(nodes, **add_kwargs)
else:
return self._add_vector_nodes(nodes, **add_kwargs)
def _add_text_nodes(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
"""Add text documents to a text namespace."""
documents = []
ids = []
sparse_inputs = []
for node in nodes:
node_id = node.node_id or str(uuid.uuid4())
ids.append(node_id)
document = {
"id": node_id,
"text": node.get_content(metadata_mode=MetadataMode.NONE),
}
# Add metadata if present
if node.metadata:
document["metadata"] = node.metadata
if self.add_sparse_vector and self._sparse_embedding_model is not None:
sparse_inputs.append(node.get_content(metadata_mode=MetadataMode.EMBED))
documents.append(document)
if sparse_inputs:
sparse_vectors = self._sparse_embedding_model.get_text_embedding_batch(
sparse_inputs
)
for i, sparse_vector in enumerate(sparse_vectors):
documents[i][SPARSE_VECTOR_KEY] = {
"indices": list(sparse_vector.keys()),
"values": list(sparse_vector.values()),
}
# Process in batches
for i in range(0, len(documents), self.batch_size):
batch = documents[i : i + self.batch_size]
try:
result = self._client.upload_documents(
namespace_name=self.namespace, documents=batch
)
logger.debug(f"Uploaded batch of {len(batch)} documents")
except MoorchehError as e:
logger.error(f"Error uploading documents batch: {e}")
raise
logger.info(
f"Added {len(documents)} text documents to namespace {self.namespace}"
)
return ids
def _add_vector_nodes(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
"""Add vector nodes to vector namespace."""
vectors = []
ids = []
sparse_inputs = []
if all(node.embedding is None for node in nodes):
raise ValueError("No embeddings could be found within your nodes")
for node in nodes:
if node.embedding is None:
warnings.warn(
f"Node {node.node_id} has no embedding for vector namespace",
UserWarning,
)
node_id = node.node_id or str(uuid.uuid4())
ids.append(node_id)
vector = {
"id": node_id,
"vector": node.embedding,
}
# Add metadata, including text content
metadata = dict(node.metadata) if node.metadata else {}
metadata["text"] = metadata.pop(
"text", node.get_content(metadata_mode=MetadataMode.NONE)
)
vector["metadata"] = metadata
if self.add_sparse_vector and self._sparse_embedding_model is not None:
sparse_inputs.append(node.get_content(metadata_mode=MetadataMode.EMBED))
vectors.append(vector)
if sparse_inputs:
sparse_vectors = self._sparse_embedding_model.get_text_embedding_batch(
sparse_inputs
)
for i, sparse_vector in enumerate(sparse_vectors):
documents[i][SPARSE_VECTOR_KEY] = {
"indices": list(sparse_vector.keys()),
"values": list(sparse_vector.values()),
}
# Process in batches
for i in range(0, len(vectors), self.batch_size):
batch = vectors[i : i + self.batch_size]
try:
result = self._client.upload_vectors(
namespace_name=self.namespace, vectors=batch
)
logger.debug(f"Uploaded batch of {len(batch)} vectors")
except MoorchehError as e:
logger.error(f"Error uploading vectors batch: {e}")
raise
logger.info(f"Added {len(vectors)} vectors to namespace {self.namespace}")
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
try:
if self.namespace_type == "text":
result = self._client.delete_documents(
namespace_name=self.namespace, ids=[ref_doc_id]
)
else:
result = self._client.delete_vectors(
namespace_name=self.namespace, ids=[ref_doc_id]
)
logger.info(
f"Deleted document {ref_doc_id} from namespace {self.namespace}"
)
except MoorchehError as e:
logger.error(f"Error deleting document {ref_doc_id}: {e}")
raise
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query Moorcheh vector store.
Args:
query (VectorStoreQuery): query object
Returns:
VectorStoreQueryResult: query result
"""
moorcheh_sparse_vector = None
if (
query.mode in (VectorStoreQueryMode.SPARSE, VectorStoreQueryMode.HYBRID)
and self._sparse_embedding_model is not None
):
if query.query_str is None:
raise ValueError(
"query_str must be specified if mode is SPARSE or HYBRID."
)
sparse_vector = self._sparse_embedding_model.get_query_embedding(
query.query_str
)
if query.alpha is not None:
moorcheh_sparse_vector = {
"indices": list(sparse_vector.keys()),
"values": [v * (1 - query.alpha) for v in sparse_vector.values()],
}
else:
moorcheh_sparse_vector = {
"indices": list(sparse_vector.keys()),
"values": list(sparse_vector.values()),
}
"""
if query.mode != VectorStoreQueryMode.DEFAULT:
logger.warning(
f"Moorcheh does not support query mode {query.mode}. "
"Using default mode instead."
)
"""
# Prepare search parameters
search_kwargs = {
"namespaces": [self.namespace],
"top_k": query.similarity_top_k,
}
# Add similarity threshold if provided
# if query.similarity_top_k is not None:
# search_kwargs["threshold"] = query.similarity_top_k
# Handle query input
if query.query_str is not None:
search_kwargs["query"] = query.query_str
elif query.query_embedding is not None:
search_kwargs["query"] = query.query_embedding
else:
raise ValueError("Either query_str or query_embedding must be provided")
# TODO: Add metadata filter support when available in Moorcheh SDK
if query.filters is not None:
logger.warning(
"Metadata filters are not yet supported by Moorcheh integration"
)
try:
# Execute search
search_result = self._client.search(**search_kwargs)
# Parse results
nodes = []
similarities = []
ids = []
results = search_result.get("results", [])
for result in results:
node_id = result.get("id")
score = result.get("score", 0.0)
if node_id is None:
logger.warning("Found result with no ID, skipping")
continue
ids.append(node_id)
similarities.append(score)
# Extract text and metadata
if self.namespace_type == "text":
text = result.get("text", "")
metadata = result.get("metadata", {})
else:
# For vector namespace, text is stored in metadata
metadata = result.get("metadata", {})
text = metadata.pop("text", "") # Remove text from metadata
# Create node
node = TextNode(
text=text,
id_=node_id,
metadata=metadata,
)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=similarities,
ids=ids,
)
except MoorchehError as e:
logger.error(f"Error executing query: {e}")
raise
def get_generative_answer(
self,
query: str,
top_k: int = 5,
ai_model: str = "anthropic.claude-3-7-sonnet-20250219-v1:0",
llm: Optional[LLM] = None,
**kwargs: Any,
) -> str:
"""
Get a generative AI answer using Moorcheh's built-in RAG capability.
This method leverages Moorcheh's information-theoretic approach
to provide context-aware answers directly from the API.
Args:
query (str): The query string.
top_k (int): Number of top results to use for context.
**kwargs: Additional keyword arguments passed to Moorcheh.
Returns:
str: Generated answer string.
"""
try:
# incorporate llama_index llms
if llm:
vs_query = VectorStoreQuery(query_str=query, similarity_top_k=top_k)
result = self.query(vs_query)
context = "\n\n".join([node.text for node in result.nodes])
prompt = f"""Use the context below to answer the question. Context: {context} Question: {query} Answer:"""
return llm.complete(prompt).text
else:
result = self._client.get_generative_answer(
namespace=self.namespace,
query=query,
top_k=top_k,
ai_model=ai_model,
**kwargs,
)
return result.get("answer", "")
except MoorchehError as e:
logger.error(f"Error getting generative answer: {e}")
raise
if __name__ == "__main__":
print("MoorchehVectorStore loaded successfully.")
| MoorchehVectorStore |
python | scipy__scipy | scipy/optimize/tests/test_optimize.py | {
"start": 39442,
"end": 39545
} | class ____(CheckOptimizeParameterized):
use_wrapper = True
disp = False
| TestOptimizeWrapperNoDisp |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP039.py | {
"start": 256,
"end": 286
} | class ____:
pass
@decorator
| A |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 69799,
"end": 71373
} | class ____(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| TokenClassifierOutput |
python | coleifer__peewee | tests/fields.py | {
"start": 5024,
"end": 5163
} | class ____(TestModel):
date_time = DateTimeField(formats=[
'%m/%d/%Y %I:%M %p',
'%Y-%m-%d %H:%M:%S'])
| CustomDateTimeModel |
python | pandas-dev__pandas | asv_bench/benchmarks/reindex.py | {
"start": 133,
"end": 1731
} | class ____:
def setup(self):
rng = date_range(start="1/1/1970", periods=10000, freq="1min")
self.df = DataFrame(np.random.rand(10000, 10), index=rng, columns=range(10))
self.df["foo"] = "bar"
self.rng_subset = Index(rng[::2])
self.df2 = DataFrame(
index=range(10000), data=np.random.rand(10000, 30), columns=range(30)
)
N = 5000
K = 200
level1 = Index([f"i-{i}" for i in range(N)], dtype=object).values.repeat(K)
level2 = np.tile(Index([f"i-{i}" for i in range(K)], dtype=object).values, N)
index = MultiIndex.from_arrays([level1, level2])
self.s = Series(np.random.randn(N * K), index=index)
self.s_subset = self.s[::2]
self.s_subset_no_cache = self.s[::2].copy()
mi = MultiIndex.from_product([rng, range(100)])
self.s2 = Series(np.random.randn(len(mi)), index=mi)
self.s2_subset = self.s2[::2].copy()
def time_reindex_dates(self):
self.df.reindex(self.rng_subset)
def time_reindex_columns(self):
self.df2.reindex(columns=self.df.columns[1:5])
def time_reindex_multiindex_with_cache(self):
# MultiIndex._values gets cached
self.s.reindex(self.s_subset.index)
def time_reindex_multiindex_no_cache(self):
# Copy to avoid MultiIndex._values getting cached
self.s.reindex(self.s_subset_no_cache.index.copy())
def time_reindex_multiindex_no_cache_dates(self):
# Copy to avoid MultiIndex._values getting cached
self.s2_subset.reindex(self.s2.index.copy())
| Reindex |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/pipes/message_readers.py | {
"start": 2388,
"end": 5181
} | class ____(PipesBlobStoreMessageReader):
"""Message reader that reads messages by periodically reading message chunks from a specified GCS
bucket.
If `log_readers` is passed, this reader will also start the passed readers
when the first message is received from the external process.
Args:
interval (float): interval in seconds between attempts to download a chunk
bucket (str): The GCS bucket to read from.
client (Optional[cloud.google.storage.Client]): The GCS client to use.
log_readers (Optional[Sequence[PipesLogReader]]): A set of log readers for logs on GCS.
include_stdio_in_messages (bool): Whether to send stdout/stderr to Dagster via Pipes messages. Defaults to False.
"""
def __init__(
self,
*,
interval: float = 10,
bucket: str,
client: Optional[GCSClient] = None,
log_readers: Optional[Sequence[PipesLogReader]] = None,
include_stdio_in_messages: bool = False,
):
super().__init__(
interval=interval,
log_readers=log_readers,
)
self.bucket = check.str_param(bucket, "bucket")
self.include_stdio_in_messages = check.bool_param(
include_stdio_in_messages, "include_stdio_in_messages"
)
self.client = client or GCSClient()
@contextmanager
def get_params(self) -> Iterator[PipesParams]:
key_prefix = "".join(random.choices(string.ascii_letters, k=30))
yield {
"bucket": self.bucket,
"key_prefix": key_prefix,
PipesBlobStoreMessageWriter.INCLUDE_STDIO_IN_MESSAGES_KEY: self.include_stdio_in_messages,
}
def messages_are_readable(self, params: PipesParams) -> bool:
key_prefix = params.get("key_prefix")
if key_prefix is not None:
try:
# just call head object on f"{key_prefix}/1.json" (no need to download it)
self.client.get_bucket(self.bucket).blob(f"{key_prefix}/1.json").exists()
return True
except Exception:
return False
else:
return False
def download_messages_chunk(self, index: int, params: PipesParams) -> Optional[str]:
key = f"{params['key_prefix']}/{index}.json"
try:
obj = self.client.get_bucket(self.bucket).blob(key).download_as_bytes()
return obj.decode("utf-8")
except Exception:
return None
def no_messages_debug_text(self) -> str:
return (
f"Attempted to read messages from GCS bucket {self.bucket}. Expected"
" PipesGCSMessageWriter to be explicitly passed to open_dagster_pipes in the external"
" process."
)
| PipesGCSMessageReader |
python | getsentry__sentry | tests/sentry/incidents/test_logic.py | {
"start": 14220,
"end": 14866
} | class ____(TestCase, BaseIncidentsTest):
def test_no_snapshot(self) -> None:
incident = self.create_incident()
activity = create_incident_activity(
incident,
IncidentActivityType.STATUS_CHANGE,
value=str(IncidentStatus.CLOSED.value),
previous_value=str(IncidentStatus.WARNING.value),
)
assert activity.incident == incident
assert activity.type == IncidentActivityType.STATUS_CHANGE.value
assert activity.value == str(IncidentStatus.CLOSED.value)
assert activity.previous_value == str(IncidentStatus.WARNING.value)
| CreateIncidentActivityTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/annotations1.py | {
"start": 1640,
"end": 1680
} | class ____:
uuid = uuid.uuid4()
| ClassG |
python | patrick-kidger__equinox | equinox/debug/_backward_nan.py | {
"start": 1333,
"end": 1997
} | class ____(Module):
obj: Any
def __repr__(self):
return tree_pformat(self.obj, short_arrays=False)
@_backward_nan.def_bwd
def _backward_nan_bwd(residuals, grad_x, perturbed, x, name, terminate):
del residuals, perturbed
msg = " primals={x}\ncotangents={grad_x}"
if name is not None:
msg = f"{name}:\n" + msg
jax.debug.print(msg, x=_LongRepr(x), grad_x=_LongRepr(grad_x), ordered=True)
if terminate:
nans = [
jnp.isnan(a).any() for a in jtu.tree_leaves(filter(grad_x, is_array_like))
]
grad_x = error_if(grad_x, jnp.any(jnp.stack(nans)), "Encountered NaN")
return grad_x
| _LongRepr |
python | wandb__wandb | wandb/automations/_filters/operators.py | {
"start": 1276,
"end": 2075
} | class ____:
def __or__(self, other: Any) -> Or:
"""Implements default `|` behavior: `a | b -> Or(a, b)`."""
return Or(exprs=(self, other))
def __and__(self, other: Any) -> And:
"""Implements default `&` behavior: `a & b -> And(a, b)`."""
from .expressions import FilterExpr
if isinstance(other, (BaseOp, FilterExpr)):
return And(exprs=(self, other))
return NotImplemented
def __invert__(self) -> Not:
"""Implements default `~` behavior: `~a -> Not(a)`."""
return Not(expr=self)
# Base type for parsing MongoDB filter operators, e.g. from dicts like
# `{"$and": [...]}`, `{"$or": [...]}`, `{"$gt": 1.0}`, etc.
# Instances are frozen for easier comparison and more predictable behavior.
| SupportsBitwiseLogicalOps |
python | google__pytype | pytype/tests/test_pyi2.py | {
"start": 7761,
"end": 8163
} | class ____(test_base.BaseTest):
"""Tests for __future__."""
def test_skip_reexport(self):
"""Check that we don't reexport __future__ imports."""
ty = self.Infer("""
from __future__ import annotations
class A:
pass
""")
self.assertTypesMatchPytd(
ty,
"""
class A: ...
""",
)
if __name__ == "__main__":
test_base.main()
| PYITestFuture |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 37004,
"end": 37825
} | class ____(Constraint):
"""Base constraint object that represent dataframe column shape constraints.
Args:
error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.
markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.
"""
def __init__(self, error_description=None, markdown_description=None):
super().__init__(
error_description=error_description, markdown_description=markdown_description
)
def validate(self, dataframe, column_name):
pass
@staticmethod
def get_offending_row_pairs(dataframe, column_name):
return zip(dataframe.index.tolist(), dataframe[column_name].tolist())
| ColumnConstraint |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride6.py | {
"start": 1225,
"end": 1603
} | class ____(Parent1[str]):
@overload
def m1(self, x: Literal[True]) -> int: ...
@overload
def m1(self, x: Literal[False]) -> float: ...
@overload
def m1(self, x: bytes) -> bytes: ...
# This should generate an error because the overloads are
# in the wrong order.
def m1(self, x: bool | bytes) -> int | float | bytes:
return x
| Child1_5 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 1506,
"end": 1643
} | class ____(DagsterError):
"""Indicates that the rules for a definition have been violated by the user."""
| DagsterInvalidDefinitionError |
python | squidfunk__mkdocs-material | material/plugins/privacy/config.py | {
"start": 1736,
"end": 3227
} | class ____(Config):
enabled = Type(bool, default = True)
concurrency = Type(int, default = max(1, os.cpu_count() - 1))
# Settings for caching
cache = Type(bool, default = True)
cache_dir = Type(str, default = ".cache/plugin/privacy")
# Settings for logging
log = Type(bool, default = True)
log_level = Choice(LogLevel, default = "info")
# Settings for external assets
assets = Type(bool, default = True)
assets_fetch = Type(bool, default = True)
assets_fetch_dir = Type(str, default = "assets/external")
assets_include = ListOfItems(Type(str), default = [])
assets_exclude = ListOfItems(Type(str), default = [])
assets_expr_map = DictOfItems(Type(str), default = {})
# Settings for external links
links = Type(bool, default = True)
links_attr_map = DictOfItems(Type(str), default = {})
links_noopener = Type(bool, default = True)
# Deprecated settings
external_assets = Deprecated(message = "Deprecated, use 'assets_fetch'")
external_assets_dir = Deprecated(moved_to = "assets_fetch_dir")
external_assets_include = Deprecated(moved_to = "assets_include")
external_assets_exclude = Deprecated(moved_to = "assets_exclude")
external_assets_expr = Deprecated(moved_to = "assets_expr_map")
external_links = Deprecated(moved_to = "links")
external_links_attr_map = Deprecated(moved_to = "links_attr_map")
external_links_noopener = Deprecated(moved_to = "links_noopener")
| PrivacyConfig |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | {
"start": 30407,
"end": 33427
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_equal(self):
small = constant_op.constant([1, 2], name="small")
with ops.control_dependencies(
[check_ops.assert_greater_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_less(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 4], name="big")
with self.assertRaisesOpError( # pylint:disable=g-error-prone-assert-raises
"fail"):
with ops.control_dependencies(
[check_ops.assert_greater_equal(
small, big, message="fail")]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3, 2], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1], name="small")
big = constant_op.constant([3, 1], name="big")
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="big")
big = constant_op.constant([3, 1], name="small")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
(errors.InvalidArgumentError, ValueError),
(r"Incompatible shapes: \[2\] vs. \[3\]|"
r"Dimensions must be equal, but are 2 and 3")):
with ops.control_dependencies(
[check_ops.assert_greater_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_greater_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_static_check_in_graph_mode(self):
with ops.Graph().as_default():
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
errors.InvalidArgumentError, "Custom error message"):
check_ops.assert_greater_equal(0, 1, message="Custom error message")
| AssertGreaterEqualTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.