language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PyCQA__pylint | tests/functional/s/super/super_checks.py | {
"start": 239,
"end": 423
} | class ____:
"""old style"""
def hop(self):
"""hop"""
super(Aaaa, self).hop() # >=3.0:[no-member]
def __init__(self):
super(Aaaa, self).__init__()
| Aaaa |
python | django__django | tests/admin_changelist/admin.py | {
"start": 1468,
"end": 1690
} | class ____(admin.ModelAdmin):
list_display = ["name", "parent__name", "parent__parent__name"]
search_fields = ["parent__name__exact", "parent__age__exact"]
site.register(GrandChild, GrandChildAdmin)
| GrandChildAdmin |
python | huggingface__transformers | src/transformers/models/nanochat/modeling_nanochat.py | {
"start": 15364,
"end": 18539
} | class ____(NanoChatPreTrainedModel):
def __init__(self, config: NanoChatConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[NanoChatDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = NanoChatRMSNorm(eps=config.rms_norm_eps)
self.rotary_emb = NanoChatRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
hidden_states = self.norm(hidden_states) # Additional norm before the layers
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
| NanoChatModel |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/__init__.py | {
"start": 12577,
"end": 19872
} | class ____:
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
dist = next(iter(dists), None)
if dist is not None:
return dist
else:
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context) for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, 'find_distributions', None) for finder in sys.meta_path
)
return filter(None, declared)
@classmethod
def _local(cls, root='.'):
from pep517 import build, meta
system = build.compat_system(root)
builder = functools.partial(
meta.build,
source_dir=root,
system=system,
)
return PathDistribution(zipp.Path(meta.build_as_zip(builder)))
@property
def metadata(self) -> _meta.PackageMetadata:
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return _adapters.Message(email.message_from_string(text))
@property
def name(self):
"""Return the 'Name' metadata for the distribution package."""
return self.metadata['Name']
@property
def _normalized_name(self):
"""Return a normalized version of the name."""
return Prepared.normalize(self.name)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
@pass_none
def make_files(lines):
return list(starmap(make_file, csv.reader(lines)))
return make_files(self._read_files_distinfo() or self._read_files_egginfo())
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return source and self._deps_from_requires_text(source)
@classmethod
def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and f'extra == "{name}"'
def quoted_marker(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = f'({markers})'
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
def url_req_space(req):
"""
PEP 508 requires a space between the url_spec and the quoted_marker.
Ref python/importlib_metadata#357.
"""
# '@' is uniquely indicative of a url_req.
return ' ' * ('@' in req)
for section in sections:
space = url_req_space(section.value)
yield section.value + space + quoted_marker(section.name)
| Distribution |
python | Pylons__pyramid | tests/test_config/test_adapters.py | {
"start": 13035,
"end": 13114
} | class ____:
def __init__(self, root):
self.root = root
| DummyTraverser |
python | python__mypy | mypy/nodes.py | {
"start": 56252,
"end": 57822
} | class ____(Statement):
__slots__ = (
"index",
"index_type",
"unanalyzed_index_type",
"inferred_item_type",
"inferred_iterator_type",
"expr",
"body",
"else_body",
"is_async",
)
__match_args__ = ("index", "index_type", "expr", "body", "else_body")
# Index variables
index: Lvalue
# Type given by type comments for index, can be None
index_type: mypy.types.Type | None
# Original, not semantically analyzed type in annotation (used for reprocessing)
unanalyzed_index_type: mypy.types.Type | None
# Inferred iterable item type
inferred_item_type: mypy.types.Type | None
# Inferred iterator type
inferred_iterator_type: mypy.types.Type | None
# Expression to iterate
expr: Expression
body: Block
else_body: Block | None
is_async: bool # True if `async for ...` (PEP 492, Python 3.5)
def __init__(
self,
index: Lvalue,
expr: Expression,
body: Block,
else_body: Block | None,
index_type: mypy.types.Type | None = None,
) -> None:
super().__init__()
self.index = index
self.index_type = index_type
self.unanalyzed_index_type = index_type
self.inferred_item_type = None
self.inferred_iterator_type = None
self.expr = expr
self.body = body
self.else_body = else_body
self.is_async = False
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_for_stmt(self)
| ForStmt |
python | ray-project__ray | python/ray/data/_internal/datasource/json_datasource.py | {
"start": 9730,
"end": 10899
} | class ____(io.RawIOBase):
"""Wrapper that prevents premature file closure and ensures full-buffered reads.
This is necessary for two reasons:
1. The datasource reads the file twice -- first to sample and determine the chunk size,
and again to load the actual data. Since pandas assumes ownership of the file and
may close it, we prevent that by explicitly detaching the underlying file before
closing the buffer.
2. pandas wraps the file in a TextIOWrapper to decode bytes into text. TextIOWrapper
prefers calling read1(), which doesn't prefetch for random-access files
(e.g., from PyArrow). This wrapper forces all reads through the full buffer to
avoid inefficient small-range S3 GETs.
"""
def __init__(self, file: io.RawIOBase, buffer_size: int):
self._file = io.BufferedReader(file, buffer_size=buffer_size)
def read(self, size=-1, /):
return self._file.read(size)
def readable(self) -> bool:
return True
def close(self):
if not self.closed:
self._file.detach()
self._file.close()
super().close()
| StrictBufferedReader |
python | huggingface__transformers | examples/pytorch/3d_parallel_checks.py | {
"start": 26522,
"end": 27696
} | class ____:
"""Collator for context parallel training that splits sequences into chunks."""
def __init__(self, cp_mesh: Optional[DeviceMesh] = None):
self.cp_mesh = cp_mesh
def __call__(self, batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
batch = default_collate(batch)
if self.cp_mesh is not None and self.cp_mesh.size() > 1:
# Get sequence length from the input batch
seq_len = batch["input_ids"].shape[1]
assert seq_len % self.cp_mesh.size() == 0, (
f"Sequence length {seq_len} must be divisible by CP size {self.cp_mesh.size()}"
)
chunk_size = seq_len // self.cp_mesh.size()
cp_rank = self.cp_mesh.get_local_rank()
start_idx = cp_rank * chunk_size
end_idx = start_idx + chunk_size
# Keep only the local chunk of the sequence
batch["input_ids"] = batch["input_ids"][:, start_idx:end_idx]
batch["attention_mask"] = batch["attention_mask"][:, start_idx:end_idx]
batch["labels"] = batch["labels"][:, start_idx:end_idx]
return batch
| ContextParallelCollator |
python | tensorflow__tensorflow | tensorflow/python/autograph/impl/api.py | {
"start": 3474,
"end": 3568
} | class ____(AutoGraphError):
"""Raised during the conversion process."""
pass
| ConversionError |
python | OmkarPathak__pygorithm | tests/test_data_structure.py | {
"start": 4444,
"end": 5133
} | class ____(unittest.TestCase):
def test_binary_tree(self):
root = tree.Node(1)
root.set_left(tree.Node(2))
root.set_right(tree.Node(3))
root.left.set_left(tree.Node(4))
Tree = tree.BinaryTree()
inorderTraversal = Tree.inorder(root)
expectedResult = [4, 2, 1, 3]
self.assertEqual(inorderTraversal, expectedResult)
preorderTraversal = Tree.preorder(root)
expectedResult = [1, 2, 4, 3]
self.assertEqual(preorderTraversal, expectedResult)
postorderTraversal = Tree.postorder(root)
expectedResult = [4, 2, 3, 1]
self.assertEqual(postorderTraversal, expectedResult)
| TestBinaryTree |
python | getsentry__sentry | tests/sentry/sentry_metrics/test_base_indexer.py | {
"start": 3173,
"end": 7419
} | class ____(TestCase):
def test_basic(self) -> None:
key_results = KeyResults()
assert key_results.results == {}
assert key_results.get_mapped_results() == {}
assert key_results.get_mapped_key_strings_to_ints() == {}
org_strings = {1: {"a", "b", "c"}, 2: {"e", "f"}}
collection = KeyCollection(org_strings)
assert key_results.get_unmapped_keys(collection).mapping == org_strings
key_result = KeyResult(1, "a", 10)
key_results.add_key_results([key_result])
assert key_results.get_mapped_key_strings_to_ints() == {"1:a": 10}
assert key_results.get_mapped_results() == {1: {"a": 10}}
assert key_results.get_unmapped_keys(collection).mapping == {1: {"b", "c"}, 2: {"e", "f"}}
key_result_list = [
KeyResult(1, "b", 11),
KeyResult(1, "c", 12),
KeyResult(2, "e", 13),
KeyResult(2, "f", 14),
]
key_results.add_key_results(key_result_list)
assert key_results.get_mapped_key_strings_to_ints() == {
"1:a": 10,
"1:b": 11,
"1:c": 12,
"2:e": 13,
"2:f": 14,
}
assert key_results.get_mapped_results() == {
1: {"a": 10, "b": 11, "c": 12},
2: {"e": 13, "f": 14},
}
assert key_results.get_unmapped_keys(collection).mapping == {}
def test_merges_with_metadata(self) -> None:
org_id = 1
cache_mappings = {"cache1": 1, "cache2": 2}
read_mappings = {"read3": 3, "read4": 4}
hardcode_mappings = {"hardcode5": 5, "hardcode6": 6}
write_mappings = {"write7": 7, "write8": 8}
rate_limited_mappings = {"limited9": None, "limited10": None}
mappings = {
*cache_mappings,
*read_mappings,
*hardcode_mappings,
*write_mappings,
*rate_limited_mappings,
}
kr_cache = KeyResults()
kr_dbread = KeyResults()
kr_hardcoded = KeyResults()
kr_write = KeyResults()
kr_limited = KeyResults()
assert kr_cache.results == {} and kr_cache.meta == {}
assert kr_dbread.results == {} and kr_dbread.meta == {}
assert kr_hardcoded.results == {} and kr_hardcoded.meta == {}
assert kr_write.results == {} and kr_write.meta == {}
assert kr_limited.results == {} and kr_limited.meta == {}
kr_cache.add_key_results(
[KeyResult(org_id=org_id, string=k, id=v) for k, v in cache_mappings.items()],
FetchType.CACHE_HIT,
)
kr_dbread.add_key_results(
[KeyResult(org_id=org_id, string=k, id=v) for k, v in read_mappings.items()],
FetchType.DB_READ,
)
kr_hardcoded.add_key_results(
[KeyResult(org_id=org_id, string=k, id=v) for k, v in hardcode_mappings.items()],
FetchType.HARDCODED,
)
kr_write.add_key_results(
[KeyResult(org_id=org_id, string=k, id=v) for k, v in write_mappings.items()],
FetchType.FIRST_SEEN,
)
kr_limited.add_key_results(
[KeyResult(org_id=org_id, string=k, id=v) for k, v in rate_limited_mappings.items()],
FetchType.RATE_LIMITED,
FetchTypeExt(is_global=False),
)
kr_merged = kr_cache.merge(kr_dbread).merge(kr_hardcoded).merge(kr_write).merge(kr_limited)
assert len(kr_merged.get_mapped_results()[org_id]) == len(mappings)
meta = kr_merged.get_fetch_metadata()
assert_fetch_type_for_tag_string_set(
meta[org_id], FetchType.DB_READ, set(read_mappings.keys())
)
assert_fetch_type_for_tag_string_set(
meta[org_id], FetchType.HARDCODED, set(hardcode_mappings.keys())
)
assert_fetch_type_for_tag_string_set(
meta[org_id], FetchType.FIRST_SEEN, set(write_mappings.keys())
)
assert_fetch_type_for_tag_string_set(
meta[org_id], FetchType.CACHE_HIT, set(cache_mappings.keys())
)
assert_fetch_type_for_tag_string_set(
meta[org_id], FetchType.RATE_LIMITED, set(rate_limited_mappings.keys())
)
| KeyResultsTest |
python | huggingface__transformers | src/transformers/models/depth_pro/modeling_depth_pro.py | {
"start": 22395,
"end": 23448
} | class ____(nn.Module):
def __init__(self, config: DepthProConfig):
super().__init__()
self.config = config
self.feature_upsample = DepthProFeatureUpsample(config)
self.fuse_image_with_low_res = nn.Conv2d(
in_channels=config.scaled_images_feature_dims[0] * 2,
out_channels=config.scaled_images_feature_dims[0],
kernel_size=1,
stride=1,
padding=0,
bias=True,
)
self.feature_projection = DepthProFeatureProjection(config)
def forward(self, features: list[torch.Tensor]) -> list[torch.Tensor]:
features = self.feature_upsample(features)
# global features = low res features + image features
global_features = torch.cat((features[1], features[0]), dim=1)
global_features = self.fuse_image_with_low_res(global_features)
features = [global_features, *features[2:]]
features = self.feature_projection(features)
return features
# General docstring
@auto_docstring
| DepthProNeck |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/tests/test_new_features.py | {
"start": 517,
"end": 2709
} | class ____:
"""Test custom parsers and custom folder functionality."""
def test_custom_folder_without_parsers_raises_error(self):
"""Test that custom_folder raises error when used without custom_parsers."""
with pytest.raises(
ValueError,
match="custom_folder can only be used when custom_parsers are provided",
):
ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="test_token",
custom_folder="/tmp/test",
)
def test_custom_parsers_with_custom_folder(self):
"""Test that custom_parsers and custom_folder work together."""
mock_parser = MagicMock()
custom_parsers = {FileType.PDF: mock_parser}
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="test_token",
custom_parsers=custom_parsers,
custom_folder="/tmp/test",
)
assert reader.custom_parsers == custom_parsers
assert reader.custom_folder == "/tmp/test"
assert reader.custom_parser_manager is not None
def test_custom_parsers_with_default_folder(self):
"""Test that custom_parsers uses current directory when custom_folder not specified."""
mock_parser = MagicMock()
custom_parsers = {FileType.PDF: mock_parser}
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="test_token",
custom_parsers=custom_parsers,
)
assert reader.custom_parsers == custom_parsers
assert reader.custom_folder == os.getcwd()
assert reader.custom_parser_manager is not None
def test_no_custom_parsers_no_folder(self):
"""Test that without custom_parsers, custom_folder is None and no parser manager is created."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
assert reader.custom_parsers == {}
assert reader.custom_folder is None
assert reader.custom_parser_manager is None
| TestCustomParsersAndFolder |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/auth_manager/cli/test_definition.py | {
"start": 1134,
"end": 1263
} | class ____:
def test_aws_auth_manager_cli_commands(self):
assert len(AWS_AUTH_MANAGER_COMMANDS) == 2
| TestAwsCliDefinition |
python | walkccc__LeetCode | solutions/1740. Find Distance in a Binary Tree/1740.py | {
"start": 0,
"end": 562
} | class ____:
def findDistance(self, root: TreeNode, p: int, q: int) -> int:
def getLCA(root, p, q):
if not root or root.val == p or root.val == q:
return root
l = getLCA(root.left, p, q)
r = getLCA(root.right, p, q)
if l and r:
return root
return l or r
def dist(lca, target):
if not lca:
return 10000
if lca.val == target:
return 0
return 1 + min(dist(lca.left, target), dist(lca.right, target))
lca = getLCA(root, p, q)
return dist(lca, p) + dist(lca, q)
| Solution |
python | apache__airflow | providers/apache/kafka/tests/integration/apache/kafka/hooks/test_producer.py | {
"start": 1218,
"end": 2300
} | class ____:
"""
Test consumer hook.
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="kafka_default",
conn_type="kafka",
extra=json.dumps(config),
)
)
def test_produce(self):
"""test producer hook functionality"""
topic = "producer_hook_integration_test"
def acked(err, msg):
if err is not None:
raise Exception(f"{err}")
assert msg.topic() == topic
assert msg.partition() == 0
assert msg.offset() == 0
# Standard Init
p_hook = KafkaProducerHook(kafka_config_id="kafka_default")
producer = p_hook.get_producer()
producer.produce(topic, key="p1", value="p2", on_delivery=acked)
producer.poll(0)
producer.flush()
hook = KafkaAdminClientHook(kafka_config_id="kafka_default")
hook.delete_topic(topics=[topic])
| TestProducerHook |
python | mlflow__mlflow | mlflow/models/auth_policy.py | {
"start": 65,
"end": 784
} | class ____:
"""
A minimal list of scopes that the user should have access to
in order to invoke this model
Note: This is only compatible with Databricks Environment currently.
TODO: Add Databricks Documentation for User Auth Policy
Args:
api_scopes: A list of scopes. Example: "vectorsearch.vector-search-indexes", "sql"
"""
def __init__(self, api_scopes: list[str]):
self._api_scopes = api_scopes
@property
def api_scopes(self) -> list[str]:
return self._api_scopes
@api_scopes.setter
def api_scopes(self, value: list[str]):
self._api_scopes = value
def to_dict(self):
return {"api_scopes": self.api_scopes}
| UserAuthPolicy |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_getitem.py | {
"start": 22736,
"end": 23328
} | class ____:
@pytest.mark.parametrize("key", [{1}, {1: 1}])
def test_getitem_dict_and_set_deprecated(self, key):
# GH#42825 enforced in 2.0
ser = Series([1, 2, 3])
with pytest.raises(TypeError, match="as an indexer is not supported"):
ser[key]
@pytest.mark.parametrize("key", [{1}, {1: 1}])
def test_setitem_dict_and_set_disallowed(self, key):
# GH#42825 enforced in 2.0
ser = Series([1, 2, 3])
with pytest.raises(TypeError, match="as an indexer is not supported"):
ser[key] = 1
| TestGetitemDeprecatedIndexers |
python | kamyu104__LeetCode-Solutions | Python/the-wording-game.py | {
"start": 104,
"end": 943
} | class ____(object):
def canAliceWin(self, a, b):
"""
:type a: List[str]
:type b: List[str]
:rtype: bool
"""
def is_closely_greater(a, b):
return ord(a[0])-ord(b[0]) <= 1 and a > b
result = True
i, j = 0, -1
for _ in xrange(len({w[0] for w in a})+len({w[0] for w in b})): # each player takes turns using a word with a different first letter than the last word he played
j = next((j for j in xrange(j+1, len(b)) if is_closely_greater(b[j], a[i])), len(b))
if j == len(b):
break
while j+1 < len(b) and b[j+1][0] == b[j][0]: # play the lexicographically greatest word with the same first letter
j += 1
a, b, i, j, result = b, a, j, i, not result
return result
| Solution |
python | numba__numba | numba/core/typeinfer.py | {
"start": 20786,
"end": 26728
} | class ____(object):
"""Constraint for calling functions.
Perform case analysis foreach combinations of argument types.
"""
signature = None
def __init__(self, target, func, args, kws, vararg, loc):
self.target = target
self.func = func
self.args = args
self.kws = kws or {}
self.vararg = vararg
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of call at {loc}", loc=self.loc):
typevars = typeinfer.typevars
with new_error_context(
"resolving caller type: {func}", func=self.func):
fnty = typevars[self.func].getone()
with new_error_context("resolving callee type: {fnty}",
fnty=fnty):
self.resolve(typeinfer, typevars, fnty)
def resolve(self, typeinfer, typevars, fnty):
assert fnty
context = typeinfer.context
r = fold_arg_vars(typevars, self.args, self.vararg, self.kws)
if r is None:
# Cannot resolve call type until all argument types are known
return
pos_args, kw_args = r
# Check argument to be precise
for a in itertools.chain(pos_args, kw_args.values()):
# Forbids imprecise type except array of undefined dtype
if not a.is_precise() and not isinstance(a, types.Array):
return
# Resolve call type
if isinstance(fnty, types.TypeRef):
# Unwrap TypeRef
fnty = fnty.instance_type
try:
sig = typeinfer.resolve_call(fnty, pos_args, kw_args)
except ForceLiteralArg as e:
# Adjust for bound methods
folding_args = ((fnty.this,) + tuple(self.args)
if isinstance(fnty, types.BoundFunction)
else self.args)
folded = e.fold_arguments(folding_args, self.kws)
requested = set()
unsatisfied = set()
for idx in e.requested_args:
maybe_arg = typeinfer.func_ir.get_definition(folded[idx])
if isinstance(maybe_arg, ir.Arg):
requested.add(maybe_arg.index)
else:
unsatisfied.add(idx)
if unsatisfied:
raise TypingError("Cannot request literal type.", loc=self.loc)
elif requested:
raise ForceLiteralArg(requested, loc=self.loc)
if sig is None:
# Note: duplicated error checking.
# See types.BaseFunction.get_call_type
# Arguments are invalid => explain why
headtemp = "Invalid use of {0} with parameters ({1})"
args = [str(a) for a in pos_args]
args += ["%s=%s" % (k, v) for k, v in sorted(kw_args.items())]
head = headtemp.format(fnty, ', '.join(map(str, args)))
desc = context.explain_function_type(fnty)
msg = '\n'.join([head, desc])
raise TypingError(msg)
typeinfer.add_type(self.target, sig.return_type, loc=self.loc)
# If the function is a bound function and its receiver type
# was refined, propagate it.
if (isinstance(fnty, types.BoundFunction)
and sig.recvr is not None
and sig.recvr != fnty.this):
refined_this = context.unify_pairs(sig.recvr, fnty.this)
if (refined_this is None and
fnty.this.is_precise() and
sig.recvr.is_precise()):
msg = "Cannot refine type {} to {}".format(
sig.recvr, fnty.this,
)
raise TypingError(msg, loc=self.loc)
if refined_this is not None and refined_this.is_precise():
refined_fnty = fnty.copy(this=refined_this)
typeinfer.propagate_refined_type(self.func, refined_fnty)
# If the return type is imprecise but can be unified with the
# target variable's inferred type, use the latter.
# Useful for code such as::
# s = set()
# s.add(1)
# (the set() call must be typed as int64(), not undefined())
if not sig.return_type.is_precise():
target = typevars[self.target]
if target.defined:
targetty = target.getone()
if context.unify_pairs(targetty, sig.return_type) == targetty:
sig = sig.replace(return_type=targetty)
self.signature = sig
self._add_refine_map(typeinfer, typevars, sig)
def _add_refine_map(self, typeinfer, typevars, sig):
"""Add this expression to the refine_map base on the type of target_type
"""
target_type = typevars[self.target].getone()
# Array
if (isinstance(target_type, types.Array)
and isinstance(sig.return_type.dtype, types.Undefined)):
typeinfer.refine_map[self.target] = self
# DictType
if (isinstance(target_type, types.DictType) and
not target_type.is_precise()):
typeinfer.refine_map[self.target] = self
def refine(self, typeinfer, updated_type):
# Is getitem?
if self.func == operator.getitem:
aryty = typeinfer.typevars[self.args[0].name].getone()
# is array not precise?
if _is_array_not_precise(aryty):
# allow refinement of dtype
assert updated_type.is_precise()
newtype = aryty.copy(dtype=updated_type.dtype)
typeinfer.add_type(self.args[0].name, newtype, loc=self.loc)
else:
m = 'no type refinement implemented for function {} updating to {}'
raise TypingError(m.format(self.func, updated_type))
def get_call_signature(self):
return self.signature
| CallConstraint |
python | pypa__hatch | tests/backend/builders/test_config.py | {
"start": 67138,
"end": 67923
} | class ____:
def test_include(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.default_include() == []
def test_exclude(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.default_exclude() == []
def test_packages(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.default_packages() == []
def test_only_include(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.default_only_include() == []
def test_global_exclude(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.default_global_exclude() == ["*.py[cdo]", "/dist"]
| TestFileSelectionDefaults |
python | sqlalchemy__sqlalchemy | examples/inheritance/single.py | {
"start": 2427,
"end": 5053
} | class ____(Person):
manager_name: Mapped[str50subclass]
# illustrate a single-inh "conflicting" mapped_column declaration,
# where both subclasses want to share the same column that is nonetheless
# not "local" to the base class
@declared_attr
def status(cls) -> Mapped[str50]:
return Person.__table__.c.get(
"status", mapped_column(String(30)) # type: ignore
)
__mapper_args__ = {"polymorphic_identity": "manager"}
def __repr__(self):
return (
f"Manager {self.name}, status {self.status}, "
f"manager_name {self.manager_name}"
)
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
with Session(engine) as session:
c = Company(
name="company1",
employees=[
Manager(
name="mr krabs",
status="AAB",
manager_name="manager1",
),
Engineer(
name="spongebob",
status="BBA",
engineer_name="engineer1",
primary_language="java",
),
Person(name="joesmith"),
Engineer(
name="patrick",
status="CGG",
engineer_name="engineer2",
primary_language="python",
),
Manager(name="jsmith", status="ABA", manager_name="manager2"),
],
)
session.add(c)
session.commit()
for e in c.employees:
print(e)
spongebob = session.scalars(
select(Person).filter_by(name="spongebob")
).one()
spongebob2 = session.scalars(
select(Engineer).filter_by(name="spongebob")
).one()
assert spongebob is spongebob2
spongebob2.engineer_name = "hes spongebob!"
session.commit()
# query using with_polymorphic.
eng_manager = with_polymorphic(Person, [Engineer, Manager])
print(
session.scalars(
select(eng_manager).filter(
or_(
eng_manager.Engineer.engineer_name == "engineer1",
eng_manager.Manager.manager_name == "manager2",
)
)
).all()
)
# illustrate join from Company.
print(
session.scalars(
select(Company)
.join(Company.employees.of_type(eng_manager))
.filter(
or_(
eng_manager.Engineer.engineer_name == "engineer1",
eng_manager.Manager.manager_name == "manager2",
)
)
).all()
)
| Manager |
python | matplotlib__matplotlib | lib/mpl_toolkits/mplot3d/tests/test_axes3d.py | {
"start": 48413,
"end": 93634
} | class ____:
@mpl3d_image_comparison(['voxels-simple.png'], style='mpl20')
def test_simple(self):
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
x, y, z = np.indices((5, 4, 3))
voxels = (x == y) | (y == z)
ax.voxels(voxels)
    @mpl3d_image_comparison(['voxels-edge-style.png'], style='mpl20')
    def test_edge_style(self):
        """Baseline image: per-collection edge styling, plus a single-voxel
        edge-colour override via the returned dict."""
        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
        x, y, z = np.indices((5, 5, 4))
        # A solid ball of voxels.
        voxels = ((x - 2)**2 + (y - 2)**2 + (z-1.5)**2) < 2.2**2
        v = ax.voxels(voxels, linewidths=3, edgecolor='C1')

        # change the edge color of one voxel
        v[max(v.keys())].set_edgecolor('C2')
    @mpl3d_image_comparison(['voxels-named-colors.png'], style='mpl20')
    def test_named_colors(self):
        """Test with colors set to a 3D object array of strings."""
        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
        x, y, z = np.indices((10, 10, 10))
        voxels = (x == y) | (y == z)
        # Drop the corner voxel so the mask is not fully symmetric.
        voxels = voxels & ~(x * y * z < 1)
        colors = np.full((10, 10, 10), 'C0', dtype=np.object_)
        colors[(x < 5) & (y < 5)] = '0.25'
        colors[(x + z) < 10] = 'cyan'
        ax.voxels(voxels, facecolors=colors)
    @mpl3d_image_comparison(['voxels-rgb-data.png'], style='mpl20')
    def test_rgb_data(self):
        """Test with colors set to a 4d float array of rgb data."""
        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
        x, y, z = np.indices((10, 10, 10))
        voxels = (x == y) | (y == z)
        # Each RGB channel ramps along one spatial axis.
        colors = np.zeros((10, 10, 10, 3))
        colors[..., 0] = x / 9
        colors[..., 1] = y / 9
        colors[..., 2] = z / 9
        ax.voxels(voxels, facecolors=colors)
    @mpl3d_image_comparison(['voxels-alpha.png'], style='mpl20')
    def test_alpha(self):
        """Baseline image: semi-transparent RGBA facecolors; also checks the
        structure of the dict returned by ``voxels``."""
        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
        x, y, z = np.indices((10, 10, 10))
        v1 = x == y
        v2 = np.abs(x - y) < 2
        voxels = v1 | v2
        colors = np.zeros((10, 10, 10, 4))
        colors[v2] = [1, 0, 0, 0.5]
        colors[v1] = [0, 1, 0, 0.5]
        v = ax.voxels(voxels, facecolors=colors)

        assert type(v) is dict
        # Only filled voxels get an entry, each a Poly3DCollection.
        for coord, poly in v.items():
            assert voxels[coord], "faces returned for absent voxel"
            assert isinstance(poly, art3d.Poly3DCollection)
    @mpl3d_image_comparison(['voxels-xyz.png'],
                            tol=0.01, remove_text=False, style='mpl20')
    def test_xyz(self):
        """Baseline image: explicit x/y/z corner coordinates with per-voxel
        face and edge colors (an RGB-cube sphere)."""
        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})

        def midpoints(x):
            # Average adjacent samples along every dimension, turning
            # (n+1)^d corner coordinates into n^d cell centers.
            sl = ()
            for i in range(x.ndim):
                x = (x[sl + np.index_exp[:-1]] +
                     x[sl + np.index_exp[1:]]) / 2.0
                sl += np.index_exp[:]
            return x

        # prepare some coordinates, and attach rgb values to each
        r, g, b = np.indices((17, 17, 17)) / 16.0
        rc = midpoints(r)
        gc = midpoints(g)
        bc = midpoints(b)

        # define a sphere about [0.5, 0.5, 0.5]
        sphere = (rc - 0.5)**2 + (gc - 0.5)**2 + (bc - 0.5)**2 < 0.5**2

        # combine the color components
        colors = np.zeros(sphere.shape + (3,))
        colors[..., 0] = rc
        colors[..., 1] = gc
        colors[..., 2] = bc

        # and plot everything
        ax.voxels(r, g, b, sphere,
                  facecolors=colors,
                  edgecolors=np.clip(2*colors - 0.5, 0, 1),  # brighter
                  linewidth=0.5)
    def test_calling_conventions(self):
        """``voxels`` accepts filled positionally or by keyword, with or
        without x/y/z, and rejects duplicate/missing/keyword-only misuse."""
        x, y, z = np.indices((3, 4, 5))
        filled = np.ones((2, 3, 4))
        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})

        # all the valid calling conventions
        for kw in (dict(), dict(edgecolor='k')):
            ax.voxels(filled, **kw)
            ax.voxels(filled=filled, **kw)
            ax.voxels(x, y, z, filled, **kw)
            ax.voxels(x, y, z, filled=filled, **kw)

        # duplicate argument
        with pytest.raises(TypeError, match='voxels'):
            ax.voxels(x, y, z, filled, filled=filled)
        # missing arguments
        with pytest.raises(TypeError, match='voxels'):
            ax.voxels(x, y)
        # x, y, z are positional only - this passes them on as attributes of
        # Poly3DCollection
        with pytest.raises(AttributeError, match="keyword argument 'x'") as exec_info:
            ax.voxels(filled=filled, x=x, y=y, z=z)
        assert exec_info.value.name == 'x'
def test_line3d_set_get_data_3d():
    """``set_data_3d``/``get_data_3d`` and ``set_3d_properties`` round-trip."""
    first = ([0, 1], [2, 3], [4, 5])
    second = ([6, 7], [8, 9], [10, 11])
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    line, = ax.plot(*first)
    np.testing.assert_array_equal(first, line.get_data_3d())

    line.set_data_3d(*second)
    np.testing.assert_array_equal(second, line.get_data_3d())

    # set_xdata/set_ydata only update the 2D data; the z coordinate is
    # supplied separately through set_3d_properties.
    x, y, z = first
    line.set_xdata(x)
    line.set_ydata(y)
    line.set_3d_properties(zs=z, zdir='z')
    np.testing.assert_array_equal(first, line.get_data_3d())

    # A scalar zs is broadcast to every point.
    line.set_3d_properties(zs=0, zdir='z')
    np.testing.assert_array_equal((x, y, np.zeros_like(z)), line.get_data_3d())
@check_figures_equal()
def test_inverted(fig_test, fig_ref):
    """Inverting the y-axis before or after plotting gives the same figure."""
    # Plot then invert.
    ax = fig_test.add_subplot(projection="3d")
    ax.plot([1, 1, 10, 10], [1, 10, 10, 10], [1, 1, 1, 10])
    ax.invert_yaxis()
    # Invert then plot.
    ax = fig_ref.add_subplot(projection="3d")
    ax.invert_yaxis()
    ax.plot([1, 1, 10, 10], [1, 10, 10, 10], [1, 1, 1, 10])
def test_inverted_cla():
    # GitHub PR #5450. Setting autoscale should reset
    # axes to be non-inverted.
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})

    def inversion_state():
        return (ax.xaxis_inverted(), ax.yaxis_inverted(), ax.zaxis_inverted())

    # 1. test that a new axis is not inverted per default
    assert inversion_state() == (False, False, False)
    # Reversed limits invert every axis.
    for set_lim in (ax.set_xlim, ax.set_ylim, ax.set_zlim):
        set_lim(1, 0)
    assert inversion_state() == (True, True, True)
    # Clearing the axes restores the default orientation.
    ax.cla()
    assert inversion_state() == (False, False, False)
def test_ax3d_tickcolour():
    """Per-axis ``tick_params`` colour reaches every major tick line."""
    fig = plt.figure()
    ax = Axes3D(fig)

    for axis_name in ('x', 'y', 'z'):
        ax.tick_params(axis=axis_name, colors='red')
    fig.canvas.draw()

    for axis in (ax.xaxis, ax.yaxis, ax.zaxis):
        for tick in axis.get_major_ticks():
            assert tick.tick1line._color == 'red'
@check_figures_equal()
def test_ticklabel_format(fig_test, fig_ref):
    """Each ``ticklabel_format`` option on each axis matches configuring the
    corresponding major formatter directly."""
    axs = fig_test.subplots(4, 5, subplot_kw={"projection": "3d"})
    for ax in axs.flat:
        ax.set_xlim(1e7, 1e7 + 10)
    # Rows exercise axis='x'/'y'/'z'/'both'; columns exercise one option each.
    for row, name in zip(axs, ["x", "y", "z", "both"]):
        row[0].ticklabel_format(
            axis=name, style="plain")
        row[1].ticklabel_format(
            axis=name, scilimits=(-2, 2))
        row[2].ticklabel_format(
            axis=name, useOffset=not mpl.rcParams["axes.formatter.useoffset"])
        row[3].ticklabel_format(
            axis=name, useLocale=not mpl.rcParams["axes.formatter.use_locale"])
        row[4].ticklabel_format(
            axis=name,
            useMathText=not mpl.rcParams["axes.formatter.use_mathtext"])

    def get_formatters(ax, names):
        # Major formatters for the requested axis attribute names.
        return [getattr(ax, name).get_major_formatter() for name in names]

    axs = fig_ref.subplots(4, 5, subplot_kw={"projection": "3d"})
    for ax in axs.flat:
        ax.set_xlim(1e7, 1e7 + 10)
    for row, names in zip(
            axs, [["xaxis"], ["yaxis"], ["zaxis"], ["xaxis", "yaxis", "zaxis"]]
    ):
        for fmt in get_formatters(row[0], names):
            fmt.set_scientific(False)
        for fmt in get_formatters(row[1], names):
            fmt.set_powerlimits((-2, 2))
        for fmt in get_formatters(row[2], names):
            fmt.set_useOffset(not mpl.rcParams["axes.formatter.useoffset"])
        for fmt in get_formatters(row[3], names):
            fmt.set_useLocale(not mpl.rcParams["axes.formatter.use_locale"])
        for fmt in get_formatters(row[4], names):
            fmt.set_useMathText(
                not mpl.rcParams["axes.formatter.use_mathtext"])
@check_figures_equal()
def test_quiver3D_smoke(fig_test, fig_ref):
    """``quiver`` treats an int and a float ``length`` identically."""
    pivot = "middle"
    # Make the grid
    x, y, z = np.meshgrid(
        np.arange(-0.8, 1, 0.2),
        np.arange(-0.8, 1, 0.2),
        np.arange(-0.8, 1, 0.8)
    )
    u = v = w = np.ones_like(x)

    for fig, length in zip((fig_ref, fig_test), (1, 1.0)):
        ax = fig.add_subplot(projection="3d")
        ax.quiver(x, y, z, u, v, w, length=length, pivot=pivot)
@image_comparison(["minor_ticks.png"], style="mpl20")
def test_minor_ticks():
    """Baseline image: minor ticks with custom labels on all three axes."""
    ax = plt.figure().add_subplot(projection="3d")
    ax.set_xticks([0.25], minor=True)
    ax.set_xticklabels(["quarter"], minor=True)
    ax.set_yticks([0.33], minor=True)
    ax.set_yticklabels(["third"], minor=True)
    ax.set_zticks([0.50], minor=True)
    ax.set_zticklabels(["half"], minor=True)
# remove tolerance when regenerating the test image
@mpl3d_image_comparison(['errorbar3d_errorevery.png'], style='mpl20', tol=0.003)
def test_errorbar3d_errorevery():
    """Tests errorevery functionality for 3D errorbars."""
    t = np.arange(0, 2*np.pi+.1, 0.01)
    x, y, z = np.sin(t), np.cos(3*t), np.sin(5*t)

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')

    estep = 15
    i = np.arange(t.size)
    # Alternate upper/lower z-limit markers on every third drawn errorbar.
    zuplims = (i % estep == 0) & (i // estep % 3 == 0)
    zlolims = (i % estep == 0) & (i // estep % 3 == 2)

    ax.errorbar(x, y, z, 0.2, zuplims=zuplims, zlolims=zlolims,
                errorevery=estep)
@mpl3d_image_comparison(['errorbar3d.png'], style='mpl20',
                        tol=0 if platform.machine() == 'x86_64' else 0.02)
def test_errorbar3d():
    """Tests limits, color styling, and legend for 3D errorbars."""
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')

    d = [1, 2, 3, 4, 5]
    e = [.5, .5, .5, .5, .5]
    ax.errorbar(x=d, y=d, z=d, xerr=e, yerr=e, zerr=e, capsize=3,
                zuplims=[False, True, False, True, True],
                zlolims=[True, False, False, True, False],
                yuplims=True,
                ecolor='purple', label='Error lines')
    ax.legend()
@image_comparison(['stem3d.png'], style='mpl20', tol=0.009)
def test_stem3d():
    """Baseline image: ``stem`` in all three orientations, default and with
    custom line/marker/base formats."""
    plt.rcParams['axes3d.automargin'] = True  # Remove when image is regenerated
    fig, axs = plt.subplots(2, 3, figsize=(8, 6),
                            constrained_layout=True,
                            subplot_kw={'projection': '3d'})

    theta = np.linspace(0, 2*np.pi)
    x = np.cos(theta - np.pi/2)
    y = np.sin(theta - np.pi/2)
    z = theta

    for ax, zdir in zip(axs[0], ['x', 'y', 'z']):
        ax.stem(x, y, z, orientation=zdir)
        ax.set_title(f'orientation={zdir}')

    x = np.linspace(-np.pi/2, np.pi/2, 20)
    y = np.ones_like(x)
    z = np.cos(x)

    for ax, zdir in zip(axs[1], ['x', 'y', 'z']):
        markerline, stemlines, baseline = ax.stem(
            x, y, z,
            linefmt='C4-.', markerfmt='C1D', basefmt='C2',
            orientation=zdir)
        ax.set_title(f'orientation={zdir}')
        markerline.set(markerfacecolor='none', markeredgewidth=2)
        baseline.set_linewidth(3)
@image_comparison(["equal_box_aspect.png"], style="mpl20")
def test_equal_box_aspect():
    """Baseline image: a unit sphere inside a wireframe cube with equal box
    aspect; also checks that a negative zoom is rejected."""
    from itertools import product, combinations

    fig = plt.figure()
    ax = fig.add_subplot(projection="3d")

    # Make data
    u = np.linspace(0, 2 * np.pi, 100)
    v = np.linspace(0, np.pi, 100)
    x = np.outer(np.cos(u), np.sin(v))
    y = np.outer(np.sin(u), np.sin(v))
    z = np.outer(np.ones_like(u), np.cos(v))

    # Plot the surface
    ax.plot_surface(x, y, z)

    # draw cube
    r = [-1, 1]
    for s, e in combinations(np.array(list(product(r, r, r))), 2):
        # Only draw edges (corner pairs differing in exactly one coordinate).
        if np.sum(np.abs(s - e)) == r[1] - r[0]:
            ax.plot3D(*zip(s, e), color="b")

    # Make axes limits
    xyzlim = np.column_stack(
        [ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()]
    )
    XYZlim = [min(xyzlim[0]), max(xyzlim[1])]
    ax.set_xlim3d(XYZlim)
    ax.set_ylim3d(XYZlim)
    ax.set_zlim3d(XYZlim)
    ax.axis('off')
    ax.set_box_aspect((1, 1, 1))

    with pytest.raises(ValueError, match="Argument zoom ="):
        ax.set_box_aspect((1, 1, 1), zoom=-1)
def test_colorbar_pos():
    """A horizontal colorbar shared by two 3D axes ends up at the bottom."""
    fig, axs = plt.subplots(1, 2, figsize=(4, 5),
                            constrained_layout=True,
                            subplot_kw={'projection': '3d'})
    for ax in axs:
        mappable = ax.plot_trisurf(np.random.randn(5), np.random.randn(5),
                                   np.random.randn(5))

    cbar = plt.colorbar(mappable, ax=axs, orientation='horizontal')

    fig.canvas.draw()
    # check that actually on the bottom
    assert cbar.ax.get_position().extents[1] < 0.2
def test_inverted_zaxis():
    """Walk through z-limit and z-bound updates, checking inversion state."""
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')

    def check(inverted, zlim, zbound):
        # Bounds are always reported sorted; limits reflect orientation.
        assert ax.zaxis_inverted() == inverted
        assert ax.get_zlim() == zlim
        assert ax.get_zbound() == zbound

    ax.set_zlim(0, 1)
    check(False, (0, 1), (0, 1))

    # Change bound
    ax.set_zbound((0, 2))
    check(False, (0, 2), (0, 2))

    # Change invert
    ax.invert_zaxis()
    check(True, (2, 0), (0, 2))

    # Set upper bound
    ax.set_zbound(upper=1)
    check(True, (1, 0), (0, 1))

    # Set lower bound
    ax.set_zbound(lower=2)
    check(True, (2, 1), (1, 2))
def test_set_zlim():
    """``set_zlim`` honours zmin/zmax aliases and rejects duplicates."""
    ax = plt.figure().add_subplot(projection='3d')
    # Default limits carry the standard 1/48 margin on each side.
    assert np.allclose(ax.get_zlim(), (-1/48, 49/48))
    ax.set_zlim(zmax=2)
    assert np.allclose(ax.get_zlim(), (-1/48, 2))
    ax.set_zlim(zmin=1)
    assert ax.get_zlim() == (1, 2)

    # Alias and canonical names for the same side may not both be given.
    with pytest.raises(
            TypeError, match="Cannot pass both 'lower' and 'min'"):
        ax.set_zlim(bottom=0, zmin=1)

    with pytest.raises(
            TypeError, match="Cannot pass both 'upper' and 'max'"):
        ax.set_zlim(top=0, zmax=1)
@check_figures_equal()
def test_shared_view(fig_test, fig_ref):
    """view_init(..., share=True) propagates through shareview-linked axes."""
    elev, azim, roll = 5, 20, 30
    ax1 = fig_test.add_subplot(131, projection="3d")
    # Share the view both at construction time and via the method.
    ax2 = fig_test.add_subplot(132, projection="3d", shareview=ax1)
    ax3 = fig_test.add_subplot(133, projection="3d")
    ax3.shareview(ax1)
    ax2.view_init(elev=elev, azim=azim, roll=roll, share=True)

    for subplot_num in (131, 132, 133):
        ax = fig_ref.add_subplot(subplot_num, projection="3d")
        ax.view_init(elev=elev, azim=azim, roll=roll)
def test_shared_axes_retick():
    """Setting ticks on one z-shared axes updates both axes' limits."""
    fig = plt.figure()
    host = fig.add_subplot(211, projection="3d")
    twin = fig.add_subplot(212, projection="3d", sharez=host)

    for axes in (host, twin):
        axes.plot([0, 1], [0, 1], [0, 2])
    host.set_zticks([-0.5, 0, 2, 2.5])

    # check that setting ticks on a shared axis is synchronized
    assert host.get_zlim() == (-0.5, 2.5)
    assert twin.get_zlim() == (-0.5, 2.5)
def test_quaternion():
    """Exercise the Quaternion helper: basis algebra, conjugation, norm,
    rotation, and Cardan-angle round-trips.

    Bug fix: the original asserts used ``(...).all`` without calling it; a
    bound-method object is always truthy, so those checks could never fail.
    They now call ``.all()``.
    """
    # 1:
    q1 = Quaternion(1, [0, 0, 0])
    assert q1.scalar == 1
    assert (q1.vector == [0, 0, 0]).all()
    # __neg__:
    assert (-q1).scalar == -1
    assert ((-q1).vector == [0, 0, 0]).all()
    # i, j, k:
    qi = Quaternion(0, [1, 0, 0])
    assert qi.scalar == 0
    assert (qi.vector == [1, 0, 0]).all()
    qj = Quaternion(0, [0, 1, 0])
    assert qj.scalar == 0
    assert (qj.vector == [0, 1, 0]).all()
    qk = Quaternion(0, [0, 0, 1])
    assert qk.scalar == 0
    assert (qk.vector == [0, 0, 1]).all()
    # i^2 = j^2 = k^2 = -1:
    assert qi*qi == -q1
    assert qj*qj == -q1
    assert qk*qk == -q1
    # identity:
    assert q1*qi == qi
    assert q1*qj == qj
    assert q1*qk == qk
    # i*j=k, j*k=i, k*i=j:
    assert qi*qj == qk
    assert qj*qk == qi
    assert qk*qi == qj
    assert qj*qi == -qk
    assert qk*qj == -qi
    assert qi*qk == -qj
    # __mul__:
    assert (Quaternion(2, [3, 4, 5]) * Quaternion(6, [7, 8, 9])
            == Quaternion(-86, [28, 48, 44]))
    # conjugate():
    for q in [q1, qi, qj, qk]:
        assert q.conjugate().scalar == q.scalar
        assert (q.conjugate().vector == -q.vector).all()
        assert q.conjugate().conjugate() == q
        assert ((q*q.conjugate()).vector == 0).all()
    # norm:
    q0 = Quaternion(0, [0, 0, 0])
    assert q0.norm == 0
    assert q1.norm == 1
    assert qi.norm == 1
    assert qj.norm == 1
    assert qk.norm == 1
    for q in [q0, q1, qi, qj, qk]:
        assert q.norm == (q*q.conjugate()).scalar
    # normalize():
    for q in [
        Quaternion(2, [0, 0, 0]),
        Quaternion(0, [3, 0, 0]),
        Quaternion(0, [0, 4, 0]),
        Quaternion(0, [0, 0, 5]),
        Quaternion(6, [7, 8, 9])
    ]:
        assert q.normalize().norm == 1
    # reciprocal():
    for q in [q1, qi, qj, qk]:
        assert q*q.reciprocal() == q1
        assert q.reciprocal()*q == q1
    # rotate():
    assert (qi.rotate([1, 2, 3]) == np.array([1, -2, -3])).all()
    # rotate_from_to():
    for r1, r2, q in [
        ([1, 0, 0], [0, 1, 0], Quaternion(np.sqrt(1/2), [0, 0, np.sqrt(1/2)])),
        ([1, 0, 0], [0, 0, 1], Quaternion(np.sqrt(1/2), [0, -np.sqrt(1/2), 0])),
        ([1, 0, 0], [1, 0, 0], Quaternion(1, [0, 0, 0]))
    ]:
        assert Quaternion.rotate_from_to(r1, r2) == q
    # rotate_from_to(), special case: antiparallel vectors have no unique
    # rotation axis, so a warning is expected and any perpendicular axis is
    # acceptable.
    for r1 in [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]:
        r1 = np.array(r1)
        with pytest.warns(UserWarning):
            q = Quaternion.rotate_from_to(r1, -r1)
        assert np.isclose(q.norm, 1)
        assert np.dot(q.vector, r1) == 0
    # from_cardan_angles(), as_cardan_angles():
    for elev, azim, roll in [(0, 0, 0),
                             (90, 0, 0), (0, 90, 0), (0, 0, 90),
                             (0, 30, 30), (30, 0, 30), (30, 30, 0),
                             (47, 11, -24)]:
        for mag in [1, 2]:
            q = Quaternion.from_cardan_angles(
                np.deg2rad(elev), np.deg2rad(azim), np.deg2rad(roll))
            assert np.isclose(q.norm, 1)
            # as_cardan_angles must be scale-invariant.
            q = Quaternion(mag * q.scalar, mag * q.vector)
            np.testing.assert_allclose(np.rad2deg(Quaternion.as_cardan_angles(q)),
                                       (elev, azim, roll), atol=1e-6)
@pytest.mark.parametrize('style',
                         ('azel', 'trackball', 'sphere', 'arcball'))
def test_rotate(style):
    """Test rotating using the left mouse button."""
    if style == 'azel':
        s = 0.5
    else:
        # Half the trackball radius, then halved again so the drag stays
        # inside the trackball for every tested (dx, dy).
        s = mpl.rcParams['axes3d.trackballsize'] / 2
        s *= 0.5
        mpl.rcParams['axes3d.trackballborder'] = 0
    with mpl.rc_context({'axes3d.mouserotationstyle': style}):
        for roll, dx, dy in [
                [0, 1, 0],
                [30, 1, 0],
                [0, 0, 1],
                [30, 0, 1],
                [0, 0.5, np.sqrt(3)/2],
                [30, 0.5, np.sqrt(3)/2],
                [0, 2, 0]]:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1, projection='3d')
            ax.view_init(0, 0, roll)
            ax.figure.canvas.draw()

            # drag mouse to change orientation
            MouseEvent._from_ax_coords(
                "button_press_event", ax, (0, 0), MouseButton.LEFT)._process()
            MouseEvent._from_ax_coords(
                "motion_notify_event", ax, (s*dx*ax._pseudo_w, s*dy*ax._pseudo_h),
                MouseButton.LEFT)._process()
            ax.figure.canvas.draw()

            # Expected (elev, azim, roll) per (style, initial roll, dx, dy);
            # values are precomputed references for each rotation style.
            c = np.sqrt(3)/2
            expectations = {
                ('azel', 0, 1, 0): (0, -45, 0),
                ('azel', 0, 0, 1): (-45, 0, 0),
                ('azel', 0, 0.5, c): (-38.971143, -22.5, 0),
                ('azel', 0, 2, 0): (0, -90, 0),
                ('azel', 30, 1, 0): (22.5, -38.971143, 30),
                ('azel', 30, 0, 1): (-38.971143, -22.5, 30),
                ('azel', 30, 0.5, c): (-22.5, -38.971143, 30),

                ('trackball', 0, 1, 0): (0, -28.64789, 0),
                ('trackball', 0, 0, 1): (-28.64789, 0, 0),
                ('trackball', 0, 0.5, c): (-24.531578, -15.277726, 3.340403),
                ('trackball', 0, 2, 0): (0, -180/np.pi, 0),
                ('trackball', 30, 1, 0): (13.869588, -25.319385, 26.87008),
                ('trackball', 30, 0, 1): (-24.531578, -15.277726, 33.340403),
                ('trackball', 30, 0.5, c): (-13.869588, -25.319385, 33.129920),

                ('sphere', 0, 1, 0): (0, -30, 0),
                ('sphere', 0, 0, 1): (-30, 0, 0),
                ('sphere', 0, 0.5, c): (-25.658906, -16.102114, 3.690068),
                ('sphere', 0, 2, 0): (0, -90, 0),
                ('sphere', 30, 1, 0): (14.477512, -26.565051, 26.565051),
                ('sphere', 30, 0, 1): (-25.658906, -16.102114, 33.690068),
                ('sphere', 30, 0.5, c): (-14.477512, -26.565051, 33.434949),

                ('arcball', 0, 1, 0): (0, -60, 0),
                ('arcball', 0, 0, 1): (-60, 0, 0),
                ('arcball', 0, 0.5, c): (-48.590378, -40.893395, 19.106605),
                ('arcball', 0, 2, 0): (0, 180, 0),
                ('arcball', 30, 1, 0): (25.658906, -56.309932, 16.102114),
                ('arcball', 30, 0, 1): (-48.590378, -40.893395, 49.106605),
                ('arcball', 30, 0.5, c): (-25.658906, -56.309932, 43.897886)}
            new_elev, new_azim, new_roll = expectations[(style, roll, dx, dy)]
            np.testing.assert_allclose((ax.elev, ax.azim, ax.roll),
                                       (new_elev, new_azim, new_roll), atol=1e-6)
def test_pan():
    """Test mouse panning using the middle mouse button."""

    def convert_lim(dmin, dmax):
        """Convert min/max limits to center and range."""
        center = (dmin + dmax) / 2
        range_ = dmax - dmin
        return center, range_

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.scatter(0, 0, 0)
    fig.canvas.draw()

    x_center0, x_range0 = convert_lim(*ax.get_xlim3d())
    y_center0, y_range0 = convert_lim(*ax.get_ylim3d())
    z_center0, z_range0 = convert_lim(*ax.get_zlim3d())

    # move mouse diagonally to pan along all axis.
    MouseEvent._from_ax_coords(
        "button_press_event", ax, (0, 0), MouseButton.MIDDLE)._process()
    MouseEvent._from_ax_coords(
        "motion_notify_event", ax, (1, 1), MouseButton.MIDDLE)._process()

    x_center, x_range = convert_lim(*ax.get_xlim3d())
    y_center, y_range = convert_lim(*ax.get_ylim3d())
    z_center, z_range = convert_lim(*ax.get_zlim3d())

    # Panning translates the view without zooming:
    # Ranges have not changed
    assert x_range == pytest.approx(x_range0)
    assert y_range == pytest.approx(y_range0)
    assert z_range == pytest.approx(z_range0)

    # But center positions have
    assert x_center != pytest.approx(x_center0)
    assert y_center != pytest.approx(y_center0)
    assert z_center != pytest.approx(z_center0)
@pytest.mark.parametrize("tool,button,key,expected",
                         [("zoom", MouseButton.LEFT, None,  # zoom in
                          ((0.00, 0.06), (0.01, 0.07), (0.02, 0.08))),
                          ("zoom", MouseButton.LEFT, 'x',  # zoom in
                          ((-0.01, 0.10), (-0.03, 0.08), (-0.06, 0.06))),
                          ("zoom", MouseButton.LEFT, 'y',  # zoom in
                          ((-0.07, 0.05), (-0.04, 0.08), (0.00, 0.12))),
                          ("zoom", MouseButton.RIGHT, None,  # zoom out
                          ((-0.09, 0.15), (-0.08, 0.17), (-0.07, 0.18))),
                          ("pan", MouseButton.LEFT, None,
                          ((-0.70, -0.58), (-1.04, -0.91), (-1.27, -1.15))),
                          ("pan", MouseButton.LEFT, 'x',
                          ((-0.97, -0.84), (-0.58, -0.46), (-0.06, 0.06))),
                          ("pan", MouseButton.LEFT, 'y',
                          ((0.20, 0.32), (-0.51, -0.39), (-1.27, -1.15)))])
def test_toolbar_zoom_pan(tool, button, key, expected):
    """Drive the navigation toolbar's zoom/pan tools with synthetic mouse
    events and check the resulting 3D limits, plus back/forward/home."""
    # NOTE: The expected zoom values are rough ballparks of moving in the view
    #       to make sure we are getting the right direction of motion.
    #       The specific values can and should change if the zoom movement
    #       scaling factor gets updated.
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.scatter(0, 0, 0)
    fig.canvas.draw()
    xlim0, ylim0, zlim0 = ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()

    # Mouse from (0, 0) to (1, 1)
    d0 = (0, 0)
    d1 = (1, 1)
    # Convert to screen coordinates ("s").  Events are defined only with pixel
    # precision, so round the pixel values, and below, check against the
    # corresponding xdata/ydata, which are close but not equal to d0/d1.
    s0 = ax.transData.transform(d0).astype(int)
    s1 = ax.transData.transform(d1).astype(int)

    # Set up the mouse movements
    start_event = MouseEvent(
        "button_press_event", fig.canvas, *s0, button, key=key)
    drag_event = MouseEvent(
        "motion_notify_event", fig.canvas, *s1, button, key=key, buttons={button})
    stop_event = MouseEvent(
        "button_release_event", fig.canvas, *s1, button, key=key)

    tb = NavigationToolbar2(fig.canvas)
    if tool == "zoom":
        tb.zoom()
    else:
        tb.pan()

    start_event._process()
    drag_event._process()
    stop_event._process()

    # Should be close, but won't be exact due to screen integer resolution
    xlim, ylim, zlim = expected
    assert ax.get_xlim3d() == pytest.approx(xlim, abs=0.01)
    assert ax.get_ylim3d() == pytest.approx(ylim, abs=0.01)
    assert ax.get_zlim3d() == pytest.approx(zlim, abs=0.01)

    # Ensure that back, forward, and home buttons work
    tb.back()
    assert ax.get_xlim3d() == pytest.approx(xlim0)
    assert ax.get_ylim3d() == pytest.approx(ylim0)
    assert ax.get_zlim3d() == pytest.approx(zlim0)

    tb.forward()
    assert ax.get_xlim3d() == pytest.approx(xlim, abs=0.01)
    assert ax.get_ylim3d() == pytest.approx(ylim, abs=0.01)
    assert ax.get_zlim3d() == pytest.approx(zlim, abs=0.01)

    tb.home()
    assert ax.get_xlim3d() == pytest.approx(xlim0)
    assert ax.get_ylim3d() == pytest.approx(ylim0)
    assert ax.get_zlim3d() == pytest.approx(zlim0)
@mpl.style.context('default')
@check_figures_equal()
def test_scalarmap_update(fig_test, fig_ref):
    """Marking a drawn 3D scatter as stale must not change its rendering."""
    x, y, z = np.array(list(itertools.product(*[np.arange(0, 5, 1),
                                                np.arange(0, 5, 1),
                                                np.arange(0, 5, 1)]))).T
    c = x + y

    # test
    ax_test = fig_test.add_subplot(111, projection='3d')
    sc_test = ax_test.scatter(x, y, z, c=c, s=40, cmap='viridis')
    # force a draw
    fig_test.canvas.draw()
    # mark it as "stale"
    sc_test.changed()

    # ref: same scatter, never drawn/invalidated before the comparison.
    ax_ref = fig_ref.add_subplot(111, projection='3d')
    sc_ref = ax_ref.scatter(x, y, z, c=c, s=40, cmap='viridis')
def test_subfigure_simple():
    # smoketest that subfigures can work...
    fig = plt.figure()
    left, right = fig.subfigures(1, 2)
    left.add_subplot(1, 1, 1, projection='3d')
    right.add_subplot(1, 1, 1, projection='3d', label='other')
# Update style when regenerating the test image
@image_comparison(baseline_images=['computed_zorder'], remove_text=True,
                  extensions=['png'], style=('mpl20'))
def test_computed_zorder():
    """Baseline image comparing automatic depth-based z-ordering (left
    column) against manual zorder with computed_zorder=False (right)."""
    plt.rcParams['axes3d.automargin'] = True  # Remove when image is regenerated
    fig = plt.figure()
    ax1 = fig.add_subplot(221, projection='3d')
    ax2 = fig.add_subplot(222, projection='3d')
    ax2.computed_zorder = False

    # create a horizontal plane
    corners = ((0, 0, 0), (0, 5, 0), (5, 5, 0), (5, 0, 0))
    for ax in (ax1, ax2):
        tri = art3d.Poly3DCollection([corners],
                                     facecolors='white',
                                     edgecolors='black',
                                     zorder=1)
        ax.add_collection3d(tri)

        # plot a vector
        ax.plot((2, 2), (2, 2), (0, 4), c='red', zorder=2)

        # plot some points
        ax.scatter((3, 3), (1, 3), (1, 3), c='red', zorder=10)

        ax.set_xlim(0, 5.0)
        ax.set_ylim(0, 5.0)
        ax.set_zlim(0, 2.5)

    ax3 = fig.add_subplot(223, projection='3d')
    ax4 = fig.add_subplot(224, projection='3d')
    ax4.computed_zorder = False

    dim = 10
    X, Y = np.meshgrid((-dim, dim), (-dim, dim))
    Z = np.zeros((2, 2))

    angle = 0.5
    X2, Y2 = np.meshgrid((-dim, dim), (0, dim))
    Z2 = Y2 * angle
    X3, Y3 = np.meshgrid((-dim, dim), (-dim, 0))
    Z3 = Y3 * angle

    r = 7
    M = 1000
    th = np.linspace(0, 2 * np.pi, M)
    x, y, z = r * np.cos(th), r * np.sin(th), angle * r * np.sin(th)
    for ax in (ax3, ax4):
        # NOTE(review): X2 is paired with Y3/Z3 here; this matches the
        # rendered baseline image, so it must stay as-is.
        ax.plot_surface(X2, Y3, Z3,
                        color='blue',
                        alpha=0.5,
                        linewidth=0,
                        zorder=-1)
        ax.plot(x[y < 0], y[y < 0], z[y < 0],
                lw=5,
                linestyle='--',
                color='green',
                zorder=0)

        ax.plot_surface(X, Y, Z,
                        color='red',
                        alpha=0.5,
                        linewidth=0,
                        zorder=1)

        ax.plot(r * np.sin(th), r * np.cos(th), np.zeros(M),
                lw=5,
                linestyle='--',
                color='black',
                zorder=2)

        ax.plot_surface(X2, Y2, Z2,
                        color='blue',
                        alpha=0.5,
                        linewidth=0,
                        zorder=3)

        ax.plot(x[y > 0], y[y > 0], z[y > 0], lw=5,
                linestyle='--',
                color='green',
                zorder=4)
        ax.view_init(elev=20, azim=-20, roll=0)
        ax.axis('off')
def test_format_coord():
    """The status-bar coordinate string reflects view and projection type."""
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    x = np.arange(10)
    ax.plot(x, np.sin(x))
    xv = 0.1
    yv = 0.1
    fig.canvas.draw()
    assert ax.format_coord(xv, yv) == 'x=10.5227, y pane=1.0417, z=0.1444'

    # Modify parameters
    ax.view_init(roll=30, vertical_axis="y")
    fig.canvas.draw()
    assert ax.format_coord(xv, yv) == 'x pane=9.1875, y=0.9761, z=0.1291'

    # Reset parameters
    ax.view_init()
    fig.canvas.draw()
    assert ax.format_coord(xv, yv) == 'x=10.5227, y pane=1.0417, z=0.1444'

    # Check orthographic projection
    ax.set_proj_type('ortho')
    fig.canvas.draw()
    assert ax.format_coord(xv, yv) == 'x=10.8869, y pane=1.0417, z=0.1528'

    # Check non-default perspective projection
    ax.set_proj_type('persp', focal_length=0.1)
    fig.canvas.draw()
    assert ax.format_coord(xv, yv) == 'x=9.0620, y pane=1.0417, z=0.1110'
def test_get_axis_position():
    """With the default view, only the y pane faces the viewer."""
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    xs = np.arange(10)
    ax.plot(xs, np.sin(xs))
    fig.canvas.draw()
    assert ax.get_axis_position() == (False, True, False)
def test_margins():
    """``margins`` round-trips positional and per-axis keyword settings."""
    ax = plt.figure().add_subplot(projection='3d')
    # A single positional value applies to all three axes.
    ax.margins(0.2)
    assert ax.margins() == (0.2, 0.2, 0.2)
    ax.margins(0.1, 0.2, 0.3)
    assert ax.margins() == (0.1, 0.2, 0.3)
    # Keyword updates touch only the named axis.
    for kwargs, expected in [(dict(x=0), (0, 0.2, 0.3)),
                             (dict(y=0.1), (0, 0.1, 0.3)),
                             (dict(z=0), (0, 0.1, 0))]:
        ax.margins(**kwargs)
        assert ax.margins() == expected
def test_margin_getters():
    """Each per-axis margin getter reflects the value passed to margins()."""
    ax = plt.figure().add_subplot(projection='3d')
    ax.margins(0.1, 0.2, 0.3)
    for getter, expected in [(ax.get_xmargin, 0.1),
                             (ax.get_ymargin, 0.2),
                             (ax.get_zmargin, 0.3)]:
        assert getter() == expected
@pytest.mark.parametrize('err, args, kwargs, match', (
        (ValueError, (-1,), {}, r'margin must be greater than -0\.5'),
        (ValueError, (1, -1, 1), {}, r'margin must be greater than -0\.5'),
        (ValueError, (1, 1, -1), {}, r'margin must be greater than -0\.5'),
        (ValueError, tuple(), {'x': -1}, r'margin must be greater than -0\.5'),
        (ValueError, tuple(), {'y': -1}, r'margin must be greater than -0\.5'),
        (ValueError, tuple(), {'z': -1}, r'margin must be greater than -0\.5'),
        (TypeError, (1, ), {'x': 1},
         'Cannot pass both positional and keyword'),
        (TypeError, (1, ), {'x': 1, 'y': 1, 'z': 1},
         'Cannot pass both positional and keyword'),
        (TypeError, (1, ), {'x': 1, 'y': 1},
         'Cannot pass both positional and keyword'),
        (TypeError, (1, 1), {}, 'Must pass a single positional argument for'),
))
def test_margins_errors(err, args, kwargs, match):
    """Invalid ``margins`` calls raise the documented error and message."""
    ax = plt.figure().add_subplot(projection='3d')
    with pytest.raises(err, match=match):
        ax.margins(*args, **kwargs)
@check_figures_equal()
def test_text_3d(fig_test, fig_ref):
    """Promoting a 2D Text via text_2d_to_3d matches constructing Text3D."""
    ax = fig_ref.add_subplot(projection="3d")
    txt = Text(0.5, 0.5, r'Foo bar $\int$')
    art3d.text_2d_to_3d(txt, z=1)
    ax.add_artist(txt)
    assert txt.get_position_3d() == (0.5, 0.5, 1)

    ax = fig_test.add_subplot(projection="3d")
    t3d = art3d.Text3D(0.5, 0.5, 1, r'Foo bar $\int$')
    ax.add_artist(t3d)
    assert t3d.get_position_3d() == (0.5, 0.5, 1)
def test_draw_single_lines_from_Nx1():
    # Smoke test for GH#23459
    ax = plt.figure().add_subplot(projection='3d')
    xs, ys, zs = [[0], [1]], [[0], [1]], [[0], [1]]
    ax.plot(xs, ys, zs)
@check_figures_equal()
def test_pathpatch_3d(fig_test, fig_ref):
    """Promoting a 2D PathPatch matches constructing PathPatch3D directly."""
    ax = fig_ref.add_subplot(projection="3d")
    path = Path.unit_rectangle()
    patch = PathPatch(path)
    art3d.pathpatch_2d_to_3d(patch, z=(0, 0.5, 0.7, 1, 0), zdir='y')
    ax.add_artist(patch)

    ax = fig_test.add_subplot(projection="3d")
    pp3d = art3d.PathPatch3D(path, zs=(0, 0.5, 0.7, 1, 0), zdir='y')
    ax.add_artist(pp3d)
@image_comparison(baseline_images=['scatter_spiral.png'],
                  remove_text=True,
                  style='mpl20')
def test_scatter_spiral():
    """Baseline image: scatter spiral with per-point sizes and colors."""
    plt.rcParams['axes3d.automargin'] = True  # Remove when image is regenerated
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    th = np.linspace(0, 2 * np.pi * 6, 256)
    sc = ax.scatter(np.sin(th), np.cos(th), th, s=(1 + th * 5), c=th ** 2)

    # force at least 1 draw!
    fig.canvas.draw()
def test_Poly3DCollection_get_path():
    # Smoke test to see that get_path does not raise
    # See GH#27361
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    circle = Circle((0, 0), 1.0)
    ax.add_patch(circle)
    art3d.pathpatch_2d_to_3d(circle)
    circle.get_path()
def test_Poly3DCollection_get_facecolor():
    # Smoke test to see that get_facecolor does not raise
    # See GH#4067
    y, x = np.ogrid[1:10:100j, 1:10:100j]
    heights = np.cos(x) ** 3 - np.sin(y) ** 2
    ax = plt.figure().add_subplot(111, projection='3d')
    surface = ax.plot_surface(x, y, heights, cmap='hot')
    surface.get_facecolor()
def test_Poly3DCollection_get_edgecolor():
    # Smoke test to see that get_edgecolor does not raise
    # See GH#4067
    y, x = np.ogrid[1:10:100j, 1:10:100j]
    heights = np.cos(x) ** 3 - np.sin(y) ** 2
    ax = plt.figure().add_subplot(111, projection='3d')
    surface = ax.plot_surface(x, y, heights, cmap='hot')
    surface.get_edgecolor()
@pytest.mark.parametrize(
    "vertical_axis, proj_expected, axis_lines_expected, tickdirs_expected",
    [
        (
            "z",
            [
                [0.0, 1.142857, 0.0, -0.571429],
                [0.0, 0.0, 0.857143, -0.428571],
                [0.0, 0.0, 0.0, -10.0],
                [-1.142857, 0.0, 0.0, 10.571429],
            ],
            [
                ([0.05617978, 0.06329114], [-0.04213483, -0.04746835]),
                ([-0.06329114, 0.06329114], [-0.04746835, -0.04746835]),
                ([-0.06329114, -0.06329114], [-0.04746835, 0.04746835]),
            ],
            [1, 0, 0],
        ),
        (
            "y",
            [
                [1.142857, 0.0, 0.0, -0.571429],
                [0.0, 0.857143, 0.0, -0.428571],
                [0.0, 0.0, 0.0, -10.0],
                [0.0, 0.0, -1.142857, 10.571429],
            ],
            [
                ([-0.06329114, 0.06329114], [0.04746835, 0.04746835]),
                ([0.06329114, 0.06329114], [-0.04746835, 0.04746835]),
                ([-0.05617978, -0.06329114], [0.04213483, 0.04746835]),
            ],
            [2, 2, 0],
        ),
        (
            "x",
            [
                [0.0, 0.0, 1.142857, -0.571429],
                [0.857143, 0.0, 0.0, -0.428571],
                [0.0, 0.0, 0.0, -10.0],
                [0.0, -1.142857, 0.0, 10.571429],
            ],
            [
                ([-0.06329114, -0.06329114], [0.04746835, -0.04746835]),
                ([0.06329114, 0.05617978], [0.04746835, 0.04213483]),
                ([0.06329114, -0.06329114], [0.04746835, 0.04746835]),
            ],
            [1, 2, 1],
        ),
    ],
)
def test_view_init_vertical_axis(
    vertical_axis, proj_expected, axis_lines_expected, tickdirs_expected
):
    """
    Test the actual projection, axis lines and ticks matches expected values.

    Parameters
    ----------
    vertical_axis : str
        Axis to align vertically.
    proj_expected : ndarray
        Expected values from ax.get_proj().
    axis_lines_expected : tuple of arrays
        Edgepoints of the axis line. Expected values retrieved according
        to ``ax.get_[xyz]axis().line.get_data()``.
    tickdirs_expected : list of int
        indexes indicating which axis to create a tick line along.
    """
    rtol = 2e-06
    ax = plt.subplot(1, 1, 1, projection="3d")
    ax.view_init(elev=0, azim=0, roll=0, vertical_axis=vertical_axis)
    ax.get_figure().canvas.draw()

    # Assert the projection matrix:
    proj_actual = ax.get_proj()
    np.testing.assert_allclose(proj_expected, proj_actual, rtol=rtol)

    # Per-axis checks for x, y, z in order.
    for i, axis in enumerate([ax.get_xaxis(), ax.get_yaxis(), ax.get_zaxis()]):
        # Assert black lines are correctly aligned:
        axis_line_expected = axis_lines_expected[i]
        axis_line_actual = axis.line.get_data()
        np.testing.assert_allclose(axis_line_expected, axis_line_actual,
                                   rtol=rtol)

        # Assert ticks are correctly aligned:
        tickdir_expected = tickdirs_expected[i]
        tickdir_actual = axis._get_tickdir('default')
        np.testing.assert_array_equal(tickdir_expected, tickdir_actual)
@pytest.mark.parametrize("vertical_axis", ["x", "y", "z"])
def test_on_move_vertical_axis(vertical_axis: str) -> None:
    """
    Test vertical axis is respected when rotating the plot interactively.
    """
    ax = plt.subplot(1, 1, 1, projection="3d")
    ax.view_init(elev=0, azim=0, roll=0, vertical_axis=vertical_axis)
    ax.get_figure().canvas.draw()

    proj_before = ax.get_proj()
    # Simulate a left-button drag to rotate the view.
    MouseEvent._from_ax_coords(
        "button_press_event", ax, (0, 1), MouseButton.LEFT)._process()
    MouseEvent._from_ax_coords(
        "motion_notify_event", ax, (.5, .8), MouseButton.LEFT)._process()

    assert ax._axis_names.index(vertical_axis) == ax._vertical_axis

    # Make sure plot has actually moved:
    proj_after = ax.get_proj()
    np.testing.assert_raises(
        AssertionError, np.testing.assert_allclose, proj_before, proj_after
    )
@pytest.mark.parametrize(
"vertical_axis, aspect_expected",
[
("x", [1.190476, 0.892857, 1.190476]),
("y", [0.892857, 1.190476, 1.190476]),
("z", [1.190476, 1.190476, 0.892857]),
],
)
def test_set_box_aspect_vertical_axis(vertical_axis, aspect_expected):
ax = plt.subplot(1, 1, 1, projection="3d")
ax.view_init(elev=0, azim=0, roll=0, vertical_axis=vertical_axis)
ax.get_figure().canvas.draw()
ax.set_box_aspect(None)
np.testing.assert_allclose(aspect_expected, ax._box_aspect, rtol=1e-6)
@image_comparison(baseline_images=['arc_pathpatch.png'],
remove_text=True,
style='mpl20')
def test_arc_pathpatch():
ax = plt.subplot(1, 1, 1, projection="3d")
a = mpatch.Arc((0.5, 0.5), width=0.5, height=0.9,
angle=20, theta1=10, theta2=130)
ax.add_patch(a)
art3d.pathpatch_2d_to_3d(a, z=0, zdir='z')
@image_comparison(baseline_images=['panecolor_rcparams.png'],
remove_text=True,
style='mpl20')
def test_panecolor_rcparams():
with plt.rc_context({'axes3d.xaxis.panecolor': 'r',
'axes3d.yaxis.panecolor': 'g',
'axes3d.zaxis.panecolor': 'b'}):
fig = plt.figure(figsize=(1, 1))
fig.add_subplot(projection='3d')
@check_figures_equal()
def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):
"""
Test to see if the `z` axis does not get mutated
after a call to `Axes3D.plot`
test cases came from GH#8990
"""
ax1 = fig_test.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax1.plot(x, y, z, 'o-')
# mutate y,z to get a nontrivial line
y[:] = [1, 2, 3]
z[:] = [1, 2, 3]
# draw the same plot without mutating x and y
ax2 = fig_ref.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax2.plot(x, y, z, 'o-')
def test_scatter_masked_color():
"""
Test color parameter usage with non-finite coordinate arrays.
GH#26236
"""
x = [np.nan, 1, 2, 1]
y = [0, np.inf, 2, 1]
z = [0, 1, -np.inf, 1]
colors = [
[0.0, 0.0, 0.0, 1],
[0.0, 0.0, 0.0, 1],
[0.0, 0.0, 0.0, 1],
[0.0, 0.0, 0.0, 1]
]
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
path3d = ax.scatter(x, y, z, color=colors)
# Assert sizes' equality
assert len(path3d.get_offsets()) ==\
len(super(type(path3d), path3d).get_facecolors())
@mpl3d_image_comparison(['surface3d_zsort_inf.png'], style='mpl20')
def test_surface3d_zsort_inf():
plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
x, y = np.mgrid[-2:2:0.1, -2:2:0.1]
z = np.sin(x)**2 + np.cos(y)**2
z[x.shape[0] // 2:, x.shape[1] // 2:] = np.inf
ax.plot_surface(x, y, z, cmap='jet')
ax.view_init(elev=45, azim=145)
def test_Poly3DCollection_init_value_error():
# smoke test to ensure the input check works
# GH#26420
with pytest.raises(ValueError,
match='You must provide facecolors, edgecolors, '
'or both for shade to work.'):
poly = np.array([[0, 0, 1], [0, 1, 1], [0, 0, 0]], float)
c = art3d.Poly3DCollection([poly], shade=True)
def test_ndarray_color_kwargs_value_error():
# smoke test
# ensures ndarray can be passed to color in kwargs for 3d projection plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(1, 0, 0, color=np.array([0, 0, 0, 1]))
fig.canvas.draw()
def test_line3dcollection_autolim_ragged():
"""Test Line3DCollection with autolim=True and lines of different lengths."""
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Create lines with different numbers of points (ragged arrays)
edges = [
[(0, 0, 0), (1, 1, 1), (2, 2, 2)], # 3 points
[(0, 1, 0), (1, 2, 1)], # 2 points
[(1, 0, 1), (2, 1, 2), (3, 2, 3), (4, 3, 4)] # 4 points
]
# This should not raise an exception.
collections = ax.add_collection3d(art3d.Line3DCollection(edges), autolim=True)
# Check that limits were computed correctly with margins
# The limits should include all points with default margins
assert np.allclose(ax.get_xlim3d(), (-0.08333333333333333, 4.083333333333333))
assert np.allclose(ax.get_ylim3d(), (-0.0625, 3.0625))
assert np.allclose(ax.get_zlim3d(), (-0.08333333333333333, 4.083333333333333))
def test_axes3d_set_aspect_deperecated_params():
"""
Test that using the deprecated 'anchor' and 'share' kwargs in
set_aspect raises the correct warning.
"""
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Test that providing the `anchor` parameter raises a deprecation warning.
with pytest.warns(_api.MatplotlibDeprecationWarning, match="'anchor' parameter"):
ax.set_aspect('equal', anchor='C')
# Test that using the 'share' parameter is now deprecated.
with pytest.warns(_api.MatplotlibDeprecationWarning, match="'share' parameter"):
ax.set_aspect('equal', share=True)
# Test that the `adjustable` parameter is correctly processed to satisfy
# code coverage.
ax.set_aspect('equal', adjustable='box')
assert ax.get_adjustable() == 'box'
ax.set_aspect('equal', adjustable='datalim')
assert ax.get_adjustable() == 'datalim'
with pytest.raises(ValueError, match="adjustable"):
ax.set_aspect('equal', adjustable='invalid_value')
| TestVoxels |
python | python-openxml__python-docx | tests/opc/test_pkgreader.py | {
"start": 10095,
"end": 14117
} | class ____:
def it_can_construct_from_ct_item_xml(self, from_xml_fixture):
content_types_xml, expected_defaults, expected_overrides = from_xml_fixture
ct_map = _ContentTypeMap.from_xml(content_types_xml)
assert ct_map._defaults == expected_defaults
assert ct_map._overrides == expected_overrides
def it_matches_an_override_on_case_insensitive_partname(self, match_override_fixture):
ct_map, partname, content_type = match_override_fixture
assert ct_map[partname] == content_type
def it_falls_back_to_case_insensitive_extension_default_match(self, match_default_fixture):
ct_map, partname, content_type = match_default_fixture
assert ct_map[partname] == content_type
def it_should_raise_on_partname_not_found(self):
ct_map = _ContentTypeMap()
with pytest.raises(KeyError):
ct_map[PackURI("/!blat/rhumba.1x&")]
def it_should_raise_on_key_not_instance_of_PackURI(self):
ct_map = _ContentTypeMap()
ct_map._overrides = {PackURI("/part/name1.xml"): "app/vnd.type1"}
with pytest.raises(KeyError):
ct_map["/part/name1.xml"]
# fixtures ---------------------------------------------
@pytest.fixture
def from_xml_fixture(self):
entries = (
("Default", "xml", CT.XML),
("Default", "PNG", CT.PNG),
("Override", "/ppt/presentation.xml", CT.PML_PRESENTATION_MAIN),
)
content_types_xml = self._xml_from(entries)
expected_defaults = {}
expected_overrides = {}
for entry in entries:
if entry[0] == "Default":
ext = entry[1].lower()
content_type = entry[2]
expected_defaults[ext] = content_type
elif entry[0] == "Override":
partname, content_type = entry[1:]
expected_overrides[partname] = content_type
return content_types_xml, expected_defaults, expected_overrides
@pytest.fixture(
params=[
("/foo/bar.xml", "xml", "application/xml"),
("/foo/bar.PNG", "png", "image/png"),
("/foo/bar.jpg", "JPG", "image/jpeg"),
]
)
def match_default_fixture(self, request):
partname_str, ext, content_type = request.param
partname = PackURI(partname_str)
ct_map = _ContentTypeMap()
ct_map._add_override(PackURI("/bar/foo.xyz"), "application/xyz")
ct_map._add_default(ext, content_type)
return ct_map, partname, content_type
@pytest.fixture(
params=[
("/foo/bar.xml", "/foo/bar.xml"),
("/foo/bar.xml", "/FOO/Bar.XML"),
("/FoO/bAr.XmL", "/foo/bar.xml"),
]
)
def match_override_fixture(self, request):
partname_str, should_match_partname_str = request.param
partname = PackURI(partname_str)
should_match_partname = PackURI(should_match_partname_str)
content_type = "appl/vnd-foobar"
ct_map = _ContentTypeMap()
ct_map._add_override(partname, content_type)
return ct_map, should_match_partname, content_type
def _xml_from(self, entries):
"""
Return XML for a [Content_Types].xml based on items in `entries`.
"""
types_bldr = a_Types().with_nsdecls()
for entry in entries:
if entry[0] == "Default":
ext, content_type = entry[1:]
default_bldr = a_Default()
default_bldr.with_Extension(ext)
default_bldr.with_ContentType(content_type)
types_bldr.with_child(default_bldr)
elif entry[0] == "Override":
partname, content_type = entry[1:]
override_bldr = an_Override()
override_bldr.with_PartName(partname)
override_bldr.with_ContentType(content_type)
types_bldr.with_child(override_bldr)
return types_bldr.xml()
| Describe_ContentTypeMap |
python | streamlit__streamlit | lib/tests/streamlit/data_mocks/dask_mocks.py | {
"start": 2143,
"end": 2823
} | class ____:
"""This is dummy Index class, which imitates dask.dataframe.core.Index class
for testing purposes. We use this to make sure that our code does a special handling
if it detects a Dask Index.
This allows testing of the functionality without having the library installed,
but it won't capture changes in the API of the library. This requires
integration tests.
"""
__module__ = "dask.dataframe.core"
def __init__(self, data: pd.Index):
self._data: pd.Index = data
def head(self, n: int, compute: bool) -> pd.Index:
"""Returns the top n element of a mock version of Dask Index."""
return self._data[:n]
| Index |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/cross_trainer_cache_ft_test.py | {
"start": 1181,
"end": 3645
} | class ____(data_service_test_base.TestBase,
parameterized.TestCase):
"""Fault tolerance tests for tf.data service cross-trainer cache."""
@combinations.generate(test_base.default_test_combinations())
def testWorkerRestart(self):
cluster = self._create_cluster(num_workers=1)
dataset = dataset_ops.Dataset.range(10000000).repeat()
distributed_dataset = self.make_distributed_dataset(
dataset,
cluster,
job_name="job",
cross_trainer_cache=data_service_ops.CrossTrainerCache(
trainer_id="Trainer 1"))
get_next = self.getNext(distributed_dataset)
elements = self._get_next(get_next, 100)
self.assertEqual(elements, list(range(100)))
cluster.workers[0].restart()
# Read until we get results from the restarted worker, then read some more.
while self.evaluate(get_next()) != 0:
pass
elements = self._get_next(get_next, 100)
self.assertEqual(elements, list(range(1, 101)))
@combinations.generate(test_base.default_test_combinations())
def testDispatcherRestart(self):
cluster = self._create_cluster(num_workers=1)
dataset = dataset_ops.Dataset.range(10000000).repeat()
distributed_dataset = self.make_distributed_dataset(
dataset,
cluster,
job_name="job",
cross_trainer_cache=data_service_ops.CrossTrainerCache(
trainer_id="Trainer 1"))
get_next = self.getNext(distributed_dataset)
elements = self._get_next(get_next, 100)
self.assertEqual(elements, list(range(100)))
cluster.restart_dispatcher()
# Dispatcher restart should not affect the workers.
elements = self._get_next(get_next, 100)
self.assertEqual(elements, list(range(100, 200)))
def _get_next(self, get_next, num_elements):
return [self.evaluate(get_next()) for _ in range(num_elements)]
def _create_cluster(self,
num_workers,
cross_trainer_cache_size_bytes=10 * (2**30)):
cluster = data_service_test_base.TestCluster(num_workers=0)
for _ in range(num_workers):
worker = data_service_test_base.TestWorker(
dispatcher_address=cluster.dispatcher_address(),
shutdown_quiet_period_ms=0,
cross_trainer_cache_size_bytes=cross_trainer_cache_size_bytes)
worker.start()
cluster.workers.append(worker)
return cluster
if __name__ == "__main__":
test.main()
| CrossTrainerCacheFtTest |
python | redis__redis-py | redis/multidb/event.py | {
"start": 994,
"end": 1786
} | class ____(EventListenerInterface):
"""
Re-subscribe the currently active pub / sub to a new active database.
"""
def listen(self, event: ActiveDatabaseChanged):
old_pubsub = event.command_executor.active_pubsub
if old_pubsub is not None:
# Re-assign old channels and patterns so they will be automatically subscribed on connection.
new_pubsub = event.new_database.client.pubsub(**event.kwargs)
new_pubsub.channels = old_pubsub.channels
new_pubsub.patterns = old_pubsub.patterns
new_pubsub.shard_channels = old_pubsub.shard_channels
new_pubsub.on_connect(None)
event.command_executor.active_pubsub = new_pubsub
old_pubsub.close()
| ResubscribeOnActiveDatabaseChanged |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 26884,
"end": 28995
} | class ____(MemoryLeakMixin, TestCase):
"""Test list insert. """
def test_list_insert_empty(self):
@njit
def foo(i):
l = listobject.new_list(int32)
l.insert(i, 1)
return len(l), l[0]
for i in (-10, -5, -1, 0, 1, 4, 9):
self.assertEqual(foo(i), (1, 1))
def test_list_insert_singleton(self):
@njit
def foo(i):
l = listobject.new_list(int32)
l.append(0)
l.insert(i, 1)
return len(l), l[0], l[1]
# insert before
for i in (-10, -3, -2, -1, 0):
self.assertEqual(foo(i), (2, 1, 0))
# insert after
for i in (1, 2, 3, 10):
self.assertEqual(foo(i), (2, 0, 1))
def test_list_insert_multiple(self):
@njit
def foo(i):
l = listobject.new_list(int32)
for j in range(10):
l.append(0)
l.insert(i, 1)
return len(l), l[i]
for i in (0, 4, 9):
self.assertEqual(foo(i), (11, 1))
def test_list_insert_multiple_before(self):
@njit
def foo(i):
l = listobject.new_list(int32)
for j in range(10):
l.append(0)
l.insert(i, 1)
return len(l), l[0]
for i in (-12, -11, -10, 0):
self.assertEqual(foo(i), (11, 1))
def test_list_insert_multiple_after(self):
@njit
def foo(i):
l = listobject.new_list(int32)
for j in range(10):
l.append(0)
l.insert(i, 1)
return len(l), l[10]
for i in (10, 11, 12):
self.assertEqual(foo(i), (11, 1))
def test_list_insert_typing_error(self):
self.disable_leak_check()
@njit
def foo():
l = listobject.new_list(int32)
l.insert("a", 0)
with self.assertRaises(TypingError) as raises:
foo()
self.assertIn(
"list insert indices must be integers",
str(raises.exception),
)
| TestInsert |
python | kamyu104__LeetCode-Solutions | Python/mirror-reflection.py | {
"start": 384,
"end": 970
} | class ____(object):
def mirrorReflection(self, p, q):
"""
:type p: int
:type q: int
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a % b
return a
lcm = p*q // gcd(p, q)
# let a = lcm / p, b = lcm / q
if lcm // p % 2 == 1:
if lcm // q % 2 == 1:
return 1 # a is odd, b is odd <=> (p & -p) == (q & -q)
return 2 # a is odd, b is even <=> (p & -p) > (q & -q)
return 0 # a is even, b is odd <=> (p & -p) < (q & -q)
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/sum-of-number-and-its-reverse.py | {
"start": 90,
"end": 1065
} | class ____(object):
def sumOfNumberAndReverse(self, num):
"""
:type num: int
:rtype: bool
"""
def backtracking(num, chosen):
if num == 0:
return True
if chosen == 1:
return False
if num <= 18:
return (num%2 == 0) or (num == 11 and chosen == 0)
if chosen == 2:
return False
for x in (num%10, 10+num%10):
if not (1 <= x <= 18):
continue
base = 11
if chosen:
base = chosen
else:
while x*((base-1)*10+1) <= num:
base = (base-1)*10+1
if num-x*base >= 0 and backtracking((num-x*base)//10, base//100+1):
return True
return False
return backtracking(num, 0)
# Time: O(nlogn)
# Space: O(1)
# brute force
| Solution |
python | getsentry__sentry | src/sentry/feedback/migrations/0001_squashed_0004_index_together.py | {
"start": 334,
"end": 3526
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
replaces = [
("feedback", "0001_feedback"),
("feedback", "0002_feedback_add_org_id_and_rename_event_id"),
("feedback", "0003_feedback_add_env"),
("feedback", "0004_index_together"),
]
initial = True
checked = False # This is an initial migration and can take locks
dependencies = [
("sentry", "0001_squashed_0904_onboarding_task_project_id_idx"),
]
operations = [
migrations.CreateModel(
name="Feedback",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
(
"project_id",
sentry.db.models.fields.bounded.BoundedBigIntegerField(db_index=True),
),
("replay_id", models.CharField(db_index=True, max_length=100, null=True)),
("url", models.CharField(max_length=1000, null=True)),
("message", models.TextField()),
("feedback_id", sentry.db.models.fields.uuid.UUIDField(max_length=32, unique=True)),
("date_added", models.DateTimeField(default=django.utils.timezone.now)),
(
"organization_id",
sentry.db.models.fields.bounded.BoundedBigIntegerField(db_index=True),
),
("data", models.JSONField(null=True)),
(
"environment",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="sentry.environment",
),
),
],
options={
"db_table": "feedback_feedback",
"indexes": [
models.Index(
fields=["project_id", "date_added"], name="feedback_fe_project_84fbf7_idx"
)
],
},
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/tpu/preempted_hook.py | {
"start": 1022,
"end": 1680
} | class ____(session_run_hook.SessionRunHook):
"""The SessionRunHook for preemptible Cloud TPUs.
This is an implementation of SessionRunHook for the pre-emptible Google Cloud
TPU service. It attempts to close the session if the TPU is preempted, and
exits the coordinator process if the session cannot be closed.
"""
def __init__(self, cluster):
self._cluster = cluster
def after_create_session(self, session, coord):
if tpu_cluster_resolver.is_running_in_gce():
self._tpu_poller = _TPUPollingThread(self._cluster, session)
self._tpu_poller.start()
def end(self, session):
self._tpu_poller.stop()
| CloudTPUPreemptedHook |
python | django__django | tests/admin_inlines/tests.py | {
"start": 32995,
"end": 34637
} | class ____(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = reverse("admin:admin_inlines_holder_change", args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, "my_awesome_admin_scripts.js")
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = reverse("admin:admin_inlines_holder3_change", args=(holder.id,))
response = self.client.get(change_url)
self.assertEqual(
response.context["inline_admin_formsets"][0].media._js,
[
"admin/js/vendor/jquery/jquery.min.js",
"my_awesome_inline_scripts.js",
"custom_number.js",
"admin/js/jquery.init.js",
"admin/js/inlines.js",
],
)
self.assertContains(response, "my_awesome_inline_scripts.js")
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = reverse("admin:admin_inlines_holder2_change", args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, "my_awesome_admin_scripts.js")
self.assertContains(response, "my_awesome_inline_scripts.js")
@override_settings(ROOT_URLCONF="admin_inlines.urls")
| TestInlineMedia |
python | getsentry__sentry | src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py | {
"start": 1062,
"end": 1280
} | class ____:
function_patterns: set[str]
path_patterns: set[str]
path_replacer: PathReplacer
function_and_path_patterns: list[FunctionAndPathPattern] = field(default_factory=list)
@unique
| SDKFrameConfig |
python | PrefectHQ__prefect | tests/_internal/concurrency/test_services.py | {
"start": 626,
"end": 1088
} | class ____(QueueService[int]):
mock = MagicMock()
def __init__(self, index: Optional[int] = None) -> None:
if index is not None:
super().__init__(index)
else:
super().__init__()
async def _handle(self, item: int):
# Checkpoint to catch errors where async cancellation has occurred
await asyncio.sleep(0)
self.mock(self, item)
print(f"Handled item {item} for {self}")
| MockService |
python | ray-project__ray | python/ray/dashboard/modules/job/cli_utils.py | {
"start": 363,
"end": 1462
} | class ____(click.ParamType):
"""A click parameter that can be either a boolean or a string."""
name = "BOOL | TEXT"
def convert(self, value, param, ctx):
if isinstance(value, bool):
return value
else:
return bool_cast(value)
def add_common_job_options(func):
"""Decorator for adding CLI flags shared by all `ray job` commands."""
@click.option(
"--verify",
default=True,
show_default=True,
type=BoolOrStringParam(),
help=(
"Boolean indication to verify the server's TLS certificate or a path to"
" a file or directory of trusted certificates."
),
)
@click.option(
"--headers",
required=False,
type=str,
default=None,
help=(
"Used to pass headers through http/s to the Ray Cluster."
'please follow JSON formatting formatting {"key": "value"}'
),
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
| BoolOrStringParam |
python | PrefectHQ__prefect | src/prefect/_internal/schemas/bases.py | {
"start": 3881,
"end": 4474
} | class ____(IDBaseModel):
"""
A PrefectBaseModel with an auto-generated UUID ID value and created /
updated timestamps, intended for compatibility with our standard ORM models.
The ID, created, and updated fields are reset on copy() and not included in
equality comparisons.
"""
_reset_fields: ClassVar[set[str]] = {"id", "created", "updated"}
model_config: ClassVar[ConfigDict] = ConfigDict(from_attributes=True)
created: Optional[DateTime] = Field(default=None, repr=False)
updated: Optional[DateTime] = Field(default=None, repr=False)
| ObjectBaseModel |
python | pytorch__pytorch | torch/profiler/_pattern_matcher.py | {
"start": 10306,
"end": 12737
} | class ____(Pattern):
def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
super().__init__(prof, should_benchmark)
self.name = "FP32 MatMul Pattern"
self.description = (
"You are currently using GPU that supports TF32. "
"Please enable TF32 by setting 'torch.backends.cuda.matmul.allow_tf32 = True'"
)
self.url = "https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
@property
def skip(self):
if torch.version.hip is not None:
has_tf32 = False
else:
# Anything less than sm_80 is not Ampere which doesn't support TF32
has_tf32 = all(
int(re.sub("sm_|compute_", "", arch)) >= 80
for arch in torch.cuda.get_arch_list()
)
return has_tf32 is False or super().skip or not self.prof.record_shapes
def match(self, event: _ProfilerEvent) -> bool:
# If we saw this pattern once, we don't need to match it again
if event.tag != _EventType.TorchOp:
return False
assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
if event.name == "aten::mm":
if event.extra_fields.allow_tf32_cublas is False:
return True
return False
def report(self, event: _ProfilerEvent):
return self.description
def benchmark(self, events: list[_ProfilerEvent]):
shapes_factor_map = {input_shapes(event): 0.0 for event in events}
for shape in shapes_factor_map:
matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float32)
matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float32)
fp32_timer = benchmark.Timer(
stmt="torch.mm(matrixA, matrixB)",
globals={"matrixA": matrixA, "matrixB": matrixB},
)
tf32_timer = benchmark.Timer(
stmt="torch.mm(matrixA, matrixB)",
setup="torch.backends.cuda.matmul.allow_tf32 = True",
globals={"matrixA": matrixA, "matrixB": matrixB},
)
torch.backends.cuda.matmul.allow_tf32 = False
fp32_time = fp32_timer.timeit(10).mean
tf32_time = tf32_timer.timeit(10).mean
shapes_factor_map[shape] = tf32_time / fp32_time
return shapes_factor_map
| FP32MatMulPattern |
python | spack__spack | lib/spack/spack/installer.py | {
"start": 98845,
"end": 108953
} | class ____:
"""This class implements the part installation that happens in the child process."""
def __init__(self, pkg: "spack.package_base.PackageBase", install_args: dict):
"""Create a new BuildProcessInstaller.
It is assumed that the lifecycle of this object is the same as the child
process in the build.
Arguments:
pkg: the package being installed.
install_args: arguments to the installer from parent process.
"""
self.pkg = pkg
# whether to do a fake install
self.fake = install_args.get("fake", False)
# whether to install source code with the package
self.install_source = install_args.get("install_source", False)
is_develop = pkg.spec.is_develop
# whether to keep the build stage after installation
# Note: user commands do not have an explicit choice to disable
# keeping stages (i.e., we have a --keep-stage option, but not
# a --destroy-stage option), so we can override a default choice
# to destroy
self.keep_stage = is_develop or install_args.get("keep_stage", False)
# whether to restage
self.restage = (not is_develop) and install_args.get("restage", False)
# whether to skip the patch phase
self.skip_patch = install_args.get("skip_patch", False)
# whether to enable echoing of build output initially or not
self.verbose = bool(install_args.get("verbose", False))
# whether installation was explicitly requested by the user
self.explicit = pkg.spec.dag_hash() in install_args.get("explicit", [])
# env before starting installation
self.unmodified_env = install_args.get("unmodified_env", {})
# env modifications by Spack
self.env_mods = install_args.get("env_modifications", EnvironmentModifications())
# timer for build phases
self.timer = timer.Timer()
# If we are using a padded path, filter the output to compress padded paths
padding = spack.config.get("config:install_tree:padded_length", None)
self.filter_fn = spack.util.path.padding_filter if padding else None
# info/debug information
self.pre = _log_prefix(pkg.name)
self.pkg_id = package_id(pkg.spec)
def run(self) -> bool:
"""Main entry point from ``build_process`` to kick off install in child."""
stage = self.pkg.stage
stage.keep = self.keep_stage
with stage:
if self.restage:
stage.destroy()
self.timer.start("stage")
if not self.fake:
if not self.skip_patch:
self.pkg.do_patch()
else:
self.pkg.do_stage()
self.timer.stop("stage")
tty.debug(
f"{self.pre} Building {self.pkg_id} [{self.pkg.build_system_class}]" # type: ignore[attr-defined] # noqa: E501
)
# get verbosity from install parameter or saved value
self.echo = self.verbose
if spack.package_base.PackageBase._verbose is not None:
self.echo = spack.package_base.PackageBase._verbose
# Run the pre-install hook in the child process after
# the directory is created.
spack.hooks.pre_install(self.pkg.spec)
if self.fake:
_do_fake_install(self.pkg)
else:
if self.install_source:
self._install_source()
self._real_install()
# Run post install hooks before build stage is removed.
self.timer.start("post-install")
spack.hooks.post_install(self.pkg.spec, self.explicit)
self.timer.stop("post-install")
# Stop the timer and save results
self.timer.stop()
_write_timer_json(self.pkg, self.timer, False)
print_install_test_log(self.pkg)
_print_timer(pre=self.pre, pkg_id=self.pkg_id, timer=self.timer)
_print_installed_pkg(self.pkg.prefix)
# preserve verbosity across runs
return self.echo
def _install_source(self) -> None:
"""Install source code from stage into share/pkg/src if necessary."""
pkg = self.pkg
if not os.path.isdir(pkg.stage.source_path):
return
src_target = os.path.join(pkg.spec.prefix, "share", pkg.name, "src")
tty.debug(f"{self.pre} Copying source to {src_target}")
fs.install_tree(pkg.stage.source_path, src_target)
def _real_install(self) -> None:
pkg = self.pkg
# Do the real install in the source directory.
with fs.working_dir(pkg.stage.source_path):
# Save the build environment in a file before building.
dump_environment(pkg.env_path)
# Save just the changes to the environment. This file can be
# safely installed, since it does not contain secret variables.
with open(pkg.env_mods_path, "w", encoding="utf-8") as env_mods_file:
mods = self.env_mods.shell_modifications(explicit=True, env=self.unmodified_env)
env_mods_file.write(mods)
for attr in ("configure_args", "cmake_args"):
try:
configure_args = getattr(pkg, attr)()
configure_args = " ".join(configure_args)
with open(pkg.configure_args_path, "w", encoding="utf-8") as args_file:
args_file.write(configure_args)
break
except Exception:
pass
# cache debug settings
debug_level = tty.debug_level()
# Spawn a daemon that reads from a pipe and redirects
# everything to log_path, and provide the phase for logging
builder = spack.builder.create(pkg)
for i, phase_fn in enumerate(builder):
# Keep a log file for each phase
log_dir = os.path.dirname(pkg.log_path)
log_file = "spack-build-%02d-%s-out.txt" % (i + 1, phase_fn.name.lower())
log_file = os.path.join(log_dir, log_file)
try:
# DEBUGGING TIP - to debug this section, insert an IPython
# embed here, and run the sections below without log capture
log_contextmanager = log_output(
log_file, self.echo, True, filter_fn=self.filter_fn
)
with log_contextmanager as logger:
# Redirect stdout and stderr to daemon pipe
with logger.force_echo():
inner_debug_level = tty.debug_level()
tty.set_debug(debug_level)
tty.msg(f"{self.pre} Executing phase: '{phase_fn.name}'")
tty.set_debug(inner_debug_level)
# Catch any errors to report to logging
self.timer.start(phase_fn.name)
phase_fn.execute()
self.timer.stop(phase_fn.name)
except BaseException:
combine_phase_logs(pkg.phase_log_files, pkg.log_path)
raise
# We assume loggers share echo True/False
self.echo = logger.echo
# After log, we can get all output/error files from the package stage
combine_phase_logs(pkg.phase_log_files, pkg.log_path)
log(pkg)
def build_process(pkg: "spack.package_base.PackageBase", install_args: dict) -> bool:
"""Perform the installation/build of the package.
This runs in a separate child process, and has its own process and
python module space set up by build_environment.start_build_process().
This essentially wraps an instance of ``BuildProcessInstaller`` so that we can
more easily create one in a subprocess.
This function's return value is returned to the parent process.
Arguments:
pkg: the package being installed.
install_args: arguments to installer from parent process.
"""
installer = BuildProcessInstaller(pkg, install_args)
# don't print long padded paths in executable debug output.
with spack.util.path.filter_padding():
return installer.run()
def deprecate(spec: "spack.spec.Spec", deprecator: "spack.spec.Spec", link_fn) -> None:
"""Deprecate this package in favor of deprecator spec"""
# Here we assume we don't deprecate across different stores, and that same hash
# means same binary artifacts
if spec.dag_hash() == deprecator.dag_hash():
return
# We can't really have control over external specs, and cannot link anything in their place
if spec.external:
return
# Install deprecator if it isn't installed already
if not spack.store.STORE.db.query(deprecator):
PackageInstaller([deprecator.package], explicit=True).install()
old_deprecator = spack.store.STORE.db.deprecator(spec)
if old_deprecator:
# Find this spec file from its old deprecation
specfile = spack.store.STORE.layout.deprecated_file_path(spec, old_deprecator)
else:
specfile = spack.store.STORE.layout.spec_file_path(spec)
# copy spec metadata to "deprecated" dir of deprecator
depr_specfile = spack.store.STORE.layout.deprecated_file_path(spec, deprecator)
fs.mkdirp(os.path.dirname(depr_specfile))
shutil.copy2(specfile, depr_specfile)
# Any specs deprecated in favor of this spec are re-deprecated in favor of its new deprecator
for deprecated in spack.store.STORE.db.specs_deprecated_by(spec):
deprecate(deprecated, deprecator, link_fn)
# Now that we've handled metadata, uninstall and replace with link
spack.package_base.PackageBase.uninstall_by_spec(spec, force=True, deprecator=deprecator)
link_fn(deprecator.prefix, spec.prefix)
| BuildProcessInstaller |
python | Pylons__pyramid | src/pyramid/authentication.py | {
"start": 14572,
"end": 22464
} | class ____(CallbackAuthenticationPolicy):
"""A :app:`Pyramid` :term:`authentication policy` which
obtains data from a Pyramid "auth ticket" cookie.
Constructor Arguments
``secret``
The secret (a string) used for auth_tkt cookie signing. This value
should be unique across all values provided to Pyramid for various
subsystem secrets (see :ref:`admonishment_against_secret_sharing`).
Required.
``callback``
Default: ``None``. A callback passed the userid and the
request, expected to return ``None`` if the userid doesn't
exist or a sequence of principal identifiers (possibly empty) if
the user does exist. If ``callback`` is ``None``, the userid
will be assumed to exist with no principals. Optional.
``cookie_name``
Default: ``auth_tkt``. The cookie name used
(string). Optional.
``secure``
Default: ``False``. Only send the cookie back over a secure
conn. Optional.
``include_ip``
Default: ``False``. Make the requesting IP address part of
the authentication data in the cookie. Optional.
For IPv6 this option is not recommended. The ``mod_auth_tkt``
specification does not specify how to handle IPv6 addresses, so using
this option in combination with IPv6 addresses may cause an
incompatible cookie. It ties the authentication ticket to that
individual's IPv6 address.
``timeout``
Default: ``None``. Maximum number of seconds which a newly
issued ticket will be considered valid. After this amount of
time, the ticket will expire (effectively logging the user
out). If this value is ``None``, the ticket never expires.
Optional.
``reissue_time``
Default: ``None``. If this parameter is set, it represents the number
of seconds that must pass before an authentication token cookie is
automatically reissued as the result of a request which requires
authentication. The duration is measured as the number of seconds
since the last auth_tkt cookie was issued and 'now'. If this value is
``0``, a new ticket cookie will be reissued on every request which
requires authentication.
A good rule of thumb: if you want auto-expired cookies based on
inactivity: set the ``timeout`` value to 1200 (20 mins) and set the
``reissue_time`` value to perhaps a tenth of the ``timeout`` value
(120 or 2 mins). It's nonsensical to set the ``timeout`` value lower
than the ``reissue_time`` value, as the ticket will never be reissued
if so. However, such a configuration is not explicitly prevented.
Optional.
``max_age``
Default: ``None``. The max age of the auth_tkt cookie, in
seconds. This differs from ``timeout`` inasmuch as ``timeout``
represents the lifetime of the ticket contained in the cookie,
while this value represents the lifetime of the cookie itself.
When this value is set, the cookie's ``Max-Age`` and
``Expires`` settings will be set, allowing the auth_tkt cookie
to last between browser sessions. It is typically nonsensical
to set this to a value that is lower than ``timeout`` or
``reissue_time``, although it is not explicitly prevented.
Optional.
``path``
Default: ``/``. The path for which the auth_tkt cookie is valid.
May be desirable if the application only serves part of a domain.
Optional.
``http_only``
Default: ``False``. Hide cookie from JavaScript by setting the
HttpOnly flag. Not honored by all browsers.
Optional.
``wild_domain``
Default: ``True``. An auth_tkt cookie will be generated for the
wildcard domain. If your site is hosted as ``example.com`` this
will make the cookie available for sites underneath ``example.com``
such as ``www.example.com``.
Optional.
``parent_domain``
Default: ``False``. An auth_tkt cookie will be generated for the
parent domain of the current site. For example if your site is
hosted under ``www.example.com`` a cookie will be generated for
``.example.com``. This can be useful if you have multiple sites
sharing the same domain. This option supercedes the ``wild_domain``
option.
Optional.
``domain``
Default: ``None``. If provided the auth_tkt cookie will only be
set for this domain. This option is not compatible with ``wild_domain``
and ``parent_domain``.
Optional.
``hashalg``
Default: ``sha512`` (the literal string).
Any hash algorithm supported by Python's ``hashlib.new()`` function
can be used as the ``hashalg``.
Cookies generated by different instances of AuthTktAuthenticationPolicy
using different ``hashalg`` options are not compatible. Switching the
``hashalg`` will imply that all existing users with a valid cookie will
be required to re-login.
Optional.
``debug``
Default: ``False``. If ``debug`` is ``True``, log messages to the
Pyramid debug logger about the results of various authentication
steps. The output from debugging is useful for reporting to maillist
or IRC channels when asking for support.
``samesite``
Default: ``'Lax'``. The 'samesite' option of the session cookie. Set
the value to the string ``'None'`` to turn off the samesite option.
.. versionchanged:: 1.4
Added the ``hashalg`` option, defaulting to ``sha512``.
.. versionchanged:: 1.5
Added the ``domain`` option.
Added the ``parent_domain`` option.
.. versionchanged:: 1.10
Added the ``samesite`` option and made the default ``'Lax'``.
Objects of this class implement the interface described by
:class:`pyramid.interfaces.IAuthenticationPolicy`.
"""
def __init__(
self,
secret,
callback=None,
cookie_name='auth_tkt',
secure=False,
include_ip=False,
timeout=None,
reissue_time=None,
max_age=None,
path="/",
http_only=False,
wild_domain=True,
debug=False,
hashalg='sha512',
parent_domain=False,
domain=None,
samesite='Lax',
):
self.cookie = AuthTktCookieHelper(
secret,
cookie_name=cookie_name,
secure=secure,
include_ip=include_ip,
timeout=timeout,
reissue_time=reissue_time,
max_age=max_age,
http_only=http_only,
path=path,
wild_domain=wild_domain,
hashalg=hashalg,
parent_domain=parent_domain,
domain=domain,
samesite=samesite,
)
self.callback = callback
self.debug = debug
def unauthenticated_userid(self, request):
"""The userid key within the auth_tkt cookie."""
result = self.cookie.identify(request)
if result:
return result['userid']
def remember(self, request, userid, **kw):
"""Accepts the following kw args: ``max_age=<int-seconds>,
``tokens=<sequence-of-ascii-strings>``.
Return a list of headers which will set appropriate cookies on
the response.
"""
return self.cookie.remember(request, userid, **kw)
def forget(self, request):
"""A list of headers which will delete appropriate cookies."""
return self.cookie.forget(request)
def b64encode(v):
return base64.b64encode(bytes_(v)).strip().replace(b'\n', b'')
def b64decode(v):
return base64.b64decode(bytes_(v))
# this class licensed under the MIT license (stolen from Paste)
| AuthTktAuthenticationPolicy |
python | walkccc__LeetCode | solutions/629. K Inverse Pairs Array/629.py | {
"start": 0,
"end": 560
} | class ____:
def kInversePairs(self, n: int, k: int) -> int:
MOD = 1_000_000_007
# dp[i][j] := the number of permutations of numbers 1..i with j inverse pairs
dp = [[0] * (k + 1) for _ in range(n + 1)]
# If there's no inverse pair, the permutation is unique '123..i'
for i in range(n + 1):
dp[i][0] = 1
for i in range(1, n + 1):
for j in range(1, k + 1):
dp[i][j] = (dp[i][j - 1] + dp[i - 1][j]) % MOD
if j - i >= 0:
dp[i][j] = (dp[i][j] - dp[i - 1][j - i] + MOD) % MOD
return dp[n][k]
| Solution |
python | Netflix__metaflow | test/unit/configs/flows/config_naming_flow.py | {
"start": 240,
"end": 3007
} | class ____(FlowSpec):
"""Test flow for Config names with underscores and dashes."""
# Config with underscore in name
config_with_underscore = Config(
"config_with_underscore", default_value={"test": "underscore", "value": 42}
)
# Config with dash in name
config_with_dash = Config(
"config-with-dash", default_value={"test": "dash", "value": 99}
)
# Config with both underscore and dash in name
config_mixed = Config(
"config-with_both-mixed", default_value={"test": "mixed", "value": 123}
)
@step
def start(self):
"""Access configs with different naming patterns and validate values."""
# Access underscore config
self.underscore_test = self.config_with_underscore.test
self.underscore_value = self.config_with_underscore.value
self.underscore_dict = dict(self.config_with_underscore)
# Validate underscore config values
assert (
self.underscore_test == "underscore"
), f"Expected 'underscore', got {self.underscore_test}"
assert self.underscore_value == 42, f"Expected 42, got {self.underscore_value}"
assert self.underscore_dict == {
"test": "underscore",
"value": 42,
}, f"Unexpected dict: {self.underscore_dict}"
# Access dash config
self.dash_test = self.config_with_dash.test
self.dash_value = self.config_with_dash.value
self.dash_dict = dict(self.config_with_dash)
# Validate dash config values
assert self.dash_test == "dash", f"Expected 'dash', got {self.dash_test}"
assert self.dash_value == 99, f"Expected 99, got {self.dash_value}"
assert self.dash_dict == {
"test": "dash",
"value": 99,
}, f"Unexpected dict: {self.dash_dict}"
# Access mixed config
self.mixed_test = self.config_mixed.test
self.mixed_value = self.config_mixed.value
self.mixed_dict = dict(self.config_mixed)
# Validate mixed config values
assert self.mixed_test == "mixed", f"Expected 'mixed', got {self.mixed_test}"
assert self.mixed_value == 123, f"Expected 123, got {self.mixed_value}"
assert self.mixed_dict == {
"test": "mixed",
"value": 123,
}, f"Unexpected dict: {self.mixed_dict}"
print(f"✓ Underscore config validated: {self.underscore_dict}")
print(f"✓ Dash config validated: {self.dash_dict}")
print(f"✓ Mixed config validated: {self.mixed_dict}")
self.next(self.end)
@step
def end(self):
"""End step."""
print("ConfigNamingFlow completed successfully")
if __name__ == "__main__":
ConfigNamingFlow()
| ConfigNamingFlow |
python | PyCQA__pylint | tests/functional/r/recursion/recursion_error_2667.py | {
"start": 140,
"end": 288
} | class ____:
def __init__(self):
self._slice = slice(0, 10)
def incr(self):
self._slice = slice(0, self._slice.stop + 1)
| MyClass |
python | huggingface__transformers | src/transformers/models/qwen2_moe/modular_qwen2_moe.py | {
"start": 2735,
"end": 3475
} | class ____(LlamaAttention):
def __init__(self, config: Qwen2MoeConfig, layer_idx: int):
super().__init__(config, layer_idx)
if self.config.layer_types[layer_idx] == "sliding_attention":
self.sliding_window = config.sliding_window
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.qkv_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.qkv_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.qkv_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
| Qwen2MoeAttention |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/launchers/multiprocessing.py | {
"start": 1485,
"end": 6322
} | class ____(_Launcher):
r"""Launches processes that run a given function in parallel, and joins them all at the end.
The main process in which this launcher is invoked creates N so-called worker processes (using
:func:`torch.multiprocessing.start_processes`) that run the given function.
Worker processes have a rank that ranges from 0 to N - 1.
Note:
- This launcher requires all objects to be pickleable.
- It is important that the entry point to the program/script is guarded by ``if __name__ == "__main__"``.
- With start method 'fork' the user must ensure that no CUDA context gets created in the main process before
the launcher is invoked. E.g., one should avoid creating cuda tensors or calling ``torch.cuda.*`` functions
before calling ``Trainer.fit``.
Args:
strategy: A reference to the strategy that is used together with this launcher.
start_method: The method how to start the processes.
- 'spawn': The default start method. Requires all objects to be pickleable.
- 'fork': Preferable for IPython/Jupyter environments where 'spawn' is not available. Not available on
the Windows platform for example.
- 'forkserver': Alternative implementation to 'fork'.
"""
def __init__(
self,
strategy: "ParallelStrategy",
start_method: Literal["spawn", "fork", "forkserver"] = "spawn",
) -> None:
self._strategy = strategy
self._start_method = start_method
if start_method not in mp.get_all_start_methods():
raise ValueError(
f"The start method '{self._start_method}' is not available on this platform. Available methods are:"
f" {', '.join(mp.get_all_start_methods())}"
)
@property
@override
def is_interactive_compatible(self) -> bool:
# The start method 'spawn' is not supported in interactive environments
# The start method 'fork' is the only one supported in Jupyter environments, with constraints around CUDA
# initialization. For more context, see https://github.com/Lightning-AI/pytorch-lightning/issues/7550
return self._start_method == "fork"
@override
def launch(self, function: Callable, *args: Any, **kwargs: Any) -> Any:
"""Launches processes that run the given function in parallel.
The function is allowed to have a return value. However, when all processes join, only the return value
of worker process 0 gets returned from this `launch` method in the main process.
Arguments:
function: The entry point for all launched processes.
*args: Optional positional arguments to be passed to the given function.
**kwargs: Optional keyword arguments to be passed to the given function.
"""
if self._start_method in ("fork", "forkserver"):
_check_bad_cuda_fork()
if self._start_method == "spawn":
_check_missing_main_guard()
# The default cluster environment in Lightning chooses a random free port number
# This needs to be done in the main process here before starting processes to ensure each rank will connect
# through the same port
assert self._strategy.cluster_environment is not None
os.environ["MASTER_PORT"] = str(self._strategy.cluster_environment.main_port)
context = mp.get_context(self._start_method)
return_queue = context.SimpleQueue()
if self._start_method == "spawn":
global_states = _GlobalStateSnapshot.capture()
process_args = [function, args, kwargs, return_queue, global_states]
else:
process_args = [function, args, kwargs, return_queue]
mp.start_processes(
self._wrapping_function,
args=process_args,
nprocs=self._strategy.num_processes,
start_method=self._start_method,
)
return return_queue.get()
def _wrapping_function(
self,
process_idx: int,
function: Callable,
args: Any,
kwargs: Any,
return_queue: SimpleQueue,
global_states: Optional["_GlobalStateSnapshot"] = None,
) -> None:
if global_states:
global_states.restore()
if self._start_method == "spawn" and isinstance(self._strategy.accelerator, CPUAccelerator):
args, kwargs = _disable_module_memory_sharing((args, kwargs))
_set_num_threads_if_needed(num_processes=self._strategy.num_processes)
os.environ["LOCAL_RANK"] = str(process_idx)
results = function(*args, **kwargs)
if process_idx == 0:
return_queue.put(move_data_to_device(results, "cpu"))
@dataclass
| _MultiProcessingLauncher |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 20991,
"end": 25241
} | class ____(NumericCommon[_N], TypeEngine[_N]):
"""Type representing floating point types, such as ``FLOAT`` or ``REAL``.
This type returns Python ``float`` objects by default, unless the
:paramref:`.Float.asdecimal` flag is set to ``True``, in which case they
are coerced to ``decimal.Decimal`` objects.
When a :paramref:`.Float.precision` is not provided in a
:class:`_types.Float` type some backend may compile this type as
an 8 bytes / 64 bit float datatype. To use a 4 bytes / 32 bit float
datatype a precision <= 24 can usually be provided or the
:class:`_types.REAL` type can be used.
This is known to be the case in the PostgreSQL and MSSQL dialects
that render the type as ``FLOAT`` that's in both an alias of
``DOUBLE PRECISION``. Other third party dialects may have similar
behavior.
"""
__visit_name__ = "float"
@overload
def __init__(
self: Float[float],
precision: Optional[int] = ...,
asdecimal: Literal[False] = ...,
decimal_return_scale: Optional[int] = ...,
): ...
@overload
def __init__(
self: Float[decimal.Decimal],
precision: Optional[int] = ...,
asdecimal: Literal[True] = ...,
decimal_return_scale: Optional[int] = ...,
): ...
def __init__(
self: Float[_N],
precision: Optional[int] = None,
asdecimal: bool = False,
decimal_return_scale: Optional[int] = None,
):
r"""
Construct a Float.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``. Backends **should** attempt to ensure this precision
indicates a number of digits for the generic
:class:`_sqltypes.Float` datatype.
.. note:: For the Oracle Database backend, the
:paramref:`_sqltypes.Float.precision` parameter is not accepted
when rendering DDL, as Oracle Database does not support float precision
specified as a number of decimal places. Instead, use the
Oracle Database-specific :class:`_oracle.FLOAT` datatype and specify the
:paramref:`_oracle.FLOAT.binary_precision` parameter. This is new
in version 2.0 of SQLAlchemy.
To create a database agnostic :class:`_types.Float` that
separately specifies binary precision for Oracle Database, use
:meth:`_types.TypeEngine.with_variant` as follows::
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy.dialects import oracle
Column(
"float_data",
Float(5).with_variant(oracle.FLOAT(binary_precision=16), "oracle"),
)
:param asdecimal: the same flag as that of :class:`.Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
results in floating point conversion.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Note that the
MySQL float types, which do include "scale", will use "scale"
as the default for decimal_return_scale, if not otherwise specified.
""" # noqa: E501
super().__init__(
precision=precision,
scale=None,
asdecimal=asdecimal,
decimal_return_scale=decimal_return_scale,
)
@property
def _type_affinity(self):
return Float
def result_processor(self, dialect, coltype):
if self.asdecimal:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif dialect.supports_native_decimal:
return processors.to_float
else:
return None
| Float |
python | pytorch__pytorch | torch/backends/cudnn/__init__.py | {
"start": 6208,
"end": 7418
} | class ____(PropModule):
enabled = ContextProp(torch._C._get_cudnn_enabled, torch._C._set_cudnn_enabled)
deterministic = ContextProp(
torch._C._get_cudnn_deterministic, torch._C._set_cudnn_deterministic
)
benchmark = ContextProp(
torch._C._get_cudnn_benchmark, torch._C._set_cudnn_benchmark
)
benchmark_limit = None
if is_available():
benchmark_limit = ContextProp(
torch._C._cuda_get_cudnn_benchmark_limit,
torch._C._cuda_set_cudnn_benchmark_limit,
)
allow_tf32 = ContextProp(
torch._C._get_cudnn_allow_tf32, torch._C._set_cudnn_allow_tf32
)
conv = _FP32Precision("cuda", "conv")
rnn = _FP32Precision("cuda", "rnn")
fp32_precision = ContextProp(
_get_fp32_precision_getter("cuda", "all"),
_set_fp32_precision_setter("cuda", "all"),
)
# This is the sys.modules replacement trick, see
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = CudnnModule(sys.modules[__name__], __name__)
# Add type annotation for the replaced module
enabled: bool
deterministic: bool
benchmark: bool
allow_tf32: bool
benchmark_limit: int
| CudnnModule |
python | kamyu104__LeetCode-Solutions | Python/optimize-water-distribution-in-a-village.py | {
"start": 538,
"end": 1118
} | class ____(object):
def minCostToSupplyWater(self, n, wells, pipes):
"""
:type n: int
:type wells: List[int]
:type pipes: List[List[int]]
:rtype: int
"""
w = [[c, 0, i] for i, c in enumerate(wells, 1)]
p = [[c, i, j] for i, j, c in pipes]
result = 0
union_find = UnionFind(n+1)
for c, x, y in sorted(w+p):
if not union_find.union_set(x, y):
continue
result += c
if union_find.count == 1:
break
return result
| Solution |
python | sphinx-doc__sphinx | utils/bump_version.py | {
"start": 454,
"end": 3291
} | class ____:
major: int
minor: int
micro: int
level: Literal['a', 'b', 'rc', 'final']
serial: int
@property
def releaselevel(self) -> Literal['alpha', 'beta', 'candidate', 'final']:
if self.level == 'final':
return 'final'
if self.level == 'a':
return 'alpha'
if self.level == 'b':
return 'beta'
if self.level == 'rc':
return 'candidate'
msg = f'Unknown release level: {self.level}'
raise RuntimeError(msg)
@property
def is_final(self) -> bool:
return self.level == 'final'
@property
def version(self) -> str:
return f'{self.major}.{self.minor}.{self.micro}'
@property
def release(self) -> str:
return f'{self.major}.{self.minor}.{self.micro}{self.level}{self.serial}'
@property
def version_tuple(self) -> tuple[int, int, int]:
return self.major, self.minor, self.micro
@property
def release_tuple(self) -> tuple[int, int, int, str, int]:
return self.major, self.minor, self.micro, self.releaselevel, self.serial
def parse_version(version: str) -> VersionInfo:
# Final version:
# - "X.Y.Z" -> (X, Y, Z, 'final', 0)
# - "X.Y" -> (X, Y, 0, 'final', 0) [shortcut]
if matched := re.fullmatch(r'(\d+)\.(\d+)(?:\.(\d+))?', version):
major, minor, micro = matched.groups(default='0')
return VersionInfo(int(major), int(minor), int(micro), 'final', 0)
# Pre-release versions:
# - "X.Y.ZaN" -> (X, Y, Z, 'alpha', N)
# - "X.Y.ZbN" -> (X, Y, Z, 'beta', N)
# - "X.Y.ZrcN" -> (X, Y, Z, 'candidate', N)
if matched := re.fullmatch(r'(\d+)\.(\d+)\.(\d+)(a|b|rc)(\d+)', version):
major, minor, micro, level, serial = matched.groups()
return VersionInfo(int(major), int(minor), int(micro), level, int(serial)) # type: ignore[arg-type]
msg = f'Unknown version: {version}'
raise RuntimeError(msg)
def bump_version(
path: Path, version_info: VersionInfo, in_develop: bool = True
) -> None:
if in_develop or version_info.is_final:
version = version_info.version
else:
version = version_info.release
with open(path, encoding='utf-8') as f:
lines = f.read().splitlines(keepends=True)
for i, line in enumerate(lines):
if line.startswith('__version__: Final = '):
lines[i] = f"__version__: Final = '{version}'\n"
continue
if line.startswith('version_info: Final = '):
lines[i] = f'version_info: Final = {version_info.release_tuple}\n'
continue
if line.startswith('_in_development = '):
lines[i] = f'_in_development = {in_develop}\n'
continue
with open(path, 'w', encoding='utf-8') as f:
f.writelines(lines)
| VersionInfo |
python | huggingface__transformers | src/transformers/models/xlnet/modeling_xlnet.py | {
"start": 74732,
"end": 81985
} | class ____(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLNetModel(config)
self.sequence_summary = XLNetSequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, 1)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
input_mask: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
mems: Optional[torch.Tensor] = None,
perm_mask: Optional[torch.Tensor] = None,
target_mapping: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_mems: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs, # delete when `use_cache` is removed in XLNetModel
) -> Union[tuple, XLNetForMultipleChoiceOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
input_mask (`torch.FloatTensor` of shape `batch_size, num_choices, sequence_length`, *optional*):
Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for
real tokens and 1 for padding which is kept for compatibility with the original code base.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **masked**,
- 0 for tokens that are **not masked**.
You can only uses one of `input_mask` and `attention_mask`.
mems (`list[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential
decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
they have already been computed.
`use_mems` has to be set to `True` to make use of `mems`.
perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:
- if `perm_mask[k, i, j] = 0`, i attend to j in batch k;
- if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.
If not set, each token attends to all the others (full bidirectional attention). Only used during
pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is
on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
(generation).
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
use_mems (`bool`, *optional*):
Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden
states from previous forward passes to compute attention, which can significantly improve performance for
sequential decoding tasks.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
transformer_outputs = self.transformer(
flat_input_ids,
token_type_ids=flat_token_type_ids,
input_mask=flat_input_mask,
attention_mask=flat_attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
inputs_embeds=flat_inputs_embeds,
use_mems=use_mems,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.view(-1))
if not return_dict:
output = (reshaped_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return XLNetForMultipleChoiceOutput(
loss=loss,
logits=reshaped_logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
"""
)
| XLNetForMultipleChoice |
python | getsentry__sentry | src/sentry/analytics/events/second_platform_added.py | {
"start": 78,
"end": 268
} | class ____(analytics.Event):
user_id: int
organization_id: int
project_id: int
platform: str | None = None
analytics.register(SecondPlatformAddedEvent)
| SecondPlatformAddedEvent |
python | pytoolz__toolz | toolz/functoolz.py | {
"start": 4561,
"end": 14156
} | class ____:
""" Curry a callable function
Enables partial application of arguments through calling a function with an
incomplete set of arguments.
>>> def mul(x, y):
... return x * y
>>> mul = curry(mul)
>>> double = mul(2)
>>> double(10)
20
Also supports keyword arguments
>>> @curry # Can use curry as a decorator
... def f(x, y, a=10):
... return a * (x + y)
>>> add = f(a=1)
>>> add(2, 3)
5
See Also:
toolz.curried - namespace of curried functions
https://toolz.readthedocs.io/en/latest/curry.html
"""
def __init__(self, *args, **kwargs):
if not args:
raise TypeError('__init__() takes at least 2 arguments (1 given)')
func, args = args[0], args[1:]
if not callable(func):
raise TypeError("Input must be callable")
# curry- or functools.partial-like object? Unpack and merge arguments
if (
hasattr(func, 'func')
and hasattr(func, 'args')
and hasattr(func, 'keywords')
and isinstance(func.args, tuple)
):
_kwargs = {}
if func.keywords:
_kwargs.update(func.keywords)
_kwargs.update(kwargs)
kwargs = _kwargs
args = func.args + args
func = func.func
if kwargs:
self._partial = partial(func, *args, **kwargs)
else:
self._partial = partial(func, *args)
self.__doc__ = getattr(func, '__doc__', None)
self.__name__ = getattr(func, '__name__', '<curry>')
self.__module__ = getattr(func, '__module__', None)
self.__qualname__ = getattr(func, '__qualname__', None)
self._sigspec = None
self._has_unknown_args = None
@instanceproperty
def func(self):
return self._partial.func
@instanceproperty
def __signature__(self):
sig = inspect.signature(self.func)
args = self.args or ()
keywords = self.keywords or {}
if is_partial_args(self.func, args, keywords, sig) is False:
raise TypeError('curry object has incorrect arguments')
params = list(sig.parameters.values())
skip = 0
for param in params[:len(args)]:
if param.kind == param.VAR_POSITIONAL:
break
skip += 1
kwonly = False
newparams = []
for param in params[skip:]:
kind = param.kind
default = param.default
if kind == param.VAR_KEYWORD:
pass
elif kind == param.VAR_POSITIONAL:
if kwonly:
continue
elif param.name in keywords:
default = keywords[param.name]
kind = param.KEYWORD_ONLY
kwonly = True
else:
if kwonly:
kind = param.KEYWORD_ONLY
if default is param.empty:
default = no_default
newparams.append(param.replace(default=default, kind=kind))
return sig.replace(parameters=newparams)
@instanceproperty
def args(self):
return self._partial.args
@instanceproperty
def keywords(self):
return self._partial.keywords
@instanceproperty
def func_name(self):
return self.__name__
def __str__(self):
return str(self.func)
def __repr__(self):
return repr(self.func)
def __hash__(self):
return hash((self.func, self.args,
frozenset(self.keywords.items()) if self.keywords
else None))
def __eq__(self, other):
return (isinstance(other, curry) and self.func == other.func and
self.args == other.args and self.keywords == other.keywords)
def __ne__(self, other):
return not self.__eq__(other)
def __call__(self, *args, **kwargs):
try:
return self._partial(*args, **kwargs)
except TypeError as exc:
if self._should_curry(args, kwargs, exc):
return self.bind(*args, **kwargs)
raise
def _should_curry(self, args, kwargs, exc=None):
func = self.func
args = self.args + args
if self.keywords:
kwargs = dict(self.keywords, **kwargs)
if self._sigspec is None:
sigspec = self._sigspec = _sigs.signature_or_spec(func)
self._has_unknown_args = has_varargs(func, sigspec) is not False
else:
sigspec = self._sigspec
if is_partial_args(func, args, kwargs, sigspec) is False:
# Nothing can make the call valid
return False
elif self._has_unknown_args:
# The call may be valid and raised a TypeError, but we curry
# anyway because the function may have `*args`. This is useful
# for decorators with signature `func(*args, **kwargs)`.
return True
elif not is_valid_args(func, args, kwargs, sigspec):
# Adding more arguments may make the call valid
return True
else:
# There was a genuine TypeError
return False
def bind(self, *args, **kwargs):
return type(self)(self, *args, **kwargs)
def call(self, *args, **kwargs):
return self._partial(*args, **kwargs)
def __get__(self, instance, owner):
if instance is None:
return self
return curry(self, instance)
def __reduce__(self):
func = self.func
modname = getattr(func, '__module__', None)
qualname = getattr(func, '__qualname__', None)
if qualname is None: # pragma: no cover
qualname = getattr(func, '__name__', None)
is_decorated = None
if modname and qualname:
attrs = []
obj = import_module(modname)
for attr in qualname.split('.'):
if isinstance(obj, curry):
attrs.append('func')
obj = obj.func
obj = getattr(obj, attr, None)
if obj is None:
break
attrs.append(attr)
if isinstance(obj, curry) and obj.func is func:
is_decorated = obj is self
qualname = '.'.join(attrs)
func = f'{modname}:{qualname}'
# functools.partial objects can't be pickled
userdict = tuple((k, v) for k, v in self.__dict__.items()
if k not in ('_partial', '_sigspec'))
state = (type(self), func, self.args, self.keywords, userdict,
is_decorated)
return _restore_curry, state
def _restore_curry(cls, func, args, kwargs, userdict, is_decorated):
if isinstance(func, str):
modname, qualname = func.rsplit(':', 1)
obj = import_module(modname)
for attr in qualname.split('.'):
obj = getattr(obj, attr)
if is_decorated:
return obj
func = obj.func
obj = cls(func, *args, **(kwargs or {}))
obj.__dict__.update(userdict)
return obj
@curry
def memoize(func, cache=None, key=None):
""" Cache a function's result for speedy future evaluation
Considerations:
Trades memory for speed.
Only use on pure functions.
>>> def add(x, y): return x + y
>>> add = memoize(add)
Or use as a decorator
>>> @memoize
... def add(x, y):
... return x + y
Use the ``cache`` keyword to provide a dict-like object as an initial cache
>>> @memoize(cache={(1, 2): 3})
... def add(x, y):
... return x + y
Note that the above works as a decorator because ``memoize`` is curried.
It is also possible to provide a ``key(args, kwargs)`` function that
calculates keys used for the cache, which receives an ``args`` tuple and
``kwargs`` dict as input, and must return a hashable value. However,
the default key function should be sufficient most of the time.
>>> # Use key function that ignores extraneous keyword arguments
>>> @memoize(key=lambda args, kwargs: args)
... def add(x, y, verbose=False):
... if verbose:
... print('Calculating %s + %s' % (x, y))
... return x + y
"""
if cache is None:
cache = {}
try:
may_have_kwargs = has_keywords(func) is not False
# Is unary function (single arg, no variadic argument or keywords)?
is_unary = is_arity(1, func)
except TypeError: # pragma: no cover
may_have_kwargs = True
is_unary = False
if key is None:
if is_unary:
def key(args, kwargs):
return args[0]
elif may_have_kwargs:
def key(args, kwargs):
return (
args or None,
frozenset(kwargs.items()) if kwargs else None,
)
else:
def key(args, kwargs):
return args
def memof(*args, **kwargs):
k = key(args, kwargs)
try:
return cache[k]
except TypeError:
raise TypeError("Arguments to memoized function must be hashable")
except KeyError:
cache[k] = result = func(*args, **kwargs)
return result
try:
memof.__name__ = func.__name__
except AttributeError:
pass
memof.__doc__ = func.__doc__
memof.__wrapped__ = func
return memof
| curry |
python | getsentry__sentry | src/sentry/organizations/services/organization/model.py | {
"start": 6493,
"end": 6562
} | class ____(TypedDict):
require_2fa: bool
| RpcOrganizationFlagsUpdate |
python | scrapy__scrapy | scrapy/core/downloader/middleware.py | {
"start": 739,
"end": 6309
} | class ____(MiddlewareManager):
component_name = "downloader middleware"
@classmethod
def _get_mwlist_from_settings(cls, settings: BaseSettings) -> list[Any]:
return build_component_list(settings.getwithbase("DOWNLOADER_MIDDLEWARES"))
def _add_middleware(self, mw: Any) -> None:
if hasattr(mw, "process_request"):
self.methods["process_request"].append(mw.process_request)
self._check_mw_method_spider_arg(mw.process_request)
if hasattr(mw, "process_response"):
self.methods["process_response"].appendleft(mw.process_response)
self._check_mw_method_spider_arg(mw.process_response)
if hasattr(mw, "process_exception"):
self.methods["process_exception"].appendleft(mw.process_exception)
self._check_mw_method_spider_arg(mw.process_exception)
@inlineCallbacks
def download(
self,
download_func: Callable[[Request], Deferred[Response]],
request: Request,
spider: Spider | None = None,
) -> Generator[Deferred[Any], Any, Response | Request]:
if argument_is_required(download_func, "spider"):
warnings.warn(
"The spider argument of download_func is deprecated"
" and will not be passed in future Scrapy versions.",
ScrapyDeprecationWarning,
stacklevel=2,
)
need_spider_arg = True
else:
need_spider_arg = False
@inlineCallbacks
def process_request(
request: Request,
) -> Generator[Deferred[Any], Any, Response | Request]:
for method in self.methods["process_request"]:
method = cast("Callable", method)
if method in self._mw_methods_requiring_spider:
response = yield deferred_from_coro(
method(request=request, spider=self._spider)
)
else:
response = yield deferred_from_coro(method(request=request))
if response is not None and not isinstance(
response, (Response, Request)
):
raise _InvalidOutput(
f"Middleware {method.__qualname__} must return None, Response or "
f"Request, got {response.__class__.__name__}"
)
if response:
return response
if need_spider_arg:
return (yield download_func(request, self._spider)) # type: ignore[call-arg]
return (yield download_func(request))
@inlineCallbacks
def process_response(
response: Response | Request,
) -> Generator[Deferred[Any], Any, Response | Request]:
if response is None:
raise TypeError("Received None in process_response")
if isinstance(response, Request):
return response
for method in self.methods["process_response"]:
method = cast("Callable", method)
if method in self._mw_methods_requiring_spider:
response = yield deferred_from_coro(
method(request=request, response=response, spider=self._spider)
)
else:
response = yield deferred_from_coro(
method(request=request, response=response)
)
if not isinstance(response, (Response, Request)):
raise _InvalidOutput(
f"Middleware {method.__qualname__} must return Response or Request, "
f"got {type(response)}"
)
if isinstance(response, Request):
return response
return response
@inlineCallbacks
def process_exception(
exception: Exception,
) -> Generator[Deferred[Any], Any, Response | Request]:
for method in self.methods["process_exception"]:
method = cast("Callable", method)
if method in self._mw_methods_requiring_spider:
response = yield deferred_from_coro(
method(
request=request, exception=exception, spider=self._spider
)
)
else:
response = yield deferred_from_coro(
method(request=request, exception=exception)
)
if response is not None and not isinstance(
response, (Response, Request)
):
raise _InvalidOutput(
f"Middleware {method.__qualname__} must return None, Response or "
f"Request, got {type(response)}"
)
if response:
return response
raise exception
if spider:
self._warn_spider_arg("download")
self._set_compat_spider(spider)
try:
result: Response | Request = yield process_request(request)
except Exception as ex:
yield _defer_sleep()
# either returns a request or response (which we pass to process_response())
# or reraises the exception
result = yield process_exception(ex)
return (yield process_response(result))
| DownloaderMiddlewareManager |
python | optuna__optuna | tests/test_experimental.py | {
"start": 167,
"end": 3857
} | class ____:
def __init__(self, a: Any, b: Any, c: Any) -> None:
pass
def _method(self) -> None:
"""summary
detail
"""
pass
def _method_expected(self) -> None:
"""summary
detail
.. note::
Added in v1.1.0 as an experimental feature. The interface may change in newer versions
without prior notice. See https://github.com/optuna/optuna/releases/tag/v1.1.0.
"""
pass
@staticmethod
def _static_method() -> None:
pass
@pytest.mark.parametrize("version", ["1.1", 100, None])
def test_experimental_raises_error_for_invalid_version(version: Any) -> None:
with pytest.raises(ValueError):
_experimental.experimental_func(version)
with pytest.raises(ValueError):
_experimental.experimental_class(version)
def test_experimental_func_decorator() -> None:
version = "1.1.0"
decorator_experimental = _experimental.experimental_func(version)
assert callable(decorator_experimental)
decorated_func = decorator_experimental(_sample_func)
assert decorated_func.__name__ == _sample_func.__name__
assert decorated_func.__doc__ == _experimental._EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)
with pytest.warns(ExperimentalWarning):
decorated_func()
def test_experimental_func_decorator_with_static_method() -> None:
version = "1.1.0"
decorator_experimental = _experimental.experimental_func(version)
assert callable(decorator_experimental)
decorated_func = decorator_experimental(_Sample._static_method)
assert decorated_func.__name__ == _Sample._static_method.__name__
assert decorated_func.__doc__ == _experimental._EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)
with pytest.warns(ExperimentalWarning):
decorated_func()
def test_experimental_instance_method_decorator() -> None:
version = "1.1.0"
decorator_experimental = _experimental.experimental_func(version)
assert callable(decorator_experimental)
decorated_method = decorator_experimental(_Sample._method)
assert decorated_method.__name__ == _Sample._method.__name__
assert decorated_method.__doc__ == _Sample._method_expected.__doc__
with pytest.warns(ExperimentalWarning):
decorated_method(None) # type: ignore
def test_experimental_class_decorator() -> None:
version = "1.1.0"
decorator_experimental = _experimental.experimental_class(version)
assert callable(decorator_experimental)
decorated_class = decorator_experimental(_Sample)
assert decorated_class.__name__ == "_Sample"
assert decorated_class.__init__.__name__ == "__init__"
assert decorated_class.__doc__ == _experimental._EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)
with pytest.warns(ExperimentalWarning):
decorated_class("a", "b", "c")
def test_experimental_class_decorator_name() -> None:
name = "foo"
decorator_experimental = _experimental.experimental_class("1.1.0", name=name)
decorated_sample = decorator_experimental(_Sample)
with pytest.warns(ExperimentalWarning) as record:
decorated_sample("a", "b", "c")
assert isinstance(record.list[0].message, Warning)
assert name in record.list[0].message.args[0]
def test_experimental_decorator_name() -> None:
name = "bar"
decorator_experimental = _experimental.experimental_func("1.1.0", name=name)
decorated_sample_func = decorator_experimental(_sample_func)
with pytest.warns(ExperimentalWarning) as record:
decorated_sample_func()
assert isinstance(record.list[0].message, Warning)
assert name in record.list[0].message.args[0]
| _Sample |
python | astropy__astropy | astropy/utils/misc.py | {
"start": 3608,
"end": 11798
} | class ____:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
This is primarily intended for use in the astropy testing suit, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
#Any code using numpy.random at this indent level will act just as it
#would have if it had been before the with statement - e.g. whatever
#the default seed is.
"""
def __init__(self, seed: int) -> None:
self.seed = seed
def __enter__(self) -> None:
self.startstate = np.random.get_state()
np.random.seed(self.seed)
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
np.random.set_state(self.startstate)
def find_api_page(
obj: object,
version: str | None = None,
openinbrowser: bool = True,
timeout: float | None = None,
) -> str:
"""
Determines the URL of the API page for the specified object, and
optionally open that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
location that should be the *base* of the documentation. Defaults to
latest if you are on aren't on a release, otherwise, the version you
are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
"""
import webbrowser
from zlib import decompress
from astropy.utils.data import get_readable_fileobj
if (
not isinstance(obj, str)
and hasattr(obj, "__module__")
and hasattr(obj, "__name__")
):
obj = obj.__module__ + "." + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from astropy import version
if version.release:
version = "v" + version.version
else:
version = "dev"
if "://" in version:
if version.endswith("index.html"):
baseurl = version[:-10]
elif version.endswith("/"):
baseurl = version
else:
baseurl = version + "/"
elif version == "dev" or version == "latest":
baseurl = "http://devdocs.astropy.org/"
else:
baseurl = f"https://docs.astropy.org/en/{version}/"
# Custom request headers; see
# https://github.com/astropy/astropy/issues/8990
url = baseurl + "objects.inv"
headers = {"User-Agent": f"Astropy/{version}"}
with get_readable_fileobj(
url, encoding="binary", remote_timeout=timeout, http_headers=headers
) as uf:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b"\n", oldidx + 1)
headerlines.append(oiread[(oldidx + 1) : idx].decode("utf-8"))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if "The remainder of this file is compressed using zlib" not in compr:
raise ValueError(
f"The file downloaded from {baseurl}objects.inv does not seem to be"
"the usual Sphinx objects.inv format. Maybe it "
"has changed?"
)
compressed = oiread[(idx + 1) :]
decompressed = decompress(compressed).decode("utf-8")
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith("$"):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError(f"Could not find the docs for the object {obj}")
elif openinbrowser:
webbrowser.open(resurl)
return resurl
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
online_docs_root: Final = "https://docs.astropy.org/en/{}/".format(
"latest" if "dev" in __version__ else f"v{__version__}"
)
def online_help(query: str) -> None:
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
import webbrowser
webbrowser.open(online_docs_root + f"search.html?{urlencode({'q': query})}")
# _has_hidden_attribute() can be deleted together with deprecated is_path_hidden() and
# walk_skip_hidden().
if sys.platform == "win32":
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
https://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection.
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
result = bool(attrs & 2) and attrs != -1
except AttributeError:
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
@deprecated(since="6.0")
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b".")
else:
is_dotted = name.startswith(".")
return is_dotted or _has_hidden_attribute(filepath)
@deprecated(since="6.0")
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter ``topdown`` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See Also
--------
os.walk : For a description of the parameters
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror, followlinks=followlinks
):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
| NumpyRNGContext |
python | rq__rq | tests/test_cron.py | {
"start": 529,
"end": 910
} | class ____(Exception):
pass
def run_scheduler(redis_connection_kwargs):
"""Target function to run the scheduler in a separate process."""
scheduler = CronScheduler(connection=Redis(**redis_connection_kwargs))
# Register a job that runs every second to keep the scheduler busy
scheduler.register(do_nothing, 'default', interval=1)
scheduler.start()
| BreakLoop |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 3245,
"end": 4829
} | class ____(LossFunctionWrapper):
"""Computes the mean of absolute difference between labels and predictions.
Formula:
```python
loss = mean(abs(y_true - y_pred))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_absolute_error",
dtype=None,
):
super().__init__(
mean_absolute_error, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.MeanAbsolutePercentageError")
| MeanAbsoluteError |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_s3_to_sql.py | {
"start": 1144,
"end": 4840
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
models.Connection(
conn_id="s3_test",
conn_type="aws",
schema="test",
extra='{"aws_access_key_id": "aws_access_key_id", "aws_secret_access_key":'
' "aws_secret_access_key"}',
)
)
create_connection_without_db(
models.Connection(
conn_id="sql_test",
conn_type="postgres",
host="some.host.com",
schema="test_db",
login="user",
password="password",
)
)
self.s3_to_sql_transfer_kwargs = {
"task_id": "s3_to_sql_task",
"aws_conn_id": "s3_test",
"sql_conn_id": "sql_test",
"s3_key": "test/test.csv",
"s3_bucket": "testbucket",
"table": "sql_table",
"column_list": ["Column1", "Column2"],
"schema": "sql_schema",
"commit_every": 5000,
}
@pytest.fixture
def mock_parser(self):
return MagicMock()
@pytest.fixture
def mock_bad_hook(self):
bad_hook = MagicMock()
del bad_hook.insert_rows
return bad_hook
@patch("airflow.providers.amazon.aws.transfers.s3_to_sql.NamedTemporaryFile")
@patch("airflow.models.connection.Connection.get_hook")
@patch("airflow.providers.amazon.aws.transfers.s3_to_sql.S3Hook.get_key")
def test_execute(self, mock_get_key, mock_hook, mock_tempfile, mock_parser):
S3ToSqlOperator(parser=mock_parser, **self.s3_to_sql_transfer_kwargs).execute({})
mock_get_key.assert_called_once_with(
key=self.s3_to_sql_transfer_kwargs["s3_key"],
bucket_name=self.s3_to_sql_transfer_kwargs["s3_bucket"],
)
mock_get_key.return_value.download_fileobj.assert_called_once_with(
mock_tempfile.return_value.__enter__.return_value
)
mock_parser.assert_called_once_with(mock_tempfile.return_value.__enter__.return_value.name)
mock_hook.return_value.insert_rows.assert_called_once_with(
table=self.s3_to_sql_transfer_kwargs["table"],
schema=self.s3_to_sql_transfer_kwargs["schema"],
target_fields=self.s3_to_sql_transfer_kwargs["column_list"],
rows=mock_parser.return_value,
commit_every=self.s3_to_sql_transfer_kwargs["commit_every"],
)
@patch("airflow.providers.amazon.aws.transfers.s3_to_sql.NamedTemporaryFile")
@patch("airflow.models.connection.Connection.get_hook", return_value=mock_bad_hook)
@patch("airflow.providers.amazon.aws.transfers.s3_to_sql.S3Hook.get_key")
def test_execute_with_bad_hook(self, mock_get_key, mock_bad_hook, mock_tempfile, mock_parser):
with pytest.raises(AirflowException):
S3ToSqlOperator(parser=mock_parser, **self.s3_to_sql_transfer_kwargs).execute({})
def test_hook_params(self, mock_parser):
op = S3ToSqlOperator(
parser=mock_parser,
sql_hook_params={
"log_sql": False,
},
**self.s3_to_sql_transfer_kwargs,
)
hook = op.db_hook
assert hook.log_sql == op.sql_hook_params["log_sql"]
def teardown_method(self):
with create_session() as session:
(
session.query(models.Connection)
.filter(or_(models.Connection.conn_id == "s3_test", models.Connection.conn_id == "sql_test"))
.delete()
)
| TestS3ToSqlTransfer |
python | openai__openai-python | src/openai/types/graders/label_model_grader_param.py | {
"start": 757,
"end": 1311
} | class ____(TypedDict, total=False):
image_url: Required[str]
"""The URL of the image input."""
type: Required[Literal["input_image"]]
"""The type of the image input. Always `input_image`."""
detail: str
"""The detail level of the image to be sent to the model.
One of `high`, `low`, or `auto`. Defaults to `auto`.
"""
InputContent: TypeAlias = Union[
str,
ResponseInputTextParam,
InputContentOutputText,
InputContentInputImage,
ResponseInputAudioParam,
Iterable[object],
]
| InputContentInputImage |
python | simonw__datasette | tests/test_utils_check_callable.py | {
"start": 197,
"end": 1008
} | class ____:
pass
async def async_func():
pass
def non_async_func():
pass
@pytest.mark.parametrize(
"obj,expected_is_callable,expected_is_async_callable",
(
(async_func, True, True),
(non_async_func, True, False),
(AsyncClass(), True, True),
(NotAsyncClass(), True, False),
(ClassNoCall(), False, False),
(AsyncClass, True, False),
(NotAsyncClass, True, False),
(ClassNoCall, True, False),
("", False, False),
(1, False, False),
(str, True, False),
),
)
def test_check_callable(obj, expected_is_callable, expected_is_async_callable):
status = check_callable(obj)
assert status.is_callable == expected_is_callable
assert status.is_async_callable == expected_is_async_callable
| ClassNoCall |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/twitch/tests.py | {
"start": 535,
"end": 3521
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = TwitchProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"data": [{
"id": "44322889",
"login": "dallas",
"display_name": "dallas",
"type": "staff",
"broadcaster_type": "",
"description": "Just a gamer playing games and chatting. :)",
"profile_image_url": "https://static-cdn.jtvnw.net/jtv_user_pictures/dallas-profile_image-1a2c906ee2c35f12-300x300.png",
"offline_image_url": "https://static-cdn.jtvnw.net/jtv_user_pictures/dallas-channel_offline_image-1a2c906ee2c35f12-1920x1080.png",
"view_count": 191836881,
"email": "login@provider.com"
}]
}
""",
) # noqa
def get_expected_to_str(self):
return "dallas"
def test_response_over_400_raises_OAuth2Error(self):
resp_mock = MockedResponse(HTTPStatus.BAD_REQUEST, '{"error": "Invalid token"}')
expected_error = "Twitch API Error: Invalid token ()"
self.check_for_error(resp_mock, expected_error)
def test_empty_or_missing_data_key_raises_OAuth2Error(self):
resp_mock = MockedResponse(HTTPStatus.OK, '{"data": []}')
expected_error = "Invalid data from Twitch API: {'data': []}"
self.check_for_error(resp_mock, expected_error)
resp_mock = MockedResponse(HTTPStatus.OK, '{"missing_data": "key"}')
expected_error = "Invalid data from Twitch API: {'missing_data': 'key'}"
self.check_for_error(resp_mock, expected_error)
def test_missing_twitch_id_raises_OAuth2Error(self):
resp_mock = MockedResponse(
HTTPStatus.OK, '{"data": [{"login": "fake_twitch"}]}'
)
expected_error = "Invalid data from Twitch API: {'login': 'fake_twitch'}"
self.check_for_error(resp_mock, expected_error)
def check_for_error(self, resp_mock, expected_error):
with self.assertRaises(OAuth2Error) as error_ctx:
self._run_just_complete_login(resp_mock)
self.assertEqual(str(error_ctx.exception).replace("u", ""), expected_error)
def _run_just_complete_login(self, resp_mock):
"""
Helper function for checking that Error cases are
handled correctly. Running only `complete_login` means
we can check that the specific errors are raised before
they are caught and rendered to generic error HTML
"""
request = RequestFactory().get(
reverse(self.provider.id + "_login"),
{"process": "login"},
)
adapter = TwitchOAuth2Adapter(request)
app = adapter.get_provider().app
token = SocialToken(token="this-is-my-fake-token")
with mocked_response(resp_mock):
adapter = TwitchOAuth2Adapter(request)
adapter.complete_login(request, app, token)
| TwitchTests |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_weight_averaging.py | {
"start": 2178,
"end": 2790
} | class ____:
"""EMA averaging function.
Functionally equivalent to the closure that ``get_ema_avg_fn()`` would return. This class is needed because we
cannot use a closure with ddp_spawn. (``Popen(process_obj)`` would fail with
``Can't get local object 'get_ema_avg_fn.<locals>.ema_update'``).
"""
def __init__(self, decay: float = 0.999) -> None:
self.decay = decay
@torch.no_grad()
def __call__(self, ema_param: Tensor, current_param: Tensor, num_averaged: Tensor) -> Tensor:
return self.decay * ema_param + (1 - self.decay) * current_param
| EMAAveragingFunction |
python | scikit-learn__scikit-learn | sklearn/model_selection/tests/common.py | {
"start": 119,
"end": 641
} | class ____:
"""A wrapper to make KFold single entry cv iterator"""
def __init__(self, n_splits=4, n_samples=99):
self.n_splits = n_splits
self.n_samples = n_samples
self.indices = iter(KFold(n_splits=n_splits).split(np.ones(n_samples)))
def split(self, X=None, y=None, groups=None):
"""Split can be called only once"""
for index in self.indices:
yield index
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits
| OneTimeSplitter |
python | getsentry__sentry | tests/sentry/notifications/platform/email/test_provider.py | {
"start": 5302,
"end": 6739
} | class ____(TestCase):
def setUp(self) -> None:
self.provider = EmailNotificationProvider()
self.data = MockNotification(message="test message")
self.rendered_template = MockNotificationTemplate().render(self.data)
self.email = "test@example.com"
self.target = GenericNotificationTarget(
provider_key=NotificationProviderKey.EMAIL,
resource_type=NotificationTargetResourceType.EMAIL,
resource_id=self.email,
)
def test_provider_configuration(self) -> None:
assert self.provider.key == NotificationProviderKey.EMAIL
assert self.provider.target_class == GenericNotificationTarget
assert self.provider.target_resource_types == [NotificationTargetResourceType.EMAIL]
assert EmailNotificationProvider.is_available() is True
assert EmailNotificationProvider.is_available(organization=self.organization) is True
@mock.patch("sentry.notifications.platform.email.provider.send_messages")
def test_send(self, mock_send_messages: mock.MagicMock) -> None:
email = EmailRenderer.render(data=self.data, rendered_template=self.rendered_template)
EmailNotificationProvider.send(target=self.target, renderable=email)
mock_send_messages.assert_called_once()
[sent_message] = mock_send_messages.call_args[0][0]
assert sent_message.to == [self.email]
| EmailNotificationProviderTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_requests/base_request_builder.py | {
"start": 195,
"end": 766
} | class ____(abc.ABC):
@property
@abc.abstractmethod
def url(self) -> str:
""""""
@property
@abc.abstractmethod
def query_params(self) -> Dict[str, Any]:
""""""
@property
@abc.abstractmethod
def headers(self) -> Dict[str, Any]:
""""""
@property
@abc.abstractmethod
def request_body(self) -> Optional[str]:
""""""
def build(self) -> HttpRequest:
return HttpRequest(url=self.url, query_params=self.query_params, headers=self.headers, body=self.request_body)
| AmazonAdsRequestBuilder |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 133189,
"end": 135027
} | class ____(TorchHigherOrderOperatorVariable):
def proxy_submod(self, tx, arg):
assert isinstance(arg.source.base, DictGetItemSource)
submod_name = tx.output.install_subgraph(arg.source.base.index, arg.value)
p_submod = make_attr(tx, submod_name)
set_example_value(p_submod.node, arg.value)
return p_submod
def to_proxy(self, tx, arg):
if isinstance(arg, UnspecializedNNModuleVariable):
return self.proxy_submod(tx, arg)
elif isinstance(arg, (ListVariable, TupleVariable)):
return arg.python_type()(
self.to_proxy(tx, nested_arg) for nested_arg in arg.items
)
else:
return arg.as_proxy()
def _call_function(
self, tx, args: "list[VariableTracker]", kwargs: "dict[str, VariableTracker]"
) -> "VariableTracker":
from .builder import wrap_fx_proxy
try:
p_args = tuple(self.to_proxy(tx, arg) for arg in args)
p_kwargs = {key: self.to_proxy(tx, arg) for key, arg in kwargs.items()}
except (NotImplementedError, Unsupported) as err:
unimplemented(
gb_type="failed to handle argument for FlexAttentionBackward HOP",
context=f"args: {args}, kwargs: {kwargs}",
explanation="Missing Dynamo support for FlexAttentionBackward HOP argument.",
hints=[
*graph_break_hints.SUPPORTABLE,
],
from_exc=err,
)
return wrap_fx_proxy(
tx=tx,
proxy=tx.output.create_proxy(
"call_function",
self.value,
args=p_args,
kwargs=p_kwargs,
),
example_value=None,
)
| FlexAttentionBackwardHighOrderVariable |
python | huggingface__transformers | src/transformers/models/patchtst/modeling_patchtst.py | {
"start": 37319,
"end": 38188
} | class ____(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, num_targets)`):
Prediction scores of the PatchTST modeling head (scores before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for time series model's predictions outputs that contains the sampled values from the chosen
distribution.
"""
)
| PatchTSTForClassificationOutput |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 3911,
"end": 4509
} | class ____(BundlePackageTemplate):
"""Provides the default values to be used for the package file template"""
base_class_name = "Package"
package_class_import = "from spack_repo.builtin.build_systems.generic import Package"
body_def = """\
def install(self, spec, prefix):
# FIXME: Unknown build system
make()
make("install")"""
url_line = ' url = "{url}"'
def __init__(self, name, url, versions, languages: List[str]):
super().__init__(name, versions, languages)
self.url_def = self.url_line.format(url=url)
| PackageTemplate |
python | astropy__astropy | astropy/extern/configobj/validate.py | {
"start": 13718,
"end": 14149
} | class ____(VdtValueError):
"""The value supplied was of the correct type, but was too short."""
def __init__(self, value):
"""
>>> raise VdtValueTooShortError('jed')
Traceback (most recent call last):
VdtValueTooShortError: the value "jed" is too short.
"""
ValidateError.__init__(
self,
'the value "%s" is too short.' % (value,))
| VdtValueTooShortError |
python | viewflow__viewflow | viewflow/fields.py | {
"start": 3845,
"end": 4565
} | class ____(models.Lookup):
lookup_name = "exact"
def as_sql(self, compiler, connection):
fields = [
self.lhs.field.model._meta.get_field(column)
for column in self.lhs.field.columns
]
lookup_classes = [field.get_lookup("exact") for field in fields]
lookups = [
lookup_class(field.get_col(self.lhs.alias), self.rhs[column])
for lookup_class, field, column in zip(
lookup_classes, fields, self.lhs.field.columns
)
]
value_constraint = WhereNode()
for lookup in lookups:
value_constraint.add(lookup, AND)
return value_constraint.as_sql(compiler, connection)
| Exact |
python | modin-project__modin | modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py | {
"start": 9267,
"end": 9422
} | class ____(PandasOnRayDataframeVirtualPartition):
axis = 0
@_inherit_docstrings(PandasOnRayDataframeVirtualPartition)
| PandasOnRayDataframeColumnPartition |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/fixtures/sql.py | {
"start": 16171,
"end": 17005
} | class ____:
@config.fixture(params=["legacy", "new"])
def distinct_on_fixture(self, request):
from sqlalchemy.dialects.postgresql import distinct_on
def go(query, *expr):
if request.param == "legacy":
if expr:
with expect_deprecated(
"Passing expression to ``distinct`` to generate a "
"DISTINCT "
"ON clause is deprecated. Use instead the "
"``postgresql.distinct_on`` function as an extension."
):
return query.distinct(*expr)
else:
return query.distinct()
elif request.param == "new":
return query.ext(distinct_on(*expr))
return go
| DistinctOnFixture |
python | getsentry__sentry | src/sentry/hybridcloud/services/organization_mapping/impl.py | {
"start": 913,
"end": 6588
} | class ____(OrganizationMappingService):
def get(self, *, organization_id: int) -> RpcOrganizationMapping | None:
try:
org_mapping = OrganizationMapping.objects.get(organization_id=organization_id)
except OrganizationMapping.DoesNotExist:
return None
return serialize_organization_mapping(org_mapping)
def get_by_slug(self, *, slug: str) -> RpcOrganizationMapping | None:
try:
org_mapping = OrganizationMapping.objects.get(slug=slug)
except OrganizationMapping.DoesNotExist:
return None
return serialize_organization_mapping(org_mapping)
def get_owners(self, *, organization_id: int) -> list[RpcUser]:
owner_ids = list(
OrganizationMemberMapping.objects.filter(
organization_id=organization_id, role=roles.get_top_dog().id
).values_list("user_id", flat=True)
)
return user_service.get_many_by_id(ids=owner_ids)
def get_many(self, *, organization_ids: list[int]) -> list[RpcOrganizationMapping]:
org_mappings = OrganizationMapping.objects.filter(organization_id__in=organization_ids)
return [serialize_organization_mapping(om) for om in org_mappings]
def _check_organization_mapping_integrity(
self, org_id: int, update: RpcOrganizationMappingUpdate
) -> bool:
if not update.slug:
capture_exception(
OrganizationMappingConsistencyException("Organization mapping must have a slug")
)
return False
if not update.region_name:
capture_exception(
OrganizationMappingConsistencyException("Organization mapping must have a region")
)
return False
org_slug_qs = OrganizationSlugReservation.objects.filter(
organization_id=org_id,
)
org_slugs = [org_slug for org_slug in org_slug_qs]
if len(org_slugs) == 0:
# If there's no matching organization slug reservation, alert and prevent writing
# a new org mapping, as we don't want to create contention if the slug conflicts
# with a future organization.
capture_exception(
OrganizationMappingConsistencyException(
f"Expected an organization slug reservation for organization {org_id}, none was found"
)
)
return False
org_slug_regions_set = {org_slug.region_name for org_slug in org_slugs}
if update.region_name not in org_slug_regions_set:
capture_exception(
OrganizationMappingConsistencyException(
"Mismatched Slug Reservation and Organization Regions"
)
)
return False
has_matching_slug_reservation = (
len([org_slug for org_slug in org_slugs if org_slug.slug == update.slug]) > 0
)
if not has_matching_slug_reservation:
capture_exception(
OrganizationMappingConsistencyException(
"Mismatched Slug Reservation and Organization Slugs"
)
)
return False
return True
def _upsert_organization_slug_reservation_for_monolith(
self, organization_id: int, mapping_update: RpcOrganizationMappingUpdate
) -> None:
org_slug_reservation = OrganizationSlugReservation.objects.filter(
organization_id=organization_id
).first()
if org_slug_reservation is None:
OrganizationSlugReservation(
region_name=mapping_update.region_name,
slug=mapping_update.slug,
organization_id=organization_id,
user_id=-1,
).save(unsafe_write=True)
elif org_slug_reservation.slug != mapping_update.slug:
org_slug_reservation.update(slug=mapping_update.slug, unsafe_write=True)
def upsert(self, organization_id: int, update: RpcOrganizationMappingUpdate) -> None:
update_dict: dict[str, Any] = dict(
name=update.name,
status=update.status,
slug=update.slug,
region_name=update.region_name,
require_2fa=update.requires_2fa,
early_adopter=update.early_adopter,
allow_joinleave=update.allow_joinleave,
enhanced_privacy=update.enhanced_privacy,
disable_shared_issues=update.disable_shared_issues,
disable_new_visibility_features=update.disable_new_visibility_features,
require_email_verification=update.require_email_verification,
codecov_access=update.codecov_access,
disable_member_project_creation=update.disable_member_project_creation,
prevent_superuser_access=update.prevent_superuser_access,
disable_member_invite=update.disable_member_invite,
)
if isinstance(update.customer_id, CustomerId):
update_dict["customer_id"] = update.customer_id.value
with unguarded_write(using=router.db_for_write(OrganizationMapping)):
mapping_is_valid = self._check_organization_mapping_integrity(
org_id=organization_id, update=update
)
if not mapping_is_valid:
return
OrganizationMapping.objects.update_or_create(
organization_id=organization_id, defaults=update_dict
)
def delete(self, organization_id: int) -> None:
OrganizationMapping.objects.filter(organization_id=organization_id).delete()
| DatabaseBackedOrganizationMappingService |
python | pypa__pip | tests/unit/test_appdirs.py | {
"start": 5863,
"end": 8870
} | class ____:
@pytest.mark.skipif(sys.platform != "win32", reason="Windows-only test")
def test_user_config_dir_win_no_roaming(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
_get_win_folder = mock.Mock(return_value="C:\\Users\\test\\AppData\\Local")
monkeypatch.setattr(
platformdirs.windows, # type: ignore
"get_win_folder",
_get_win_folder,
raising=False,
)
assert (
appdirs.user_config_dir("pip", roaming=False)
== "C:\\Users\\test\\AppData\\Local\\pip"
)
assert _get_win_folder.call_args_list == [mock.call("CSIDL_LOCAL_APPDATA")]
@pytest.mark.skipif(sys.platform != "win32", reason="Windows-only test")
def test_user_config_dir_win_yes_roaming(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
_get_win_folder = mock.Mock(return_value="C:\\Users\\test\\AppData\\Roaming")
monkeypatch.setattr(
platformdirs.windows, # type: ignore
"get_win_folder",
_get_win_folder,
raising=False,
)
assert (
appdirs.user_config_dir("pip") == "C:\\Users\\test\\AppData\\Roaming\\pip"
)
assert _get_win_folder.call_args_list == [mock.call("CSIDL_APPDATA")]
@pytest.mark.skipif(sys.platform != "darwin", reason="MacOS-only test")
def test_user_config_dir_osx(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("HOME", "/home/test")
if os.path.isdir("/home/test/Library/Application Support/"):
assert (
appdirs.user_config_dir("pip")
== "/home/test/Library/Application Support/pip"
)
else:
assert appdirs.user_config_dir("pip") == "/home/test/.config/pip"
@pytest.mark.skipif(sys.platform != "linux", reason="Linux-only test")
def test_user_config_dir_linux(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.delenv("XDG_CONFIG_HOME", raising=False)
monkeypatch.setenv("HOME", "/home/test")
assert appdirs.user_config_dir("pip") == "/home/test/.config/pip"
@pytest.mark.skipif(sys.platform != "linux", reason="Linux-only test")
def test_user_config_dir_linux_override(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setenv("XDG_CONFIG_HOME", "/home/test/.other-config")
monkeypatch.setenv("HOME", "/home/test")
assert appdirs.user_config_dir("pip") == "/home/test/.other-config/pip"
@pytest.mark.skipif(sys.platform != "linux", reason="Linux-only test")
def test_user_config_dir_linux_home_slash(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# Verify that we are not affected by https://bugs.python.org/issue14768
monkeypatch.delenv("XDG_CONFIG_HOME", raising=False)
monkeypatch.setenv("HOME", "/")
assert appdirs.user_config_dir("pip") == "/.config/pip"
| TestUserConfigDir |
python | rq__rq | tests/test_callbacks.py | {
"start": 11387,
"end": 15076
} | class ____(RQTestCase):
def test_job_creation_with_success_callback(self):
"""Ensure callbacks are created and persisted properly"""
job = Job.create(say_hello, connection=self.connection)
self.assertIsNone(job._success_callback_name)
# _success_callback starts with UNEVALUATED
self.assertEqual(job._success_callback, UNEVALUATED)
self.assertEqual(job.success_callback, None)
# job.success_callback is assigned properly
job = Job.create(say_hello, on_success=print, connection=self.connection)
self.assertIsNotNone(job._success_callback_name)
self.assertEqual(job.success_callback, print)
job.save()
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.success_callback, print)
# test string callbacks
job = Job.create(say_hello, on_success=Callback('print'), connection=self.connection)
self.assertIsNotNone(job._success_callback_name)
self.assertEqual(job.success_callback, print)
job.save()
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.success_callback, print)
def test_job_creation_with_failure_callback(self):
"""Ensure failure callbacks are persisted properly"""
job = Job.create(say_hello, connection=self.connection)
self.assertIsNone(job._failure_callback_name)
# _failure_callback starts with UNEVALUATED
self.assertEqual(job._failure_callback, UNEVALUATED)
self.assertEqual(job.failure_callback, None)
# job.failure_callback is assigned properly
job = Job.create(say_hello, on_failure=print, connection=self.connection)
self.assertIsNotNone(job._failure_callback_name)
self.assertEqual(job.failure_callback, print)
job.save()
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.failure_callback, print)
# test string callbacks
job = Job.create(say_hello, on_failure=Callback('print'), connection=self.connection)
self.assertIsNotNone(job._failure_callback_name)
self.assertEqual(job.failure_callback, print)
job.save()
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.failure_callback, print)
def test_job_creation_with_stopped_callback(self):
"""Ensure stopped callbacks are persisted properly"""
job = Job.create(say_hello, connection=self.connection)
self.assertIsNone(job._stopped_callback_name)
# _failure_callback starts with UNEVALUATED
self.assertEqual(job._stopped_callback, UNEVALUATED)
self.assertEqual(job.stopped_callback, None)
# _stopped_callback becomes `None` after `job.stopped_callback` is called if there's no stopped callback
self.assertEqual(job._stopped_callback, None)
# job.failure_callback is assigned properly
job = Job.create(say_hello, on_stopped=print, connection=self.connection)
self.assertIsNotNone(job._stopped_callback_name)
self.assertEqual(job.stopped_callback, print)
job.save()
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.stopped_callback, print)
# test string callbacks
job = Job.create(say_hello, on_stopped=Callback('print'), connection=self.connection)
self.assertIsNotNone(job._stopped_callback_name)
self.assertEqual(job.stopped_callback, print)
job.save()
job = Job.fetch(id=job.id, connection=self.connection)
self.assertEqual(job.stopped_callback, print)
| JobCallbackTestCase |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_algorithms.py | {
"start": 7953,
"end": 8399
} | class ____:
test_ecb = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "IDEA"),
["idea-ecb.txt"],
lambda key, **kwargs: IDEA(binascii.unhexlify(key)),
lambda **kwargs: modes.ECB(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
IDEA(b"\x00" * 16), modes.CBC(b"\x00" * 8)
),
skip_message="Does not support IDEA CBC",
)
| TestIDEAModeECB |
python | facelessuser__soupsieve | tests/test_level4/test_host.py | {
"start": 49,
"end": 588
} | class ____(util.TestCase):
"""Test host selectors."""
MARKUP = """<h1>header</h1><div><p>some text</p></div>"""
def test_host(self):
"""Test host (not supported)."""
self.assert_selector(
self.MARKUP,
":host",
[],
flags=util.HTML
)
def test_host_func(self):
"""Test host function (not supported)."""
self.assert_selector(
self.MARKUP,
":host(h1)",
[],
flags=util.HTML
)
| TestHost |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/event_api.py | {
"start": 2180,
"end": 3166
} | class ____(NamedTuple):
"""Pairs an id-based event log cursor with a timestamp-based run cursor, for improved
performance on run-sharded event log storages (e.g. the default SqliteEventLogStorage). For
run-sharded storages, the id field is ignored, since they may not be unique across shards.
"""
id: int
run_updated_after: datetime
RunStatusChangeEventType: TypeAlias = Literal[
DagsterEventType.RUN_START,
DagsterEventType.RUN_SUCCESS,
DagsterEventType.RUN_FAILURE,
DagsterEventType.RUN_ENQUEUED,
DagsterEventType.RUN_STARTING,
DagsterEventType.RUN_CANCELING,
DagsterEventType.RUN_CANCELED,
]
AssetEventType: TypeAlias = Literal[
DagsterEventType.ASSET_MATERIALIZATION,
DagsterEventType.ASSET_OBSERVATION,
DagsterEventType.ASSET_MATERIALIZATION_PLANNED,
DagsterEventType.ASSET_FAILED_TO_MATERIALIZE,
]
EventCursor: TypeAlias = Union[int, RunShardedEventsCursor]
@whitelist_for_serdes
@public
| RunShardedEventsCursor |
python | huggingface__transformers | tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py | {
"start": 1357,
"end": 4435
} | class ____(CausalLMModelTest, unittest.TestCase):
has_attentions = False
model_tester_class = RecurrentGemmaModelTester
@unittest.skip(reason="RecurrentGemma only supports sdpa")
def test_eager_matches_sdpa_generate(self):
pass
@unittest.skip(reason="SQRBound is known to have issues with gc")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Past key values are not returned")
def test_prompt_lookup_decoding_matches_greedy_search(self):
pass
@unittest.skip(reason="Past key values are not returned")
def test_model_parallelism(self):
pass
@unittest.skip(reason="Past key values are not returned")
def test_model_parallel_beam_search(self):
pass
@parameterized.expand([("random",), ("same",)])
@pytest.mark.generate
@unittest.skip(reason="Rely on `past_key_values` to crop the assistant pkv. Not supported")
def test_assisted_decoding_matches_greedy_search(self):
pass
@unittest.skip(reason="RecurrentGemma's output different if you pad left or right. This is expected")
def test_left_padding_compatibility(self):
pass
@pytest.mark.generate
@unittest.skip(reason="Relies on `past_key_values` returned by the model. Not supported with recurrent gemma")
def test_assisted_decoding_sample(self):
pass
@unittest.skip(reason="RecurrentGemma is unusual and fails a lot of generation tests")
@pytest.mark.generate
def test_beam_sample_generate_dict_output(self):
pass
@unittest.skip(reason="RecurrentGemma is unusual and fails a lot of generation tests")
@pytest.mark.generate
def test_beam_search_generate_dict_output(self):
pass
@unittest.skip(reason="RecurrentGemma is unusual and fails a lot of generation tests")
@pytest.mark.generate
def test_beam_search_generate_dict_outputs_use_cache(self):
pass
@unittest.skip(reason="RecurrentGemma is unusual and fails a lot of generation tests")
@pytest.mark.generate
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="RecurrentGemma is unusual and fails a lot of generation tests")
@pytest.mark.generate
def test_greedy_generate_dict_outputs(self):
pass
@unittest.skip(reason="RecurrentGemma is unusual and fails a lot of generation tests")
@pytest.mark.generate
def test_greedy_generate_dict_outputs_use_cache(self):
pass
@unittest.skip(reason="RecurrentGemma is unusual and fails a lot of generation tests")
def test_model_outputs_equivalence(self):
pass
@unittest.skip("RecurrentGemma doesn't have RoPE scaling implemented")
def test_model_rope_scaling_frequencies(self):
pass
@parameterized.expand([("linear",), ("dynamic",), ("yarn",)])
@unittest.skip("RecurrentGemma doesn't have RoPE scaling implemented")
def test_model_rope_scaling_from_config(self, scaling_type):
pass
@require_torch_accelerator
@slow
| RecurrentGemmaModelTest |
python | joke2k__faker | faker/providers/company/hy_AM/__init__.py | {
"start": 45,
"end": 7275
} | class ____(CompanyProvider):
formats = (
"{{first_name}} և {{first_name}} {{company_suffix}}",
"{{last_name}} {{company_suffix}}",
"{{last_name}} և {{last_name}} {{company_suffix}}"
"{{last_name}}, {{last_name}} և {{last_name}} {{company_suffix}}",
)
company_suffixes = ("ՍՊԸ", "ՀՁ", "ՓԲԸ", "ԲԲԸ", "Գրուպ", "Հոլդինգ")
catch_phrase_words = (
(
"առաջավոր",
"բարելավված",
"ավտոմատացված",
"հավասարակշռված",
"կենտրոնացված",
"համատեղելի",
"կարգավորելի",
"անհատականացված",
"ապակենտրոնացված",
"թվայնացված",
"տարածված",
"փոքրացված",
"ընդլայնված",
"էրգոնիկ",
"բացառիկ",
"երկարացված",
"լիովին կոնֆիգուրացվող",
"ֆունկցիոնալ հիմունքներով",
"հիմնական",
"հորիզոնական",
"իրականացված",
"նորարական",
"ինտեգրված",
"ինտուիտիվ",
"պարտադիր",
"բազմուղի",
"բազմաշերտ",
"ցանցային",
"բաց կոդով",
"օպերատիվ",
"օպտիմալացված",
"օրգանական",
"կազմակերպված",
"կայուն",
"կենսունակ",
"ավարտված",
"բևեռացված",
"կանխարգելող",
"ակտիվ",
"ծրագրավորելի",
"առաջադիմական",
"որակով",
"ռեակտիվ",
"իրականացված",
"նվազեցված",
"դիմացկուն",
"անխափան",
"ապահով",
"համատեղելի",
"հեշտացված",
"փոխարկելի",
"սինխրոնիզացված",
"ունիվերսալ",
"ճկուն",
"վիրտուալ",
),
(
"3-րդ սերնդի",
"4-րդ սերնդի",
"5-րդ սերնդի",
"6-րդ սերնդի",
"ասիմետրիկ",
"ասինխրոն",
"թողունակությունը վերահսկվող",
"երկկողմանի",
"հստակ մտածող",
"համաձայնեցված",
"բաղադրյալ",
"դիդակտիկ",
"ուղղորդիչ",
"դիսկրետ",
"բացահայտ",
"գլոբալ",
"բարձր մակարդակի",
"ամբողջական",
"միատարր",
"հիբրիդ",
"ազդեցիկ",
"ինտերակտիվ",
"միջանկյալ",
"առաջատար",
"տեղային",
"լոգիստիկ",
"սիստեմատիկ",
"մոդուլային",
"չեզոք",
"հաջորդ սերնդի",
"օբյեկտի վրա հիմնված",
"օպտիմալ",
"արմատական",
"փոխադարձ",
"ռեգիոնալ",
"երկրորդական",
"կայուն",
"ստատիկ",
"համակարգված",
"համակարգային",
"շոշափելի",
"երրորդական",
"անցումային",
"միատեսակ",
"լավ մոդուլացված",
"առանց թերությունների",
),
(
"կարողություն",
"մուտք",
"ադապտեր",
"ալգորիթմ",
"միություն",
"վերլուծիչ",
"ծրագրային ապահովում",
"մոտեցում",
"արխիվ",
"արհեստական բանականություն",
"վերաբերմունք",
"ընդունակություն",
"կարողություն",
"մարտահրավեր",
"գործակցություն",
"բարդություն",
"գաղափար",
"համախմբվածություն",
"տվյալների բազա",
"տվյալների պահեստ",
"սահմանում",
"իմացություն",
"կոդավորում",
"գաղտնագրում",
"կանխատեսում",
"հենքային ծրագիր",
"ֆունկցիա",
"գործառույթ",
"գրաֆիկական ինտերֆեյս",
"սարքային ապահովում",
"հիերարխիա",
"հանգույց",
"ենթակառուցվածք",
"նախաձեռնություն",
"ծրագրի ներդրում",
"հրահանգների հավաքածու",
"ինտերֆեյս",
"ինտրանետ",
"գիտելիքների բազա",
"տեղական ցանց",
"մատրիցա",
"մեթոդաբանություն",
"միջանկյալ շերտ",
"միգրացիա",
"մոդել",
"կարգավորիչ",
"մոնիտորինգ",
"բաց համակարգ",
"պարադիգմ",
"պորտալ",
"գնային կառուցվածք",
"արդյունավետություն",
"նախագիծ",
"ապահովված գիծ",
"ծրագրային ապահովում",
"լուծում",
"ստանդարտացում",
"ստրատեգիա",
"կառուցվածք",
"օպերատիվ խումբ",
"արտադրողականություն",
"ժամանակացույց",
"գործիք",
"օգտագործում",
"կայք",
"աշխատուժ",
),
)
bsWords = (
(
"իրականացնել",
"օգտագործել",
"ինտեգրել",
"ռացիոնալացնել",
"օպտիմալացնել",
"փոխակերպել",
"ընդգրկել",
"ակտիվացնել",
"կազմակերպել",
"նախագծել",
"խթանել",
"ձևափոխել",
"արտոնել",
"դրամայնացնել",
"հեշտացնել",
"վերցնել",
"աճեցնել",
"սինթեզել",
"առաքել",
"զբաղվել",
"առավելագույնի հասցնել",
"արագացնել",
"միջնորդել",
"պատկերացնել",
"վերափոխել",
"ընդլայնել",
"նախաձեռնել",
"հեղափոխականացնել",
"առաջացնել",
"օգտագործել",
"զարգացնել",
"արտադրանքի վերածել",
),
(
"ուղղահայաց",
"ակտիվ",
"դիմացկուն",
"հեղափոխական",
"առաջատար",
"նորարարական",
"ինտուիտիվ",
"ռազմավարական",
"էլեկտրոնային",
"գլոբալ",
"վիրտուալ",
"դինամիկ",
"գրավիչ",
"ինտերակտիվ",
"արդյունավետ",
"ընդարձակելի",
"պատրաստի",
"ինտեգրված",
"ազդեցիկ",
"անլար",
"թափանցիկ",
"հաջորդ սերնդի",
"ժամանակակից",
"հարմարեցված",
"համատարած",
"ազդեցիկ",
"ամբողջական",
"հարուստ",
"անվճար",
),
(
"պարադիգմներ",
"շուկաներ",
"ենթակառուցվածքներ",
"պլատֆորմներ",
"նախաձեռնություններ",
"ուղիներ",
"համայնքներ",
"լուծումներ",
"պորտալներ",
"տեխնոլոգիաներ",
"հարաբերություններ",
"կառուցվածքներ",
"ինտերֆեյսներ",
"շուկաներ",
"համակարգեր",
"մոդելներ",
"օգտագործողներ",
"սխեմաներ",
"ցանցեր",
"ծրագրեր",
"չափանիշներ",
"բիզնես",
"գործառույթներ",
"փորձառություններ",
"մեթոդաբանություններ",
),
)
| Provider |
python | pytorch__pytorch | test/distributed/checkpoint/test_planner.py | {
"start": 20617,
"end": 25129
} | class ____(TestCase):
def test_create_read_item_from_chunks(self):
tensor_md = TensorStorageMetadata(
properties=TensorProperties.create_from_tensor(torch.empty([16])),
size=torch.Size([16]),
chunks=[
ChunkStorageMetadata(offsets=torch.Size([0]), sizes=torch.Size([8])),
ChunkStorageMetadata(offsets=torch.Size([8]), sizes=torch.Size([8])),
],
)
chunk = ChunkStorageMetadata(offsets=torch.Size([4]), sizes=torch.Size([7]))
read_items = create_read_items_for_chunk_list("foo", tensor_md, [chunk])
self.assertEqual(2, len(read_items))
self.assertEqual(MetadataIndex("foo", [4]), read_items[0].dest_index)
self.assertEqual(torch.Size([0]), read_items[0].dest_offsets)
self.assertEqual(MetadataIndex("foo", [0]), read_items[0].storage_index)
self.assertEqual(torch.Size([4]), read_items[0].storage_offsets)
self.assertEqual(torch.Size([4]), read_items[0].lengths)
self.assertEqual(MetadataIndex("foo", [4]), read_items[1].dest_index)
self.assertEqual(torch.Size([4]), read_items[1].dest_offsets)
self.assertEqual(MetadataIndex("foo", [8]), read_items[1].storage_index)
self.assertEqual(torch.Size([0]), read_items[1].storage_offsets)
self.assertEqual(torch.Size([3]), read_items[1].lengths)
def test_merge_delta_local_plans(self):
def create_data(rank):
with with_dist(rank=rank, world_size=4):
tensor = torch.rand(10)
val = [1, 2, 3]
st = create_sharded_tensor(rank=rank, world_size=4, shards_per_rank=1)
state_dict = {"tensor": tensor, "value": val, "st": st}
return create_default_local_save_plan(state_dict, rank == 0)
def _validate_plans(plan1: SavePlan, plan2: SavePlan):
self.assertEqual(len(plan1.items), len(plan2.items))
for item1, item2 in zip(plan1.items, plan2.items):
self.assertEqual(item1.index, item2.index)
self.assertEqual(item1.type, item2.type)
self.assertEqual(item1.tensor_data, item2.tensor_data)
cached_plans = [create_data(0), create_data(1)]
delta_plans = [create_data(2), create_data(3)]
# Both the plans changed.
# Merge plan should have both the plans from the delta plans
merged_plans = _merge_delta_local_plans(cached_plans, delta_plans)
self.assertEqual(2, len(merged_plans))
_validate_plans(delta_plans[0], merged_plans[0])
_validate_plans(delta_plans[1], merged_plans[1])
# Only the first plan changed.
# Merge plan should have the first plan from the delta plans and the second plan from the cached plans
delta_plans = [create_data(2), SavePlan([], usable=False)]
merged_plans = _merge_delta_local_plans(cached_plans, delta_plans)
_validate_plans(delta_plans[0], merged_plans[0])
_validate_plans(cached_plans[1], merged_plans[1])
# Only the second plan changed.
# Merge plan should have the first plan from the cached plans and the second plan from the delta plans
delta_plans = [SavePlan([], usable=False), create_data(3)]
merged_plans = _merge_delta_local_plans(cached_plans, delta_plans)
_validate_plans(cached_plans[0], merged_plans[0])
_validate_plans(delta_plans[1], merged_plans[1])
# None of the plans changed. Cached plans should be returned
delta_plans = [SavePlan([], usable=False), SavePlan([], usable=False)]
merged_plans = _merge_delta_local_plans(cached_plans, delta_plans)
_validate_plans(cached_plans[0], merged_plans[0])
_validate_plans(cached_plans[1], merged_plans[1])
def test_compare_save_plans(self):
def create_data(rank):
with with_dist(rank=rank, world_size=4):
tensor = torch.rand(10)
val = [1, 2, 3]
st = create_sharded_tensor(rank=rank, world_size=4, shards_per_rank=1)
state_dict = {"tensor": tensor, "value": val, "st": st}
return create_default_local_save_plan(state_dict, rank == 0)
plan1 = create_data(0)
plan2 = create_data(1)
self.assertFalse(_compare_save_plans(plan1, plan2))
self.assertTrue(_compare_save_plans(plan1, plan1))
self.assertTrue(_compare_save_plans(plan2, plan2))
| TestPlannerHelpers |
python | google__pytype | pytype/imports/base.py | {
"start": 1360,
"end": 1776
} | class ____(abc.ABC):
"""Base class for module loaders."""
@abc.abstractmethod
def find_import(self, module_name: str) -> ModuleInfo | None:
raise NotImplementedError()
@abc.abstractmethod
def load_ast(self, mod_info: ModuleInfo) -> pytd.TypeDeclUnit:
raise NotImplementedError()
@abc.abstractmethod
def log_module_not_found(self, module_name: str):
raise NotImplementedError()
| ModuleLoader |
python | astropy__astropy | astropy/io/fits/hdu/compressed/_codecs.py | {
"start": 2272,
"end": 3877
} | class ____(Codec):
"""
The FITS GZIP 1 compression and decompression algorithm.
The Gzip algorithm is used in the free GNU software compression utility of
the same name. It was created by J. L. Gailly and M. Adler, based on the
DEFLATE algorithm (Deutsch 1996), which is a combination of LZ77 (Ziv &
Lempel 1977) and Huffman coding.
"""
codec_id = "FITS_GZIP1"
def decode(self, buf):
"""
Decompress buffer using the GZIP_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
# In principle we should be able to not have .tobytes() here and avoid
# the copy but this does not work correctly in Python 3.11.
cbytes = np.frombuffer(buf, dtype=np.uint8).tobytes()
dbytes = gzip_decompress(cbytes)
return np.frombuffer(dbytes, dtype=np.uint8)
def encode(self, buf):
"""
Compress the data in the buffer using the GZIP_1 algorithm.
Parameters
----------
buf _like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# Data bytes should be stored as big endian in files
# In principle we should be able to not have .tobytes() here and avoid
# the copy but this does not work correctly in Python 3.11.
dbytes = _as_big_endian_array(buf).tobytes()
return gzip_compress(dbytes)
| Gzip1 |
python | getsentry__sentry-python | tests/test_utils.py | {
"start": 808,
"end": 30996
} | class ____(Integration):
"""
Test integration for testing ensure_integration_enabled decorator.
"""
identifier = "test"
setup_once = mock.MagicMock()
try:
import gevent
except ImportError:
gevent = None
def _normalize_distribution_name(name):
# type: (str) -> str
"""Normalize distribution name according to PEP-0503.
See:
https://peps.python.org/pep-0503/#normalized-names
for more details.
"""
return re.sub(r"[-_.]+", "-", name).lower()
isoformat_inputs_and_datetime_outputs = (
(
"2021-01-01T00:00:00.000000Z",
datetime(2021, 1, 1, tzinfo=timezone.utc),
), # UTC time
(
"2021-01-01T00:00:00.000000",
datetime(2021, 1, 1).astimezone(timezone.utc),
), # No TZ -- assume local but convert to UTC
(
"2021-01-01T00:00:00Z",
datetime(2021, 1, 1, tzinfo=timezone.utc),
), # UTC - No milliseconds
(
"2021-01-01T00:00:00.000000+00:00",
datetime(2021, 1, 1, tzinfo=timezone.utc),
),
(
"2021-01-01T00:00:00.000000-00:00",
datetime(2021, 1, 1, tzinfo=timezone.utc),
),
(
"2021-01-01T00:00:00.000000+0000",
datetime(2021, 1, 1, tzinfo=timezone.utc),
),
(
"2021-01-01T00:00:00.000000-0000",
datetime(2021, 1, 1, tzinfo=timezone.utc),
),
(
"2020-12-31T00:00:00.000000+02:00",
datetime(2020, 12, 31, tzinfo=timezone(timedelta(hours=2))),
), # UTC+2 time
(
"2020-12-31T00:00:00.000000-0200",
datetime(2020, 12, 31, tzinfo=timezone(timedelta(hours=-2))),
), # UTC-2 time
(
"2020-12-31T00:00:00-0200",
datetime(2020, 12, 31, tzinfo=timezone(timedelta(hours=-2))),
), # UTC-2 time - no milliseconds
)
@pytest.mark.parametrize(
("input_str", "expected_output"),
isoformat_inputs_and_datetime_outputs,
)
def test_datetime_from_isoformat(input_str, expected_output):
assert datetime_from_isoformat(input_str) == expected_output, input_str
@pytest.mark.parametrize(
("input_str", "expected_output"),
isoformat_inputs_and_datetime_outputs,
)
def test_datetime_from_isoformat_with_py_36_or_lower(input_str, expected_output):
"""
`fromisoformat` was added in Python version 3.7
"""
with mock.patch("sentry_sdk.utils.datetime") as datetime_mocked:
datetime_mocked.fromisoformat.side_effect = AttributeError()
datetime_mocked.strptime = datetime.strptime
assert datetime_from_isoformat(input_str) == expected_output, input_str
@pytest.mark.parametrize(
"env_var_value,strict,expected",
[
(None, True, None),
(None, False, False),
("", True, None),
("", False, False),
("t", True, True),
("T", True, True),
("t", False, True),
("T", False, True),
("y", True, True),
("Y", True, True),
("y", False, True),
("Y", False, True),
("1", True, True),
("1", False, True),
("True", True, True),
("True", False, True),
("true", True, True),
("true", False, True),
("tRuE", True, True),
("tRuE", False, True),
("Yes", True, True),
("Yes", False, True),
("yes", True, True),
("yes", False, True),
("yEs", True, True),
("yEs", False, True),
("On", True, True),
("On", False, True),
("on", True, True),
("on", False, True),
("oN", True, True),
("oN", False, True),
("f", True, False),
("f", False, False),
("n", True, False),
("N", True, False),
("n", False, False),
("N", False, False),
("0", True, False),
("0", False, False),
("False", True, False),
("False", False, False),
("false", True, False),
("false", False, False),
("FaLsE", True, False),
("FaLsE", False, False),
("No", True, False),
("No", False, False),
("no", True, False),
("no", False, False),
("nO", True, False),
("nO", False, False),
("Off", True, False),
("Off", False, False),
("off", True, False),
("off", False, False),
("oFf", True, False),
("oFf", False, False),
("xxx", True, None),
("xxx", False, True),
],
)
def test_env_to_bool(env_var_value, strict, expected):
assert env_to_bool(env_var_value, strict=strict) == expected, (
f"Value: {env_var_value}, strict: {strict}"
)
@pytest.mark.parametrize(
("url", "expected_result"),
[
("http://localhost:8000", "http://localhost:8000"),
("http://example.com", "http://example.com"),
("https://example.com", "https://example.com"),
(
"example.com?token=abc&sessionid=123&save=true",
"example.com?token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
),
(
"http://example.com?token=abc&sessionid=123&save=true",
"http://example.com?token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
),
(
"https://example.com?token=abc&sessionid=123&save=true",
"https://example.com?token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
),
(
"http://localhost:8000/?token=abc&sessionid=123&save=true",
"http://localhost:8000/?token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
),
(
"ftp://username:password@ftp.example.com:9876/bla/blub#foo",
"ftp://[Filtered]:[Filtered]@ftp.example.com:9876/bla/blub#foo",
),
(
"https://username:password@example.com/bla/blub?token=abc&sessionid=123&save=true#fragment",
"https://[Filtered]:[Filtered]@example.com/bla/blub?token=[Filtered]&sessionid=[Filtered]&save=[Filtered]#fragment",
),
("bla/blub/foo", "bla/blub/foo"),
("/bla/blub/foo/", "/bla/blub/foo/"),
(
"bla/blub/foo?token=abc&sessionid=123&save=true",
"bla/blub/foo?token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
),
(
"/bla/blub/foo/?token=abc&sessionid=123&save=true",
"/bla/blub/foo/?token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
),
],
)
def test_sanitize_url(url, expected_result):
assert sanitize_url(url) == expected_result
@pytest.mark.parametrize(
("url", "expected_result"),
[
(
"http://localhost:8000",
Components(
scheme="http", netloc="localhost:8000", path="", query="", fragment=""
),
),
(
"http://example.com",
Components(
scheme="http", netloc="example.com", path="", query="", fragment=""
),
),
(
"https://example.com",
Components(
scheme="https", netloc="example.com", path="", query="", fragment=""
),
),
(
"example.com?token=abc&sessionid=123&save=true",
Components(
scheme="",
netloc="",
path="example.com",
query="token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
fragment="",
),
),
(
"http://example.com?token=abc&sessionid=123&save=true",
Components(
scheme="http",
netloc="example.com",
path="",
query="token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
fragment="",
),
),
(
"https://example.com?token=abc&sessionid=123&save=true",
Components(
scheme="https",
netloc="example.com",
path="",
query="token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
fragment="",
),
),
(
"http://localhost:8000/?token=abc&sessionid=123&save=true",
Components(
scheme="http",
netloc="localhost:8000",
path="/",
query="token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
fragment="",
),
),
(
"ftp://username:password@ftp.example.com:9876/bla/blub#foo",
Components(
scheme="ftp",
netloc="[Filtered]:[Filtered]@ftp.example.com:9876",
path="/bla/blub",
query="",
fragment="foo",
),
),
(
"https://username:password@example.com/bla/blub?token=abc&sessionid=123&save=true#fragment",
Components(
scheme="https",
netloc="[Filtered]:[Filtered]@example.com",
path="/bla/blub",
query="token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
fragment="fragment",
),
),
(
"bla/blub/foo",
Components(
scheme="", netloc="", path="bla/blub/foo", query="", fragment=""
),
),
(
"bla/blub/foo?token=abc&sessionid=123&save=true",
Components(
scheme="",
netloc="",
path="bla/blub/foo",
query="token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
fragment="",
),
),
(
"/bla/blub/foo/?token=abc&sessionid=123&save=true",
Components(
scheme="",
netloc="",
path="/bla/blub/foo/",
query="token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
fragment="",
),
),
],
)
def test_sanitize_url_and_split(url, expected_result):
sanitized_url = sanitize_url(url, split=True)
assert sanitized_url.scheme == expected_result.scheme
assert sanitized_url.netloc == expected_result.netloc
assert sanitized_url.query == expected_result.query
assert sanitized_url.path == expected_result.path
assert sanitized_url.fragment == expected_result.fragment
def test_sanitize_url_remove_authority_is_false():
url = "https://usr:pwd@example.com"
sanitized_url = sanitize_url(url, remove_authority=False)
assert sanitized_url == url
@pytest.mark.parametrize(
("url", "sanitize", "expected_url", "expected_query", "expected_fragment"),
[
# Test with sanitize=True
(
"https://example.com",
True,
"https://example.com",
"",
"",
),
(
"example.com?token=abc&sessionid=123&save=true",
True,
"example.com",
"token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
"",
),
(
"https://example.com?token=abc&sessionid=123&save=true",
True,
"https://example.com",
"token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
"",
),
(
"https://username:password@example.com/bla/blub?token=abc&sessionid=123&save=true#fragment",
True,
"https://[Filtered]:[Filtered]@example.com/bla/blub",
"token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
"fragment",
),
(
"bla/blub/foo",
True,
"bla/blub/foo",
"",
"",
),
(
"/bla/blub/foo/#baz",
True,
"/bla/blub/foo/",
"",
"baz",
),
(
"bla/blub/foo?token=abc&sessionid=123&save=true",
True,
"bla/blub/foo",
"token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
"",
),
(
"/bla/blub/foo/?token=abc&sessionid=123&save=true",
True,
"/bla/blub/foo/",
"token=[Filtered]&sessionid=[Filtered]&save=[Filtered]",
"",
),
# Test with sanitize=False
(
"https://example.com",
False,
"https://example.com",
"",
"",
),
(
"example.com?token=abc&sessionid=123&save=true",
False,
"example.com",
"token=abc&sessionid=123&save=true",
"",
),
(
"https://example.com?token=abc&sessionid=123&save=true",
False,
"https://example.com",
"token=abc&sessionid=123&save=true",
"",
),
(
"https://username:password@example.com/bla/blub?token=abc&sessionid=123&save=true#fragment",
False,
"https://[Filtered]:[Filtered]@example.com/bla/blub",
"token=abc&sessionid=123&save=true",
"fragment",
),
(
"bla/blub/foo",
False,
"bla/blub/foo",
"",
"",
),
(
"/bla/blub/foo/#baz",
False,
"/bla/blub/foo/",
"",
"baz",
),
(
"bla/blub/foo?token=abc&sessionid=123&save=true",
False,
"bla/blub/foo",
"token=abc&sessionid=123&save=true",
"",
),
(
"/bla/blub/foo/?token=abc&sessionid=123&save=true",
False,
"/bla/blub/foo/",
"token=abc&sessionid=123&save=true",
"",
),
],
)
def test_parse_url(url, sanitize, expected_url, expected_query, expected_fragment):
assert parse_url(url, sanitize=sanitize).url == expected_url
assert parse_url(url, sanitize=sanitize).fragment == expected_fragment
assert parse_url(url, sanitize=sanitize).query == expected_query
@pytest.mark.parametrize(
"rate",
[0.0, 0.1231, 1.0, True, False],
)
def test_accepts_valid_sample_rate(rate):
with mock.patch.object(logger, "warning", mock.Mock()):
result = is_valid_sample_rate(rate, source="Testing")
assert logger.warning.called is False
assert result is True
@pytest.mark.parametrize(
"rate",
[
"dogs are great", # wrong type
(0, 1), # wrong type
{"Maisey": "Charllie"}, # wrong type
[True, True], # wrong type
{0.2012}, # wrong type
float("NaN"), # wrong type
None, # wrong type
-1.121, # wrong value
1.231, # wrong value
],
)
def test_warns_on_invalid_sample_rate(rate, StringContaining): # noqa: N803
with mock.patch.object(logger, "warning", mock.Mock()):
result = is_valid_sample_rate(rate, source="Testing")
logger.warning.assert_any_call(StringContaining("Given sample rate is invalid"))
assert result is False
@pytest.mark.parametrize(
"include_source_context",
[True, False],
)
def test_include_source_context_when_serializing_frame(include_source_context):
frame = sys._getframe()
result = serialize_frame(frame, include_source_context=include_source_context)
assert include_source_context ^ ("pre_context" in result) ^ True
assert include_source_context ^ ("context_line" in result) ^ True
assert include_source_context ^ ("post_context" in result) ^ True
@pytest.mark.parametrize(
"item,regex_list,expected_result",
[
["", [], False],
[None, [], False],
["", None, False],
[None, None, False],
["some-string", [], False],
["some-string", None, False],
["some-string", ["some-string"], True],
["some-string", ["some"], False],
["some-string", ["some$"], False], # same as above
["some-string", ["some.*"], True],
["some-string", ["Some"], False], # we do case sensitive matching
["some-string", [".*string$"], True],
],
)
def test_match_regex_list(item, regex_list, expected_result):
assert match_regex_list(item, regex_list) == expected_result
@pytest.mark.parametrize(
"version,expected_result",
[
["3.5.15", (3, 5, 15)],
["2.0.9", (2, 0, 9)],
["2.0.0", (2, 0, 0)],
["0.6.0", (0, 6, 0)],
["2.0.0.post1", (2, 0, 0)],
["2.0.0rc3", (2, 0, 0)],
["2.0.0rc2", (2, 0, 0)],
["2.0.0rc1", (2, 0, 0)],
["2.0.0b4", (2, 0, 0)],
["2.0.0b3", (2, 0, 0)],
["2.0.0b2", (2, 0, 0)],
["2.0.0b1", (2, 0, 0)],
["0.6beta3", (0, 6)],
["0.6beta2", (0, 6)],
["0.6beta1", (0, 6)],
["0.4.2b", (0, 4, 2)],
["0.4.2a", (0, 4, 2)],
["0.0.1", (0, 0, 1)],
["0.0.0", (0, 0, 0)],
["1", (1,)],
["1.0", (1, 0)],
["1.0.0", (1, 0, 0)],
[" 1.0.0 ", (1, 0, 0)],
[" 1.0.0 ", (1, 0, 0)],
["x1.0.0", None],
["1.0.0x", None],
["x1.0.0x", None],
],
)
def test_parse_version(version, expected_result):
assert parse_version(version) == expected_result
@pytest.fixture
def mock_client_with_dsn_netloc():
"""
Returns a mocked Client with a DSN netloc of "abcd1234.ingest.sentry.io".
"""
mock_client = mock.Mock(spec=sentry_sdk.Client)
mock_client.transport = mock.Mock(spec=sentry_sdk.Transport)
mock_client.transport.parsed_dsn = mock.Mock(spec=Dsn)
mock_client.transport.parsed_dsn.netloc = "abcd1234.ingest.sentry.io"
return mock_client
@pytest.mark.parametrize(
["test_url", "is_sentry_url_expected"],
[
["https://asdf@abcd1234.ingest.sentry.io/123456789", True],
["https://asdf@abcd1234.ingest.notsentry.io/123456789", False],
],
)
def test_is_sentry_url_true(
test_url, is_sentry_url_expected, mock_client_with_dsn_netloc
):
ret_val = is_sentry_url(mock_client_with_dsn_netloc, test_url)
assert ret_val == is_sentry_url_expected
def test_is_sentry_url_no_client():
test_url = "https://asdf@abcd1234.ingest.sentry.io/123456789"
ret_val = is_sentry_url(None, test_url)
assert not ret_val
@pytest.mark.parametrize(
"error,expected_result",
[
["", lambda x: safe_str(x)],
["some-string", lambda _: "some-string"],
],
)
def test_get_error_message(error, expected_result):
with pytest.raises(BaseException) as exc_value:
exc_value.message = error
raise Exception
assert get_error_message(exc_value) == expected_result(exc_value)
with pytest.raises(BaseException) as exc_value:
exc_value.detail = error
raise Exception
assert get_error_message(exc_value) == expected_result(exc_value)
def test_safe_str_fails():
class ExplodingStr:
def __str__(self):
raise Exception
obj = ExplodingStr()
result = safe_str(obj)
assert result == repr(obj)
def test_installed_modules_caching():
mock_generate_installed_modules = mock.Mock()
mock_generate_installed_modules.return_value = {"package": "1.0.0"}
with mock.patch("sentry_sdk.utils._installed_modules", None):
with mock.patch(
"sentry_sdk.utils._generate_installed_modules",
mock_generate_installed_modules,
):
_get_installed_modules()
assert mock_generate_installed_modules.called
mock_generate_installed_modules.reset_mock()
_get_installed_modules()
mock_generate_installed_modules.assert_not_called()
def test_devnull_inaccessible():
with mock.patch("sentry_sdk.utils.open", side_effect=OSError("oh no")):
revision = get_git_revision()
assert revision is None
def test_devnull_not_found():
with mock.patch("sentry_sdk.utils.open", side_effect=FileNotFoundError("oh no")):
revision = get_git_revision()
assert revision is None
def test_default_release():
release = get_default_release()
assert release is not None
def test_default_release_empty_string():
with mock.patch("sentry_sdk.utils.get_git_revision", return_value=""):
release = get_default_release()
assert release is None
def test_get_default_release_sentry_release_env(monkeypatch):
monkeypatch.setenv("SENTRY_RELEASE", "sentry-env-release")
assert get_default_release() == "sentry-env-release"
def test_get_default_release_other_release_env(monkeypatch):
monkeypatch.setenv("SOURCE_VERSION", "other-env-release")
with mock.patch("sentry_sdk.utils.get_git_revision", return_value=""):
release = get_default_release()
assert release == "other-env-release"
def test_ensure_integration_enabled_integration_enabled(sentry_init):
def original_function():
return "original"
def function_to_patch():
return "patched"
sentry_init(integrations=[TestIntegration()])
# Test the decorator by applying to function_to_patch
patched_function = ensure_integration_enabled(TestIntegration, original_function)(
function_to_patch
)
assert patched_function() == "patched"
assert patched_function.__name__ == "original_function"
def test_ensure_integration_enabled_integration_disabled(sentry_init):
def original_function():
return "original"
def function_to_patch():
return "patched"
sentry_init(integrations=[]) # TestIntegration is disabled
# Test the decorator by applying to function_to_patch
patched_function = ensure_integration_enabled(TestIntegration, original_function)(
function_to_patch
)
assert patched_function() == "original"
assert patched_function.__name__ == "original_function"
def test_ensure_integration_enabled_no_original_function_enabled(sentry_init):
shared_variable = "original"
def function_to_patch():
nonlocal shared_variable
shared_variable = "patched"
sentry_init(integrations=[TestIntegration])
# Test the decorator by applying to function_to_patch
patched_function = ensure_integration_enabled(TestIntegration)(function_to_patch)
patched_function()
assert shared_variable == "patched"
assert patched_function.__name__ == "function_to_patch"
def test_ensure_integration_enabled_no_original_function_disabled(sentry_init):
shared_variable = "original"
def function_to_patch():
nonlocal shared_variable
shared_variable = "patched"
sentry_init(integrations=[])
# Test the decorator by applying to function_to_patch
patched_function = ensure_integration_enabled(TestIntegration)(function_to_patch)
patched_function()
assert shared_variable == "original"
assert patched_function.__name__ == "function_to_patch"
@pytest.mark.parametrize(
"delta,expected_milliseconds",
[
[timedelta(milliseconds=132), 132.0],
[timedelta(hours=1, milliseconds=132), float(60 * 60 * 1000 + 132)],
[timedelta(days=10), float(10 * 24 * 60 * 60 * 1000)],
[timedelta(microseconds=100), 0.1],
],
)
def test_duration_in_milliseconds(delta, expected_milliseconds):
assert delta / timedelta(milliseconds=1) == expected_milliseconds
def test_get_current_thread_meta_explicit_thread():
results = Queue(maxsize=1)
def target1():
pass
def target2():
results.put(get_current_thread_meta(thread1))
thread1 = threading.Thread(target=target1)
thread1.start()
thread2 = threading.Thread(target=target2)
thread2.start()
thread2.join()
thread1.join()
assert (thread1.ident, thread1.name) == results.get(timeout=1)
def test_get_current_thread_meta_bad_explicit_thread():
thread = "fake thread"
main_thread = threading.main_thread()
assert (main_thread.ident, main_thread.name) == get_current_thread_meta(thread)
@pytest.mark.skipif(gevent is None, reason="gevent not enabled")
def test_get_current_thread_meta_gevent_in_thread():
results = Queue(maxsize=1)
def target():
with mock.patch("sentry_sdk.utils.is_gevent", side_effect=[True]):
job = gevent.spawn(get_current_thread_meta)
job.join()
results.put(job.value)
thread = threading.Thread(target=target)
thread.start()
thread.join()
assert (thread.ident, None) == results.get(timeout=1)
@pytest.mark.skipif(gevent is None, reason="gevent not enabled")
def test_get_current_thread_meta_gevent_in_thread_failed_to_get_hub():
results = Queue(maxsize=1)
def target():
with mock.patch("sentry_sdk.utils.is_gevent", side_effect=[True]):
with mock.patch(
"sentry_sdk.utils.get_gevent_hub", side_effect=["fake gevent hub"]
):
job = gevent.spawn(get_current_thread_meta)
job.join()
results.put(job.value)
thread = threading.Thread(target=target)
thread.start()
thread.join()
assert (thread.ident, thread.name) == results.get(timeout=1)
def test_get_current_thread_meta_running_thread():
results = Queue(maxsize=1)
def target():
results.put(get_current_thread_meta())
thread = threading.Thread(target=target)
thread.start()
thread.join()
assert (thread.ident, thread.name) == results.get(timeout=1)
def test_get_current_thread_meta_bad_running_thread():
results = Queue(maxsize=1)
def target():
with mock.patch("threading.current_thread", side_effect=["fake thread"]):
results.put(get_current_thread_meta())
thread = threading.Thread(target=target)
thread.start()
thread.join()
main_thread = threading.main_thread()
assert (main_thread.ident, main_thread.name) == results.get(timeout=1)
def test_get_current_thread_meta_main_thread():
results = Queue(maxsize=1)
def target():
# mock that somehow the current thread doesn't exist
with mock.patch("threading.current_thread", side_effect=[None]):
results.put(get_current_thread_meta())
main_thread = threading.main_thread()
thread = threading.Thread(target=target)
thread.start()
thread.join()
assert (main_thread.ident, main_thread.name) == results.get(timeout=1)
@pytest.mark.skipif(PY38, reason="Flakes a lot on 3.8 in CI.")
def test_get_current_thread_meta_failed_to_get_main_thread():
results = Queue(maxsize=1)
def target():
with mock.patch("threading.current_thread", side_effect=["fake thread"]):
with mock.patch("threading.current_thread", side_effect=["fake thread"]):
results.put(get_current_thread_meta())
main_thread = threading.main_thread()
thread = threading.Thread(target=target)
thread.start()
thread.join()
assert (main_thread.ident, main_thread.name) == results.get(timeout=1)
@pytest.mark.parametrize(
("datetime_object", "expected_output"),
(
(
datetime(2021, 1, 1, tzinfo=timezone.utc),
"2021-01-01T00:00:00.000000Z",
), # UTC time
(
datetime(2021, 1, 1, tzinfo=timezone(timedelta(hours=2))),
"2020-12-31T22:00:00.000000Z",
), # UTC+2 time
(
datetime(2021, 1, 1, tzinfo=timezone(timedelta(hours=-7))),
"2021-01-01T07:00:00.000000Z",
), # UTC-7 time
(
datetime(2021, 2, 3, 4, 56, 7, 890123, tzinfo=timezone.utc),
"2021-02-03T04:56:07.890123Z",
), # UTC time all non-zero fields
),
)
def test_format_timestamp(datetime_object, expected_output):
formatted = format_timestamp(datetime_object)
assert formatted == expected_output
def test_format_timestamp_naive():
datetime_object = datetime(2021, 1, 1)
timestamp_regex = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}Z"
# Ensure that some timestamp is returned, without error. We currently treat these as local time, but this is an
# implementation detail which we should not assert here.
assert re.fullmatch(timestamp_regex, format_timestamp(datetime_object))
def test_qualname_from_function_inner_function():
def test_function(): ...
assert (
sentry_sdk.utils.qualname_from_function(test_function)
== "tests.test_utils.test_qualname_from_function_inner_function.<locals>.test_function"
)
def test_qualname_from_function_none_name():
def test_function(): ...
test_function.__module__ = None
assert (
sentry_sdk.utils.qualname_from_function(test_function)
== "test_qualname_from_function_none_name.<locals>.test_function"
)
def test_to_string_unicode_decode_error():
class BadStr:
def __str__(self):
raise UnicodeDecodeError("utf-8", b"", 0, 1, "reason")
obj = BadStr()
result = to_string(obj)
assert result == repr(obj)[1:-1]
def test_exc_info_from_error_dont_get_an_exc():
class NotAnException:
pass
with pytest.raises(ValueError) as exc:
exc_info_from_error(NotAnException())
assert "Expected Exception object to report, got <class" in str(exc.value)
def test_get_lines_from_file_handle_linecache_errors():
expected_result = ([], None, [])
class Loader:
@staticmethod
def get_source(module):
raise IOError("something went wrong")
result = get_lines_from_file("filename", 10, loader=Loader())
assert result == expected_result
with mock.patch(
"sentry_sdk.utils.linecache.getlines",
side_effect=OSError("something went wrong"),
):
result = get_lines_from_file("filename", 10)
assert result == expected_result
lines = ["line1", "line2", "line3"]
def fake_getlines(filename):
return lines
with mock.patch("sentry_sdk.utils.linecache.getlines", fake_getlines):
result = get_lines_from_file("filename", 10)
assert result == expected_result
def test_package_version_is_none():
assert package_version("non_existent_package") is None
| TestIntegration |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_display_units02.py | {
"start": 315,
"end": 1205
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_display_units02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56159232, 61364096]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.set_y_axis({"display_units": "hundreds", "display_units_visible": 0})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/_VF.py | {
"start": 360,
"end": 664
} | class ____(types.ModuleType):
vf: types.ModuleType
def __init__(self, name: str):
super().__init__(name)
self.vf = torch._C._VariableFunctions
def __getattr__(self, name: str) -> object:
return getattr(self.vf, name)
sys.modules[__name__] = VFModule(__name__)
| VFModule |
python | openai__openai-python | tests/api_resources/conversations/test_items.py | {
"start": 9575,
"end": 19284
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
include=["file_search_call.results"],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.create(
conversation_id="",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
include=["file_search_call.results"],
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
await async_client.conversations.items.with_raw_response.retrieve(
item_id="",
conversation_id="conv_123",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.list(
conversation_id="conv_123",
)
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.list(
conversation_id="conv_123",
after="after",
include=["file_search_call.results"],
limit=0,
order="asc",
)
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.list(
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.list(
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.list(
conversation_id="",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(Conversation, item, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(Conversation, item, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(Conversation, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
await async_client.conversations.items.with_raw_response.delete(
item_id="",
conversation_id="conv_123",
)
| TestAsyncItems |
python | numba__numba | numba/core/errors.py | {
"start": 2569,
"end": 2704
} | class ____(NumbaWarning):
"""
Warning category for an issue with the emission of debug information.
"""
| NumbaDebugInfoWarning |
python | ApeWorX__ape | src/ape/plugins/network.py | {
"start": 2835,
"end": 3676
} | class ____(PluginType):
"""
A plugin for a blockchain explorer, such as
`ape-etherscan <https://github.com/ApeWorX/ape-etherscan>`__.
"""
@hookspec
def explorers( # type: ignore[empty-body]
self,
) -> Iterator[tuple[str, str, type["ExplorerAPI"]]]:
"""
A hook that must return an iterator of tuples of:
* the target ecosystem plugin's name
* the network it works with (which must be valid network in the ecosystem)
* a :class:`~ape.api.explorers.ExplorerAPI` subclass
Usage example::
@plugins.register(plugins.ExplorerPlugin)
def explorers():
yield "ethereum", "mainnet", MyBlockExplorer
Returns:
Iterator[tuple[str, str, type[:class:`ape.api.explorers.ExplorerAPI`]]]
"""
| ExplorerPlugin |
python | tox-dev__tox | src/tox/config/source/tox_ini.py | {
"start": 65,
"end": 193
} | class ____(IniSource):
"""Configuration sourced from a tox.ini file."""
FILENAME = "tox.ini"
__all__ = ("ToxIni",)
| ToxIni |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_merge_range02.py | {
"start": 437,
"end": 5386
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test merged cell range"""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
cell_format1 = Format({"xf_index": 1})
cell_format2 = Format({"xf_index": 2})
worksheet.merge_range("B3:C3", "Foo", cell_format1)
worksheet.merge_range("A2:D2", "", cell_format2)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A2:D3"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="1:4">
<c r="A2" s="2"/>
<c r="B2" s="2"/>
<c r="C2" s="2"/>
<c r="D2" s="2"/>
</row>
<row r="3" spans="1:4">
<c r="B3" s="1" t="s">
<v>0</v>
</c>
<c r="C3" s="1"/>
</row>
</sheetData>
<mergeCells count="2">
<mergeCell ref="B3:C3"/>
<mergeCell ref="A2:D2"/>
</mergeCells>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
def test_assemble_xml_file_write(self):
"""Test writing a worksheet with a blank cell with write() method."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({"xf_index": 1})
# No format. Should be ignored.
worksheet.write(0, 0, None)
worksheet.write(1, 2, None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
def test_assemble_xml_file_A1(self):
"""Test writing a worksheet with a blank cell with A1 notation."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({"xf_index": 1})
# No format. Should be ignored.
worksheet.write_blank("A1", None)
worksheet.write_blank("C2", None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | falconry__falcon | falcon/errors.py | {
"start": 3840,
"end": 3947
} | class ____(OSError):
"""The read operation did not find the requested stream delimiter."""
| DelimiterError |
python | getsentry__sentry | tests/sentry/api/serializers/test_organization.py | {
"start": 1857,
"end": 5612
} | class ____(TestCase):
def test_simple(self) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
result = serialize(organization, user)
assert result["id"] == str(organization.id)
assert result["features"] == [
"advanced-search",
"change-alerts",
"crash-rate-alerts",
"custom-symbol-sources",
"dashboards-basic",
"dashboards-edit",
"data-forwarding",
"discover-basic",
"discover-query",
"event-attachments",
"insight-modules",
"integrations-alert-rule",
"integrations-chat-unfurl",
"integrations-codeowners",
"integrations-deployment",
"integrations-enterprise-alert-rule",
"integrations-enterprise-incident-management",
"integrations-event-hooks",
"integrations-incident-management",
"integrations-issue-basic",
"integrations-issue-sync",
"integrations-scm-multi-org",
"integrations-stacktrace-link",
"integrations-ticket-rules",
"integrations-vercel",
"invite-members",
"minute-resolution-sessions",
"open-membership",
"project-creation-games-tab",
"relay",
"session-replay-ui",
"shared-issues",
"sso-basic",
"sso-saml2",
"symbol-sources",
"team-insights",
"team-roles",
"uptime",
]
@mock.patch("sentry.features.batch_has")
def test_organization_batch_has(self, mock_batch: mock.MagicMock) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
features.add("organizations:test-feature", OrganizationFeature, api_expose=True)
features.add("organizations:disabled-feature", OrganizationFeature, api_expose=True)
mock_batch.return_value = {
f"organization:{organization.id}": {
"organizations:test-feature": True,
"organizations:disabled-feature": False,
}
}
result = serialize(organization, user)
assert "test-feature" in result["features"]
assert "disabled-feature" not in result["features"]
@mock.patch.dict(ORGANIZATION_OPTIONS_AS_FEATURES, mock_options_as_features)
def test_organization_options_as_features(self) -> None:
user = self.create_user()
organization = self.create_organization(owner=user)
OrganizationOption.objects.set_value(organization, "sentry:set_no_value", {})
OrganizationOption.objects.set_value(organization, "sentry:set_with_func_pass", 1)
OrganizationOption.objects.set_value(organization, "sentry:set_with_func_fail", 0)
features = serialize(organization, user)["features"]
# Setting a flag with no function checks for option, regardless of value
for feature, _func in mock_options_as_features["sentry:set_no_value"]:
assert feature in features
# If the option isn't set, it doesn't appear in features
for feature, _func in mock_options_as_features["sentry:unset_no_value"]:
assert feature not in features
# With a function, run it against the value
for feature, _func in mock_options_as_features["sentry:set_with_func_pass"]:
assert feature in features
# If it returns False, it doesn't appear in features
for feature, _func in mock_options_as_features["sentry:set_with_func_fail"]:
assert feature not in features
| OrganizationSerializerTest |
python | sympy__sympy | sympy/tensor/array/expressions/array_expressions.py | {
"start": 6895,
"end": 11018
} | class ____(_CodegenArrayAbstract):
r"""
Class to represent the tensor product of array-like objects.
"""
def __new__(cls, *args, **kwargs):
args = [_sympify(arg) for arg in args]
canonicalize = kwargs.pop("canonicalize", False)
ranks = [get_rank(arg) for arg in args]
obj = Basic.__new__(cls, *args)
obj._subranks = ranks
shapes = [get_shape(i) for i in args]
if any(i is None for i in shapes):
obj._shape = None
else:
obj._shape = tuple(j for i in shapes for j in i)
if canonicalize:
return obj._canonicalize()
return obj
def _canonicalize(self):
args = self.args
args = self._flatten(args)
ranks = [get_rank(arg) for arg in args]
# Check if there are nested permutation and lift them up:
permutation_cycles = []
for i, arg in enumerate(args):
if not isinstance(arg, PermuteDims):
continue
permutation_cycles.extend([[k + sum(ranks[:i]) for k in j] for j in arg.permutation.cyclic_form])
args[i] = arg.expr
if permutation_cycles:
return _permute_dims(_array_tensor_product(*args), Permutation(sum(ranks)-1)*Permutation(permutation_cycles))
if len(args) == 1:
return args[0]
# If any object is a ZeroArray, return a ZeroArray:
if any(isinstance(arg, (ZeroArray, ZeroMatrix)) for arg in args):
shapes = reduce(operator.add, [get_shape(i) for i in args], ())
return ZeroArray(*shapes)
# If there are contraction objects inside, transform the whole
# expression into `ArrayContraction`:
contractions = {i: arg for i, arg in enumerate(args) if isinstance(arg, ArrayContraction)}
if contractions:
ranks = [_get_subrank(arg) if isinstance(arg, ArrayContraction) else get_rank(arg) for arg in args]
cumulative_ranks = list(accumulate([0] + ranks))[:-1]
tp = _array_tensor_product(*[arg.expr if isinstance(arg, ArrayContraction) else arg for arg in args])
contraction_indices = [tuple(cumulative_ranks[i] + k for k in j) for i, arg in contractions.items() for j in arg.contraction_indices]
return _array_contraction(tp, *contraction_indices)
diagonals = {i: arg for i, arg in enumerate(args) if isinstance(arg, ArrayDiagonal)}
if diagonals:
inverse_permutation = []
last_perm = []
ranks = [get_rank(arg) for arg in args]
cumulative_ranks = list(accumulate([0] + ranks))[:-1]
for i, arg in enumerate(args):
if isinstance(arg, ArrayDiagonal):
i1 = get_rank(arg) - len(arg.diagonal_indices)
i2 = len(arg.diagonal_indices)
inverse_permutation.extend([cumulative_ranks[i] + j for j in range(i1)])
last_perm.extend([cumulative_ranks[i] + j for j in range(i1, i1 + i2)])
else:
inverse_permutation.extend([cumulative_ranks[i] + j for j in range(get_rank(arg))])
inverse_permutation.extend(last_perm)
tp = _array_tensor_product(*[arg.expr if isinstance(arg, ArrayDiagonal) else arg for arg in args])
ranks2 = [_get_subrank(arg) if isinstance(arg, ArrayDiagonal) else get_rank(arg) for arg in args]
cumulative_ranks2 = list(accumulate([0] + ranks2))[:-1]
diagonal_indices = [tuple(cumulative_ranks2[i] + k for k in j) for i, arg in diagonals.items() for j in arg.diagonal_indices]
return _permute_dims(_array_diagonal(tp, *diagonal_indices), _af_invert(inverse_permutation))
return self.func(*args, canonicalize=False)
@classmethod
def _flatten(cls, args):
args = [i for arg in args for i in (arg.args if isinstance(arg, cls) else [arg])]
return args
def as_explicit(self):
return tensorproduct(*[arg.as_explicit() if hasattr(arg, "as_explicit") else arg for arg in self.args])
| ArrayTensorProduct |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol2.py | {
"start": 504,
"end": 617
} | class ____:
def __call__(self, inputs: Any) -> int:
return 5
g2: MyCallable[int, int] = Class2()
| Class2 |
python | PyCQA__pylint | doc/data/messages/i/invalid-enum-extension/good.py | {
"start": 75,
"end": 138
} | class ____(Enum):
ORANGE = 1
CHERRY = 2
APPLE = 3
| Fruit |
python | fabric__fabric | fabric/connection.py | {
"start": 1408,
"end": 45993
} | class ____(Context):
"""
A connection to an SSH daemon, with methods for commands and file transfer.
**Basics**
This class inherits from Invoke's `~invoke.context.Context`, as it is a
context within which commands, tasks etc can operate. It also encapsulates
a Paramiko `~paramiko.client.SSHClient` instance, performing useful high
level operations with that `~paramiko.client.SSHClient` and
`~paramiko.channel.Channel` instances generated from it.
.. _connect_kwargs:
.. note::
Many SSH specific options -- such as specifying private keys and
passphrases, timeouts, disabling SSH agents, etc -- are handled
directly by Paramiko and should be specified via the
:ref:`connect_kwargs argument <connect_kwargs-arg>` of the constructor.
**Lifecycle**
`.Connection` has a basic "`create <__init__>`, `connect/open <open>`, `do
work <run>`, `disconnect/close <close>`" lifecycle:
- `Instantiation <__init__>` imprints the object with its connection
parameters (but does **not** actually initiate the network connection).
- An alternate constructor exists for users :ref:`upgrading piecemeal
from Fabric 1 <from-v1>`: `from_v1`
- Methods like `run`, `get` etc automatically trigger a call to
`open` if the connection is not active; users may of course call `open`
manually if desired.
- It's best to explicitly close your connections when done using them. This
can be accomplished by manually calling `close`, or by using the object
as a contextmanager::
with Connection('host') as c:
c.run('command')
c.put('file')
.. warning::
While Fabric (and Paramiko) attempt to register connections for
automatic garbage collection, it's not currently safe to rely on that
feature, as it can lead to end-of-process hangs and similar behavior.
.. note::
This class rebinds `invoke.context.Context.run` to `.local` so both
remote and local command execution can coexist.
**Configuration**
Most `.Connection` parameters honor :doc:`Invoke-style configuration
</concepts/configuration>` as well as any applicable :ref:`SSH config file
directives <connection-ssh-config>`. For example, to end up with a
connection to ``admin@myhost``, one could:
- Use any built-in config mechanism, such as ``/etc/fabric.yml``,
``~/.fabric.json``, collection-driven configuration, env vars, etc,
stating ``user: admin`` (or ``{"user": "admin"}``, depending on config
format.) Then ``Connection('myhost')`` would implicitly have a ``user``
of ``admin``.
- Use an SSH config file containing ``User admin`` within any applicable
``Host`` header (``Host myhost``, ``Host *``, etc.) Again,
``Connection('myhost')`` will default to an ``admin`` user.
- Leverage host-parameter shorthand (described in `.Config.__init__`), i.e.
``Connection('admin@myhost')``.
- Give the parameter directly: ``Connection('myhost', user='admin')``.
The same applies to agent forwarding, gateways, and so forth.
.. versionadded:: 2.0
"""
# NOTE: these are initialized here to hint to invoke.Config.__setattr__
# that they should be treated as real attributes instead of config proxies.
# (Additionally, we're doing this instead of using invoke.Config._set() so
# we can take advantage of Sphinx's attribute-doc-comment static analysis.)
# Once an instance is created, these values will usually be non-None
# because they default to the default config values.
host = None
original_host = None
user = None
port = None
ssh_config = None
gateway = None
forward_agent = None
connect_timeout = None
connect_kwargs = None
client = None
transport = None
_sftp = None
_agent_handler = None
@classmethod
def from_v1(cls, env, **kwargs):
"""
Alternate constructor which uses Fabric 1's ``env`` dict for settings.
All keyword arguments besides ``env`` are passed unmolested into the
primary constructor.
.. warning::
Because your own config overrides will win over data from ``env``,
make sure you only set values you *intend* to change from your v1
environment!
For details on exactly which ``env`` vars are imported and what they
become in the new API, please see :ref:`v1-env-var-imports`.
:param env:
An explicit Fabric 1 ``env`` dict (technically, any
``fabric.utils._AttributeDict`` instance should work) to pull
configuration from.
.. versionadded:: 2.4
"""
# TODO: import fabric.state.env (need good way to test it first...)
# TODO: how to handle somebody accidentally calling this in a process
# where 'fabric' is fabric 2, and there's no fabric 1? Probably just a
# re-raise of ImportError??
# Our only requirement is a non-empty host_string
if not env.host_string:
raise InvalidV1Env(
"Supplied v1 env has an empty `host_string` value! Please make sure you're calling Connection.from_v1 within a connected Fabric 1 session." # noqa
)
# TODO: detect collisions with kwargs & except instead of overwriting?
# (More Zen of Python compliant, but also, effort, and also, makes it
# harder for users to intentionally overwrite!)
connect_kwargs = kwargs.setdefault("connect_kwargs", {})
kwargs.setdefault("host", env.host_string)
shorthand = derive_shorthand(env.host_string)
# TODO: don't we need to do the below skipping for user too?
kwargs.setdefault("user", env.user)
# Skip port if host string seemed to have it; otherwise we hit our own
# ambiguity clause in __init__. v1 would also have been doing this
# anyways (host string wins over other settings).
if not shorthand["port"]:
# Run port through int(); v1 inexplicably has a string default...
kwargs.setdefault("port", int(env.port))
# key_filename defaults to None in v1, but in v2, we expect it to be
# either unset, or set to a list. Thus, we only pull it over if it is
# not None.
if env.key_filename is not None:
connect_kwargs.setdefault("key_filename", env.key_filename)
# Obtain config values, if not given, from its own from_v1
# NOTE: not using setdefault as we truly only want to call
# Config.from_v1 when necessary.
if "config" not in kwargs:
kwargs["config"] = Config.from_v1(env)
return cls(**kwargs)
# TODO: should "reopening" an existing Connection object that has been
# closed, be allowed? (See e.g. how v1 detects closed/semi-closed
# connections & nukes them before creating a new client to the same host.)
# TODO: push some of this into paramiko.client.Client? e.g. expand what
# Client.exec_command does, it already allows configuring a subset of what
# we do / will eventually do / did in 1.x. It's silly to have to do
# .get_transport().open_session().
def __init__(
self,
host,
user=None,
port=None,
config=None,
gateway=None,
forward_agent=None,
connect_timeout=None,
connect_kwargs=None,
inline_ssh_env=None,
):
"""
Set up a new object representing a server connection.
:param str host:
the hostname (or IP address) of this connection.
May include shorthand for the ``user`` and/or ``port`` parameters,
of the form ``user@host``, ``host:port``, or ``user@host:port``.
.. note::
Due to ambiguity, IPv6 host addresses are incompatible with the
``host:port`` shorthand (though ``user@host`` will still work
OK). In other words, the presence of >1 ``:`` character will
prevent any attempt to derive a shorthand port number; use the
explicit ``port`` parameter instead.
.. note::
If ``host`` matches a ``Host`` clause in loaded SSH config
data, and that ``Host`` clause contains a ``Hostname``
directive, the resulting `.Connection` object will behave as if
``host`` is equal to that ``Hostname`` value.
In all cases, the original value of ``host`` is preserved as
the ``original_host`` attribute.
Thus, given SSH config like so::
Host myalias
Hostname realhostname
a call like ``Connection(host='myalias')`` will result in an
object whose ``host`` attribute is ``realhostname``, and whose
``original_host`` attribute is ``myalias``.
:param str user:
the login user for the remote connection. Defaults to
``config.user``.
:param int port:
the remote port. Defaults to ``config.port``.
:param config:
configuration settings to use when executing methods on this
`.Connection` (e.g. default SSH port and so forth).
Should be a `.Config` or an `invoke.config.Config`
(which will be turned into a `.Config`).
Default is an anonymous `.Config` object.
:param gateway:
An object to use as a proxy or gateway for this connection.
This parameter accepts one of the following:
- another `.Connection` (for a ``ProxyJump`` style gateway);
- a shell command string (for a ``ProxyCommand`` style style
gateway).
Default: ``None``, meaning no gatewaying will occur (unless
otherwise configured; if one wants to override a configured gateway
at runtime, specify ``gateway=False``.)
.. seealso:: :ref:`ssh-gateways`
:param bool forward_agent:
Whether to enable SSH agent forwarding.
Default: ``config.forward_agent``.
:param int connect_timeout:
Connection timeout, in seconds.
Default: ``config.timeouts.connect``.
:param dict connect_kwargs:
.. _connect_kwargs-arg:
Keyword arguments handed verbatim to
`SSHClient.connect <paramiko.client.SSHClient.connect>` (when
`.open` is called).
`.Connection` tries not to grow additional settings/kwargs of its
own unless it is adding value of some kind; thus,
``connect_kwargs`` is currently the right place to hand in paramiko
connection parameters such as ``pkey`` or ``key_filename``. For
example::
c = Connection(
host="hostname",
user="admin",
connect_kwargs={
"key_filename": "/home/myuser/.ssh/private.key",
},
)
Default: ``config.connect_kwargs``.
:param bool inline_ssh_env:
Whether to send environment variables "inline" as prefixes in front
of command strings (``export VARNAME=value && mycommand here``;
this is the default behavior), or submit them through the SSH
protocol itself.
In Fabric 2.x this defaulted to ``False`` (try using the protocol
behavior), but in 3.x it changed to ``True`` due to the simple fact
that most remote servers are deployed with a restricted
``AcceptEnv`` setting, making use of the protocol approach
non-viable.
The actual default value is the value of the ``inline_ssh_env``
:ref:`configuration value <default-values>` (which, as above,
currently defaults to ``True``).
.. warning::
This functionality does **not** currently perform any shell
escaping on your behalf! Be careful when using nontrivial
values, and note that you can put in your own quoting,
backslashing etc if desired.
Consider using a different approach (such as actual
remote shell scripts) if you run into too many issues here.
.. note::
When serializing into prefixed ``FOO=bar`` format, we apply the
builtin `sorted` function to the env dictionary's keys, to
remove what would otherwise be ambiguous/arbitrary ordering.
.. note::
This setting has no bearing on *local* shell commands; it only
affects remote commands, and thus, methods like `.run` and
`.sudo`.
:raises ValueError:
if user or port values are given via both ``host`` shorthand *and*
their own arguments. (We `refuse the temptation to guess`_).
.. _refuse the temptation to guess:
http://zen-of-python.info/
in-the-face-of-ambiguity-refuse-the-temptation-to-guess.html#12
.. versionchanged:: 2.3
Added the ``inline_ssh_env`` parameter.
.. versionchanged:: 3.0
``inline_ssh_env`` still defaults to the config value, but said
config value has now changed and defaults to ``True``, not
``False``.
"""
# NOTE: parent __init__ sets self._config; for now we simply overwrite
# that below. If it's somehow problematic we would want to break parent
# __init__ up in a manner that is more cleanly overrideable.
super().__init__(config=config)
#: The .Config object referenced when handling default values (for e.g.
#: user or port, when not explicitly given) or deciding how to behave.
if config is None:
config = Config()
# Handle 'vanilla' Invoke config objects, which need cloning 'into' one
# of our own Configs (which grants the new defaults, etc, while not
# squashing them if the Invoke-level config already accounted for them)
elif not isinstance(config, Config):
config = config.clone(into=Config)
self._set(_config=config)
# TODO: when/how to run load_files, merge, load_shell_env, etc?
# TODO: i.e. what is the lib use case here (and honestly in invoke too)
shorthand = self.derive_shorthand(host)
host = shorthand["host"]
err = "You supplied the {} via both shorthand and kwarg! Please pick one." # noqa
if shorthand["user"] is not None:
if user is not None:
raise ValueError(err.format("user"))
user = shorthand["user"]
if shorthand["port"] is not None:
if port is not None:
raise ValueError(err.format("port"))
port = shorthand["port"]
# NOTE: we load SSH config data as early as possible as it has
# potential to affect nearly every other attribute.
#: The per-host SSH config data, if any. (See :ref:`ssh-config`.)
self.ssh_config = self.config.base_ssh_config.lookup(host)
self.original_host = host
#: The hostname of the target server.
self.host = host
if "hostname" in self.ssh_config:
# TODO: log that this occurred?
self.host = self.ssh_config["hostname"]
#: The username this connection will use to connect to the remote end.
self.user = user or self.ssh_config.get("user", self.config.user)
# TODO: is it _ever_ possible to give an empty user value (e.g.
# user='')? E.g. do some SSH server specs allow for that?
#: The network port to connect on.
self.port = port or int(self.ssh_config.get("port", self.config.port))
# Gateway/proxy/bastion/jump setting: non-None values - string,
# Connection, even eg False - get set directly; None triggers seek in
# config/ssh_config
#: The gateway `.Connection` or ``ProxyCommand`` string to be used,
#: if any.
self.gateway = gateway if gateway is not None else self.get_gateway()
# NOTE: we use string above, vs ProxyCommand obj, to avoid spinning up
# the ProxyCommand subprocess at init time, vs open() time.
# TODO: make paramiko.proxy.ProxyCommand lazy instead?
if forward_agent is None:
# Default to config...
forward_agent = self.config.forward_agent
# But if ssh_config is present, it wins
if "forwardagent" in self.ssh_config:
# TODO: SSHConfig really, seriously needs some love here, god
map_ = {"yes": True, "no": False}
forward_agent = map_[self.ssh_config["forwardagent"]]
#: Whether agent forwarding is enabled.
self.forward_agent = forward_agent
if connect_timeout is None:
connect_timeout = self.ssh_config.get(
"connecttimeout", self.config.timeouts.connect
)
if connect_timeout is not None:
connect_timeout = int(connect_timeout)
#: Connection timeout
self.connect_timeout = connect_timeout
#: Keyword arguments given to `paramiko.client.SSHClient.connect` when
#: `open` is called.
self.connect_kwargs = self.resolve_connect_kwargs(connect_kwargs)
#: The `paramiko.client.SSHClient` instance this connection wraps.
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
self.client = client
#: A convenience handle onto the return value of
#: ``self.client.get_transport()`` (after connection time).
self.transport = None
if inline_ssh_env is None:
inline_ssh_env = self.config.inline_ssh_env
#: Whether to construct remote command lines with env vars prefixed
#: inline.
self.inline_ssh_env = inline_ssh_env
def resolve_connect_kwargs(self, connect_kwargs):
# TODO: is it better to pre-empt conflicts w/ manually-handled
# connect() kwargs (hostname, username, etc) here or in open()? We're
# doing open() for now in case e.g. someone manually modifies
# .connect_kwargs attributewise, but otherwise it feels better to do it
# early instead of late.
constructor_kwargs = connect_kwargs or {}
config_kwargs = self.config.connect_kwargs
constructor_keys = constructor_kwargs.get("key_filename", [])
config_keys = config_kwargs.get("key_filename", [])
ssh_config_keys = self.ssh_config.get("identityfile", [])
# Default data: constructor if given, config otherwise
final_kwargs = constructor_kwargs or config_kwargs
# Key filename: merge, in order, config (which includes CLI flags),
# then constructor kwargs, and finally SSH config file data.
# Make sure all are normalized to list as well!
final_keys = []
for value in (config_keys, constructor_keys, ssh_config_keys):
if isinstance(value, str):
value = [value]
final_keys.extend(value)
# Only populate if non-empty.
if final_keys:
final_kwargs["key_filename"] = final_keys
return final_kwargs
def get_gateway(self):
# SSH config wins over Invoke-style config
if "proxyjump" in self.ssh_config:
# Reverse hop1,hop2,hop3 style ProxyJump directive so we start
# with the final (itself non-gatewayed) hop and work up to
# the front (actual, supplied as our own gateway) hop
hops = reversed(self.ssh_config["proxyjump"].split(","))
prev_gw = None
for hop in hops:
# Short-circuit if we appear to be our own proxy, which would
# be a RecursionError. Implies SSH config wildcards.
# TODO: in an ideal world we'd check user/port too in case they
# differ, but...seriously? They can file a PR with those extra
# half dozen test cases in play, E_NOTIME
if self.derive_shorthand(hop)["host"] == self.host:
return None
# Happily, ProxyJump uses identical format to our host
# shorthand...
kwargs = dict(config=self.config.clone())
if prev_gw is not None:
kwargs["gateway"] = prev_gw
cxn = Connection(hop, **kwargs)
prev_gw = cxn
return prev_gw
elif "proxycommand" in self.ssh_config:
# Just a string, which we interpret as a proxy command..
return self.ssh_config["proxycommand"]
# Fallback: config value (may be None).
return self.config.gateway
def __repr__(self):
# Host comes first as it's the most common differentiator by far
bits = [("host", self.host)]
# TODO: maybe always show user regardless? Explicit is good...
if self.user != self.config.user:
bits.append(("user", self.user))
# TODO: harder to make case for 'always show port'; maybe if it's
# non-22 (even if config has overridden the local default)?
if self.port != self.config.port:
bits.append(("port", self.port))
# NOTE: sometimes self.gateway may be eg False if someone wants to
# explicitly override a configured non-None value (as otherwise it's
# impossible for __init__ to tell if a None means "nothing given" or
# "seriously please no gatewaying". So, this must always be a vanilla
# truth test and not eg "is not None".
if self.gateway:
# Displaying type because gw params would probs be too verbose
val = "proxyjump"
if isinstance(self.gateway, str):
val = "proxycommand"
bits.append(("gw", val))
return "<Connection {}>".format(
" ".join("{}={}".format(*x) for x in bits)
)
def _identity(self):
# TODO: consider including gateway and maybe even other init kwargs?
# Whether two cxns w/ same user/host/port but different
# gateway/keys/etc, should be considered "the same", is unclear.
return (self.host, self.user, self.port)
def __eq__(self, other):
if not isinstance(other, Connection):
return False
return self._identity() == other._identity()
def __lt__(self, other):
return self._identity() < other._identity()
def __hash__(self):
# NOTE: this departs from Context/DataProxy, which is not usefully
# hashable.
return hash(self._identity())
def derive_shorthand(self, host_string):
# NOTE: used to be defined inline; preserving API call for both
# backwards compatibility and because it seems plausible we may want to
# modify behavior later, using eg config or other attributes.
return derive_shorthand(host_string)
@property
def is_connected(self):
"""
Whether or not this connection is actually open.
.. versionadded:: 2.0
"""
return self.transport.active if self.transport else False
def open(self):
    """
    Initiate an SSH connection to the host/port this object is bound to.

    This may include activating the configured gateway connection, if one
    is set.

    Also saves a handle to the now-set Transport object for easier access.

    Various connect-time settings (and/or their corresponding :ref:`SSH
    config options <ssh-config>`) are utilized here in the call to
    `SSHClient.connect <paramiko.client.SSHClient.connect>`. (For details,
    see :doc:`the configuration docs </concepts/configuration>`.)

    :returns:
        The result of the internal call to `.SSHClient.connect`, if
        performing an initial connection; ``None`` otherwise.

    .. versionadded:: 2.0
    .. versionchanged:: 3.1
        Now returns the inner Paramiko connect call's return value instead
        of always returning the implicit ``None``.
    """
    # Short-circuit: already-open connections return None without touching
    # any of the kwarg-merging logic below.
    if self.is_connected:
        return
    err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!"  # noqa
    # These may not be given, period (they are always derived from our own
    # host/port/user attributes).
    for key in """
        hostname
        port
        username
    """.split():
        if key in self.connect_kwargs:
            raise ValueError(err.format(key))
    # These may be given one way or the other, but not both
    if (
        "timeout" in self.connect_kwargs
        and self.connect_timeout is not None
    ):
        raise ValueError(err.format("timeout"))
    # No conflicts -> merge 'em together. Keyword args here intentionally
    # win over anything in connect_kwargs (already guarded above anyway).
    kwargs = dict(
        self.connect_kwargs,
        username=self.user,
        hostname=self.host,
        port=self.port,
    )
    # Vanilla truthiness on purpose: see NOTE near __repr__ about gateway
    # possibly being an explicit False.
    if self.gateway:
        kwargs["sock"] = self.open_gateway()
    if self.connect_timeout:
        kwargs["timeout"] = self.connect_timeout
    # Strip out empty defaults for less noisy debugging
    if "key_filename" in kwargs and not kwargs["key_filename"]:
        del kwargs["key_filename"]
    auth_strategy_class = self.authentication.strategy_class
    if auth_strategy_class is not None:
        # Pop connect_kwargs related to auth to avoid giving Paramiko
        # conflicting signals; the auth strategy owns these decisions now.
        for key in (
            "allow_agent",
            "key_filename",
            "look_for_keys",
            "passphrase",
            "password",
            "pkey",
            "username",
        ):
            kwargs.pop(key, None)
        kwargs["auth_strategy"] = auth_strategy_class(
            ssh_config=self.ssh_config,
            fabric_config=self.config,
            username=self.user,
        )
    # Actually connect! Cache the transport handle for is_connected etc.
    result = self.client.connect(**kwargs)
    self.transport = self.client.get_transport()
    return result
def open_gateway(self):
    """
    Obtain a socket-like object from `gateway`.

    :returns:
        A ``direct-tcpip`` `paramiko.channel.Channel`, if `gateway` was a
        `.Connection`; or a `~paramiko.proxy.ProxyCommand`, if `gateway`
        was a string.

    .. versionadded:: 2.0
    """
    # ProxyCommand is faster to set up, so do it first.
    if isinstance(self.gateway, str):
        # Leverage a dummy SSHConfig to ensure %h/%p/etc are parsed.
        # TODO: use real SSH config once loading one properly is
        # implemented.
        ssh_conf = SSHConfig()
        dummy = "Host {}\n    ProxyCommand {}"
        ssh_conf.parse(StringIO(dummy.format(self.host, self.gateway)))
        return ProxyCommand(ssh_conf.lookup(self.host)["proxycommand"])
    # Handle inner-Connection gateway type here: open the gateway's own SSH
    # connection first, then tunnel through its transport.
    # TODO: logging
    self.gateway.open()
    # TODO: expose the opened channel itself as an attribute? (another
    # possible argument for separating the two gateway types...) e.g. if
    # someone wanted to piggyback on it for other same-interpreter socket
    # needs...
    # TODO: and the inverse? allow users to supply their own socket/like
    # object they got via $WHEREEVER?
    # TODO: how best to expose timeout param? reuse general connection
    # timeout from config?
    return self.gateway.transport.open_channel(
        kind="direct-tcpip",
        dest_addr=(self.host, int(self.port)),
        # NOTE: src_addr needs to be 'empty but not None' values to
        # correctly encode into a network message. Theoretically Paramiko
        # could auto-interpret None sometime & save us the trouble.
        src_addr=("", 0),
    )
def close(self):
    """
    Terminate the network connection to the remote end, if open.

    If any SFTP sessions are open, they will also be closed.

    If no connection or SFTP session is open, this method does nothing.

    .. versionadded:: 2.0
    .. versionchanged:: 3.0
        Now closes SFTP sessions too (2.x required manually doing so).
    """
    # SFTP first, so it shuts down before its underlying transport goes.
    sftp = self._sftp
    if sftp is not None:
        sftp.close()
        self._sftp = None
    if self.is_connected:
        self.client.close()
    handler = self._agent_handler
    if self.forward_agent and handler is not None:
        handler.close()
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
@opens
def create_session(self):
    """Open a raw session channel, wiring up agent forwarding if enabled."""
    session = self.transport.open_session()
    if self.forward_agent:
        # Hook the local SSH agent into the new channel for remote use.
        self._agent_handler = AgentRequestHandler(session)
    return session
def _remote_runner(self):
return self.config.runners.remote(
context=self, inline_env=self.inline_ssh_env
)
@opens
def run(self, command, **kwargs):
    """
    Execute a shell command on the remote end of this connection.

    This method wraps an SSH-capable implementation of
    `invoke.runners.Runner.run`; see its documentation for details.

    .. warning::
        There are a few spots where Fabric departs from Invoke's default
        settings/behaviors; they are documented under
        `.Config.global_defaults`.

    .. versionadded:: 2.0
    """
    runner = self._remote_runner()
    return self._run(runner, command, **kwargs)
@opens
def sudo(self, command, **kwargs):
    """
    Execute a shell command, via ``sudo``, on the remote end.

    This method is identical to `invoke.context.Context.sudo` in every way,
    except in that -- like `run` -- it honors per-host/per-connection
    configuration overrides in addition to the generic/global ones. Thus,
    for example, per-host sudo passwords may be configured.

    .. versionadded:: 2.0
    """
    runner = self._remote_runner()
    return self._sudo(runner, command, **kwargs)
@opens
def shell(self, **kwargs):
    """
    Run an interactive login shell on the remote end, as with ``ssh``.

    This method is intended strictly for use cases where you can't know
    what remote shell to invoke, or are connecting to a non-POSIX-server
    environment such as a network appliance or other custom SSH server.
    Nearly every other use case, including interactively-focused ones, will
    be better served by using `run` plus an explicit remote shell command
    (eg ``bash``).

    `shell` has the following differences in behavior from `run`:

    - It still returns a `~invoke.runners.Result` instance, but the object
      will have a less useful set of attributes than with `run` or `local`:

      - ``command`` will be ``None``, as there is no such input argument.
      - ``stdout`` will contain a full record of the session, including
        all interactive input, as that is echoed back to the user. This
        can be useful for logging but is much less so for doing
        programmatic things after the method returns.
      - ``stderr`` will always be empty (same as `run` when
        ``pty==True``).
      - ``pty`` will always be True (because one was automatically used).
      - ``exited`` and similar attributes will only reflect the overall
        session, which may vary by shell or appliance but often has no
        useful relationship with the internally executed commands' exit
        codes.

    - This method behaves as if ``warn`` is set to ``True``: even if the
      remote shell exits uncleanly, no exception will be raised.
    - A pty is always allocated remotely, as with ``pty=True`` under `run`.
    - The ``inline_env`` setting is ignored, as there is no default shell
      command to add the parameters to (and no guarantee the remote end
      even is a shell!)

    It supports **only** the following kwargs, which behave identically to
    their counterparts in `run` unless otherwise stated:

    - ``encoding``
    - ``env``
    - ``in_stream`` (useful in niche cases, but make sure regular `run`
      with this argument isn't more suitable!)
    - ``replace_env``
    - ``watchers`` (note that due to pty echoing your stdin back to stdout,
      a watcher will see your input as well as program stdout!)

    Those keyword arguments also honor the ``run.*`` configuration tree, as
    in `run`/`sudo`.

    :returns: `~invoke.runners.Result`

    :raises:
        `~invoke.exceptions.ThreadException` (if the background I/O threads
        encountered exceptions other than
        `~invoke.exceptions.WatcherError`).

    .. versionadded:: 2.7
    """
    runner = self.config.runners.remote_shell(context=self)
    # Reinstate most defaults as explicit kwargs to ensure user's config
    # doesn't make this mode break horribly. Then override a few that need
    # to change, like pty.
    allowed = ("encoding", "env", "in_stream", "replace_env", "watchers")
    new_kwargs = {}
    for key, value in self.config.global_defaults()["run"].items():
        if key in allowed:
            # Use allowed kwargs if given, otherwise also fill them from
            # defaults. NOTE: .pop() so leftovers can be detected below.
            new_kwargs[key] = kwargs.pop(key, self.config.run[key])
        else:
            new_kwargs[key] = value
    new_kwargs.update(pty=True)
    # At this point, any leftover kwargs would be ignored, so yell instead
    if kwargs:
        err = "shell() got unexpected keyword arguments: {!r}"
        raise TypeError(err.format(list(kwargs.keys())))
    return runner.run(command=None, **new_kwargs)
def local(self, *args, **kwargs):
    """
    Execute a shell command on the local system.

    This method is effectively a wrapper of `invoke.run`; see its docs for
    details and call signature.

    .. versionadded:: 2.0
    """
    # The superclass (invoke Context) run() already drives the local
    # runner, so delegating straight to it is all that's needed here.
    return super().run(*args, **kwargs)
@opens
def sftp(self):
    """
    Return a `~paramiko.sftp_client.SFTPClient` object.

    If called more than one time, memoizes the first result; thus, any
    given `.Connection` instance will only ever have a single SFTP client,
    and state (such as that managed by
    `~paramiko.sftp_client.SFTPClient.chdir`) will be preserved.

    .. versionadded:: 2.0
    """
    # Memoize on first use so chdir() etc state survives across calls.
    existing = self._sftp
    if existing is not None:
        return existing
    self._sftp = self.client.open_sftp()
    return self._sftp
def get(self, *args, **kwargs):
    """
    Get a remote file to the local filesystem or file-like object.

    Simply a wrapper for `.Transfer.get`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    transfer = Transfer(self)
    return transfer.get(*args, **kwargs)
def put(self, *args, **kwargs):
    """
    Put a local file (or file-like object) to the remote filesystem.

    Simply a wrapper for `.Transfer.put`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    transfer = Transfer(self)
    return transfer.put(*args, **kwargs)
# TODO: yield the socket for advanced users? Other advanced use cases
# (perhaps factor out socket creation itself)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_local(
    self,
    local_port,
    remote_port=None,
    remote_host="localhost",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``local_port`` to the server's environment.

    For example, say you want to connect to a remote PostgreSQL database
    which is locked down and only accessible via the system it's running
    on. You have SSH access to this server, so you can temporarily make
    port 5432 on your local system act like port 5432 on the server::

        import psycopg2
        from fabric import Connection

        with Connection('my-db-server').forward_local(5432):
            db = psycopg2.connect(
                host='localhost', port=5432, database='mydb'
            )
            # Do things with 'db' here

    This method is analogous to using the ``-L`` option of OpenSSH's
    ``ssh`` program.

    :param int local_port: The local port number on which to listen.
    :param int remote_port:
        The remote port number. Defaults to the same value as
        ``local_port``.
    :param str local_host:
        The local hostname/interface on which to listen. Default:
        ``localhost``.
    :param str remote_host:
        The remote hostname serving the forwarded remote port. Default:
        ``localhost`` (i.e., the host this `.Connection` is connected to.)

    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    if not remote_port:
        remote_port = local_port
    # TunnelManager does all of the work, sitting in the background (so we
    # can yield) and spawning threads every time somebody connects to our
    # local port.
    finished = Event()
    manager = TunnelManager(
        local_port=local_port,
        local_host=local_host,
        remote_port=remote_port,
        remote_host=remote_host,
        # TODO: not a huge fan of handing in our transport, but...?
        transport=self.transport,
        finished=finished,
    )
    manager.start()
    # Return control to caller now that things ought to be operational
    try:
        yield
    # Teardown once user exits block
    finally:
        # Signal to manager that it should close all open tunnels
        finished.set()
        # Then wait for it to do so
        manager.join()
        # Raise threading errors from within the manager, which would be
        # one of:
        # - an inner ThreadException, which was created by the manager on
        # behalf of its Tunnels; this gets directly raised.
        # - some other exception, which would thus have occurred in the
        # manager itself; we wrap this in a new ThreadException.
        # NOTE: in these cases, some of the metadata tracking in
        # ExceptionHandlingThread/ExceptionWrapper/ThreadException (which
        # is useful when dealing with multiple nearly-identical sibling IO
        # threads) is superfluous, but it doesn't feel worth breaking
        # things up further; we just ignore it for now.
        wrapper = manager.exception()
        if wrapper is not None:
            if wrapper.type is ThreadException:
                raise wrapper.value
            else:
                raise ThreadException([wrapper])
        # TODO: cancel port forward on transport? Does that even make sense
        # here (where we used direct-tcpip) vs the opposite method (which
        # is what uses forward-tcpip)?
# TODO: cancel port forward on transport? Does that even make sense
# here (where we used direct-tcpip) vs the opposite method (which
# is what uses forward-tcpip)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_remote(
    self,
    remote_port,
    local_port=None,
    remote_host="127.0.0.1",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``remote_port`` to the local environment.

    For example, say you're running a daemon in development mode on your
    workstation at port 8080, and want to funnel traffic to it from a
    production or staging environment.

    In most situations this isn't possible as your office/home network
    probably blocks inbound traffic. But you have SSH access to this
    server, so you can temporarily make port 8080 on that server act like
    port 8080 on your workstation::

        from fabric import Connection

        c = Connection('my-remote-server')
        with c.forward_remote(8080):
            c.run("remote-data-writer --port 8080")
            # Assuming remote-data-writer runs until interrupted, this will
            # stay open until you Ctrl-C...

    This method is analogous to using the ``-R`` option of OpenSSH's
    ``ssh`` program.

    :param int remote_port: The remote port number on which to listen.
    :param int local_port:
        The local port number. Defaults to the same value as
        ``remote_port``.
    :param str local_host:
        The local hostname/interface the forwarded connection talks to.
        Default: ``localhost``.
    :param str remote_host:
        The remote interface address to listen on when forwarding
        connections. Default: ``127.0.0.1`` (i.e. only listen on the remote
        localhost).

    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    if not local_port:
        local_port = remote_port
    # Callback executes on each connection to the remote port and is given
    # a Channel hooked up to said port. (We don't actually care about the
    # source/dest host/port pairs at all; only whether the channel has data
    # to read and suchlike.)
    # We then pair that channel with a new 'outbound' socket connection to
    # the local host/port being forwarded, in a new Tunnel.
    # That Tunnel is then added to a shared data structure so we can track
    # & close them during shutdown.
    #
    # TODO: this approach is less than ideal because we have to share state
    # between ourselves & the callback handed into the transport's own
    # thread handling (which is roughly analogous to our self-controlled
    # TunnelManager for local forwarding). See if we can use more of
    # Paramiko's API (or improve it and then do so) so that isn't
    # necessary.
    tunnels = []

    def callback(channel, src_addr_tup, dst_addr_tup):
        # Runs inside Paramiko's transport thread, not ours.
        sock = socket.socket()
        # TODO: handle connection failure such that channel, etc get closed
        sock.connect((local_host, local_port))
        # TODO: we don't actually need to generate the Events at our level,
        # do we? Just let Tunnel.__init__ do it; all we do is "press its
        # button" on shutdown...
        tunnel = Tunnel(channel=channel, sock=sock, finished=Event())
        tunnel.start()
        # Communication between ourselves & the Paramiko handling subthread
        tunnels.append(tunnel)

    # Ask Paramiko (really, the remote sshd) to call our callback whenever
    # connections are established on the remote iface/port.
    try:
        self.transport.request_port_forward(
            address=remote_host, port=remote_port, handler=callback
        )
        yield
    finally:
        # TODO: see above re: lack of a TunnelManager
        # TODO: and/or also refactor with TunnelManager re: shutdown logic.
        # E.g. maybe have a non-thread TunnelManager-alike with a method
        # that acts as the callback? At least then there's a tiny bit more
        # encapsulation...meh.
        for tunnel in tunnels:
            tunnel.finished.set()
            tunnel.join()
        self.transport.cancel_port_forward(
            address=remote_host, port=remote_port
        )
| Connection |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_flow_runs/client.py | {
"start": 964,
"end": 18320
} | class ____(BaseClient):
def create_flow_run(
    self,
    flow: "FlowObject[Any, R]",
    name: str | None = None,
    parameters: dict[str, Any] | None = None,
    context: dict[str, Any] | None = None,
    tags: "Iterable[str] | None" = None,
    parent_task_run_id: "UUID | None" = None,
    state: "State[R] | None" = None,
    work_pool_name: str | None = None,
    work_queue_name: str | None = None,
    job_variables: dict[str, Any] | None = None,
) -> "FlowRun":
    """
    Create a flow run for a flow.

    Args:
        flow: The flow model to create the flow run for
        name: An optional name for the flow run
        parameters: Parameter overrides for this flow run.
        context: Optional run context data
        tags: a list of tags to apply to this flow run
        parent_task_run_id: if a subflow run is being created, the placeholder task
            run identifier in the parent flow
        state: The initial state for the run. If not provided, defaults to
            `Pending`.
        work_pool_name: The name of the work pool to run the flow run in.
        work_queue_name: The name of the work queue to place the flow run in.
        job_variables: The job variables to use when setting up flow run infrastructure.

    Raises:
        httpx.RequestError: if the Prefect API does not successfully create a run for any reason

    Returns:
        The flow run model
    """
    # Imports are deferred to avoid import-time cycles with the schemas.
    from prefect.client.schemas.actions import FlowCreate, FlowRunCreate
    from prefect.client.schemas.objects import Flow, FlowRun, FlowRunPolicy
    from prefect.states import Pending, to_state_create

    parameters = parameters or {}
    context = context or {}

    if state is None:
        state = Pending()

    # Retrieve the flow id (POST /flows/ is create-or-get by name here;
    # we only need the resulting id to attach the run to).
    flow_data = FlowCreate(name=flow.name)
    response = self.request(
        "POST", "/flows/", json=flow_data.model_dump(mode="json")
    )
    flow_id = Flow.model_validate(response.json()).id

    flow_run_create = FlowRunCreate(
        flow_id=flow_id,
        flow_version=flow.version,
        name=name,
        parameters=parameters,
        context=context,
        tags=list(tags or []),
        parent_task_run_id=parent_task_run_id,
        state=to_state_create(state),
        empirical_policy=FlowRunPolicy(
            retries=flow.retries,
            retry_delay=int(flow.retry_delay_seconds or 0),
        ),
    )

    # Only set these when given, so exclude_unset below leaves them out of
    # the request otherwise.
    if work_pool_name is not None:
        flow_run_create.work_pool_name = work_pool_name
    if work_queue_name is not None:
        flow_run_create.work_queue_name = work_queue_name
    if job_variables is not None:
        flow_run_create.job_variables = job_variables

    flow_run_create_json = flow_run_create.model_dump(
        mode="json", exclude_unset=True
    )
    response = self.request("POST", "/flow_runs/", json=flow_run_create_json)
    flow_run = FlowRun.model_validate(response.json())

    # Restore the parameters to the local objects to retain expectations about
    # Python objects
    flow_run.parameters = parameters

    return flow_run
def update_flow_run(
    self,
    flow_run_id: "UUID",
    flow_version: str | None = None,
    parameters: dict[str, Any] | None = None,
    name: str | None = None,
    tags: "Iterable[str] | None" = None,
    empirical_policy: "FlowRunPolicy | None" = None,
    infrastructure_pid: str | None = None,
    job_variables: dict[str, Any] | None = None,
) -> httpx.Response:
    """
    Update a flow run's details.

    Args:
        flow_run_id: The identifier for the flow run to update.
        flow_version: A new version string for the flow run.
        parameters: A dictionary of parameter values for the flow run. This will not
            be merged with any existing parameters.
        name: A new name for the flow run.
        empirical_policy: A new flow run orchestration policy. This will not be
            merged with any existing policy.
        tags: An iterable of new tags for the flow run. These will not be merged with
            any existing tags.
        infrastructure_pid: The id of flow run as returned by an
            infrastructure block.
        job_variables: New job variables for the flow run's infrastructure.
            These will not be merged with any existing job variables.

    Returns:
        an `httpx.Response` object from the PATCH request
    """
    # Only include explicitly-given fields so the PATCH leaves everything
    # else untouched (exclude_unset below relies on this dict).
    params: dict[str, Any] = {}
    if flow_version is not None:
        params["flow_version"] = flow_version
    if parameters is not None:
        params["parameters"] = parameters
    if name is not None:
        params["name"] = name
    if tags is not None:
        params["tags"] = tags
    if empirical_policy is not None:
        params["empirical_policy"] = empirical_policy
    # NOTE(review): this one is a truthiness check (unlike the `is not
    # None` checks above), so an empty-string pid is silently skipped.
    if infrastructure_pid:
        params["infrastructure_pid"] = infrastructure_pid
    if job_variables is not None:
        params["job_variables"] = job_variables

    from prefect.client.schemas.actions import FlowRunUpdate

    flow_run_data = FlowRunUpdate(**params)

    return self.request(
        "PATCH",
        "/flow_runs/{id}",
        path_params={"id": flow_run_id},
        json=flow_run_data.model_dump(mode="json", exclude_unset=True),
    )
def delete_flow_run(
    self,
    flow_run_id: "UUID",
) -> None:
    """
    Delete a flow run by UUID.

    Args:
        flow_run_id: The flow run UUID of interest.

    Raises:
        ObjectNotFound: If request returns 404
        httpx.RequestError: If requests fails
    """
    try:
        self.request("DELETE", "/flow_runs/{id}", path_params={"id": flow_run_id})
    except httpx.HTTPStatusError as exc:
        # Translate a 404 into the richer ObjectNotFound; everything else
        # propagates untouched.
        if exc.response.status_code != 404:
            raise
        raise ObjectNotFound(http_exc=exc) from exc
def read_flow_run(self, flow_run_id: "UUID") -> "FlowRun":
    """
    Query the Prefect API for a flow run by id.

    Args:
        flow_run_id: the flow run ID of interest

    Returns:
        a Flow Run model representation of the flow run
    """
    try:
        response = self.request(
            "GET", "/flow_runs/{id}", path_params={"id": flow_run_id}
        )
    except httpx.HTTPStatusError as exc:
        # Map a 404 to the richer ObjectNotFound; re-raise anything else.
        if exc.response.status_code != 404:
            raise
        raise ObjectNotFound(http_exc=exc) from exc

    from prefect.client.schemas.objects import FlowRun

    return FlowRun.model_validate(response.json())
def resume_flow_run(
    self, flow_run_id: "UUID", run_input: dict[str, Any] | None = None
) -> "OrchestrationResult[Any]":
    """
    Resumes a paused flow run.

    Args:
        flow_run_id: the flow run ID of interest
        run_input: the input to resume the flow run with

    Returns:
        an OrchestrationResult model representation of state orchestration output
    """
    # NOTE: a previous try/except around this call caught
    # httpx.HTTPStatusError only to `raise` it unchanged — a no-op — so it
    # has been removed; HTTP errors propagate exactly as before.
    response = self.request(
        "POST",
        "/flow_runs/{id}/resume",
        path_params={"id": flow_run_id},
        json={"run_input": run_input},
    )

    from prefect.client.schemas import OrchestrationResult

    result: OrchestrationResult[Any] = OrchestrationResult.model_validate(
        response.json()
    )
    return result
def read_flow_runs(
    self,
    *,
    flow_filter: "FlowFilter | None" = None,
    flow_run_filter: "FlowRunFilter | None" = None,
    task_run_filter: "TaskRunFilter | None" = None,
    deployment_filter: "DeploymentFilter | None" = None,
    work_pool_filter: "WorkPoolFilter | None" = None,
    work_queue_filter: "WorkQueueFilter | None" = None,
    sort: "FlowRunSort | None" = None,
    limit: int | None = None,
    offset: int = 0,
) -> "list[FlowRun]":
    """
    Query the Prefect API for flow runs. Only flow runs matching all criteria will
    be returned.

    Args:
        flow_filter: filter criteria for flows
        flow_run_filter: filter criteria for flow runs
        task_run_filter: filter criteria for task runs
        deployment_filter: filter criteria for deployments
        work_pool_filter: filter criteria for work pools
        work_queue_filter: filter criteria for work pool queues
        sort: sort criteria for the flow runs
        limit: limit for the flow run query
        offset: offset for the flow run query

    Returns:
        a list of Flow Run model representations
            of the flow runs
    """
    # NOTE(review): only flow_run_filter is dumped with exclude_unset=True;
    # the other filters serialize all fields — confirm this asymmetry is
    # intentional server-side.
    body: dict[str, Any] = {
        "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
        "flow_runs": (
            flow_run_filter.model_dump(mode="json", exclude_unset=True)
            if flow_run_filter
            else None
        ),
        "task_runs": (
            task_run_filter.model_dump(mode="json") if task_run_filter else None
        ),
        "deployments": (
            deployment_filter.model_dump(mode="json") if deployment_filter else None
        ),
        "work_pools": (
            work_pool_filter.model_dump(mode="json") if work_pool_filter else None
        ),
        "work_pool_queues": (
            work_queue_filter.model_dump(mode="json") if work_queue_filter else None
        ),
        "sort": sort,
        "limit": limit,
        "offset": offset,
    }
    response = self.request("POST", "/flow_runs/filter", json=body)

    from prefect.client.schemas.objects import FlowRun

    return FlowRun.model_validate_list(response.json())
def count_flow_runs(
    self,
    *,
    flow_filter: "FlowFilter | None" = None,
    flow_run_filter: "FlowRunFilter | None" = None,
    task_run_filter: "TaskRunFilter | None" = None,
    deployment_filter: "DeploymentFilter | None" = None,
    work_pool_filter: "WorkPoolFilter | None" = None,
    work_queue_filter: "WorkQueueFilter | None" = None,
) -> int:
    """
    Returns the count of flow runs matching all criteria for flow runs.

    Args:
        flow_filter: filter criteria for flows
        flow_run_filter: filter criteria for flow runs
        task_run_filter: filter criteria for task runs
        deployment_filter: filter criteria for deployments
        work_pool_filter: filter criteria for work pools
        work_queue_filter: filter criteria for work pool queues

    Returns:
        count of flow runs
    """
    # Body mirrors read_flow_runs (minus sort/limit/offset); keep the two
    # in sync when adding new filter kinds.
    body: dict[str, Any] = {
        "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
        "flow_runs": (
            flow_run_filter.model_dump(mode="json", exclude_unset=True)
            if flow_run_filter
            else None
        ),
        "task_runs": (
            task_run_filter.model_dump(mode="json") if task_run_filter else None
        ),
        "deployments": (
            deployment_filter.model_dump(mode="json") if deployment_filter else None
        ),
        "work_pools": (
            work_pool_filter.model_dump(mode="json") if work_pool_filter else None
        ),
        "work_pool_queues": (
            work_queue_filter.model_dump(mode="json") if work_queue_filter else None
        ),
    }
    response = self.request("POST", "/flow_runs/count", json=body)
    return response.json()
def set_flow_run_state(
    self,
    flow_run_id: "UUID | str",
    state: "State[T]",
    force: bool = False,
) -> "OrchestrationResult[T]":
    """
    Set the state of a flow run.

    Args:
        flow_run_id: the id of the flow run
        state: the state to set
        force: if True, disregard orchestration logic when setting the state,
            forcing the Prefect API to accept the state

    Returns:
        an OrchestrationResult model representation of state orchestration output
    """
    from uuid import UUID, uuid4

    from prefect.states import to_state_create

    # Accept either a UUID or its string form.
    flow_run_id = (
        flow_run_id if isinstance(flow_run_id, UUID) else UUID(flow_run_id)
    )
    state_create = to_state_create(state)
    state_create.state_details.flow_run_id = flow_run_id
    # Fresh transition id for each attempt to set state.
    state_create.state_details.transition_id = uuid4()
    try:
        response = self.request(
            "POST",
            "/flow_runs/{id}/set_state",
            path_params={"id": flow_run_id},
            json=dict(
                state=state_create.model_dump(mode="json", serialize_as_any=True),
                force=force,
            ),
        )
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 404:
            raise ObjectNotFound(http_exc=e) from e
        else:
            raise

    from prefect.client.schemas import OrchestrationResult

    result: OrchestrationResult[T] = OrchestrationResult.model_validate(
        response.json()
    )
    return result
def read_flow_run_states(self, flow_run_id: "UUID") -> "list[State]":
    """
    Query for the states of a flow run

    Args:
        flow_run_id: the id of the flow run

    Returns:
        a list of State model representations
            of the flow run states
    """
    response = self.request(
        "GET", "/flow_run_states/", params={"flow_run_id": str(flow_run_id)}
    )

    from prefect.states import State

    return State.model_validate_list(response.json())
def set_flow_run_name(self, flow_run_id: "UUID", name: str) -> httpx.Response:
    """Rename the given flow run and return the raw PATCH response."""
    from prefect.client.schemas.actions import FlowRunUpdate

    payload = FlowRunUpdate(name=name).model_dump(mode="json", exclude_unset=True)
    return self.request(
        "PATCH",
        "/flow_runs/{id}",
        path_params={"id": flow_run_id},
        json=payload,
    )
def create_flow_run_input(
    self, flow_run_id: "UUID", key: str, value: str, sender: str | None = None
) -> None:
    """
    Creates a flow run input.

    Args:
        flow_run_id: The flow run id.
        key: The input key.
        value: The input value.
        sender: The sender of the input.
    """
    # Instantiating the model first validates the key before hitting the API.
    FlowRunInput(flow_run_id=flow_run_id, key=key, value=value)

    payload = {"key": key, "value": value, "sender": sender}
    response = self.request(
        "POST",
        "/flow_runs/{id}/input",
        path_params={"id": flow_run_id},
        json=payload,
    )
    response.raise_for_status()
def filter_flow_run_input(
    self, flow_run_id: "UUID", key_prefix: str, limit: int, exclude_keys: "set[str]"
) -> "list[FlowRunInput]":
    """Return up to ``limit`` flow run inputs whose keys start with ``key_prefix``,
    skipping any keys in ``exclude_keys``."""
    body = {
        "prefix": key_prefix,
        "limit": limit,
        "exclude_keys": list(exclude_keys),
    }
    response = self.request(
        "POST",
        "/flow_runs/{id}/input/filter",
        path_params={"id": flow_run_id},
        json=body,
    )
    response.raise_for_status()

    from prefect.client.schemas.objects import FlowRunInput

    return FlowRunInput.model_validate_list(response.json())
def read_flow_run_input(self, flow_run_id: "UUID", key: str) -> str:
    """
    Reads a flow run input.

    Args:
        flow_run_id: The flow run id.
        key: The input key.
    """
    resp = self.request(
        "GET",
        "/flow_runs/{id}/input/{key}",
        path_params={"id": flow_run_id, "key": key},
    )
    resp.raise_for_status()
    # The body is the raw stored value; decode bytes to text for callers.
    return resp.content.decode()
def delete_flow_run_input(self, flow_run_id: "UUID", key: str) -> None:
    """
    Deletes a flow run input.

    Args:
        flow_run_id: The flow run id.
        key: The input key.
    """
    resp = self.request(
        "DELETE",
        "/flow_runs/{id}/input/{key}",
        path_params={"id": flow_run_id, "key": key},
    )
    resp.raise_for_status()
def update_flow_run_labels(
    self, flow_run_id: "UUID", labels: "KeyValueLabelsField"
) -> None:
    """
    Updates the labels of a flow run.
    """
    resp = self.request(
        "PATCH",
        "/flow_runs/{id}/labels",
        path_params={"id": flow_run_id},
        json=labels,
    )
    resp.raise_for_status()
| FlowRunClient |
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 15557,
"end": 17859
} | class ____(BaseTreeParamName):
def __init__(self, function_value, tree_name):
    """Bind a parameter name to the function value that declares it."""
    context = function_value.get_default_param_context()
    super().__init__(context, tree_name)
    self.function_value = function_value
def _get_param_node(self):
    # Climb the parse tree from the name node to its enclosing 'param' node.
    return search_ancestor(self.tree_name, 'param')
@property
def annotation_node(self):
    """The ``annotation`` child of the underlying ``param`` tree node."""
    param = self._get_param_node()
    return param.annotation
def infer_annotation(self, execute_annotation=True, ignore_stars=False):
    """Infer value types from this parameter's annotation.

    With ``execute_annotation`` the annotation types are executed (i.e.
    instantiated); with ``ignore_stars`` the ``*``/``**`` wrapping is
    ignored during inference.
    """
    from jedi.inference.gradual.annotation import infer_param

    param = self._get_param_node()
    values = infer_param(self.function_value, param, ignore_stars=ignore_stars)
    return values.execute_annotation() if execute_annotation else values
def infer_default(self):
    """Infer the types of this parameter's default value, if it has one."""
    default = self.default_node
    if default is None:
        return NO_VALUES
    return self.parent_context.infer_node(default)
@property
def default_node(self):
    """The ``default`` child of the underlying ``param`` tree node."""
    param = self._get_param_node()
    return param.default
def get_kind(self):
    """Return the ``inspect.Parameter`` kind for this tree parameter."""
    tree_param = self._get_param_node()
    if tree_param.star_count == 1:  # *args
        return Parameter.VAR_POSITIONAL
    if tree_param.star_count == 2:  # **kwargs
        return Parameter.VAR_KEYWORD

    # Params starting with __ are an equivalent to positional only
    # variables in typeshed.
    if tree_param.name.value.startswith('__'):
        return Parameter.POSITIONAL_ONLY

    # Scan this param's siblings in order. A '/' AFTER this param makes it
    # positional-only; a bare '*' or a starred param BEFORE it makes it
    # keyword-only.
    parent = tree_param.parent
    param_appeared = False
    for p in parent.children:
        if param_appeared:
            if p == '/':
                return Parameter.POSITIONAL_ONLY
        else:
            if p == '*':
                return Parameter.KEYWORD_ONLY
            if p.type == 'param':
                if p.star_count:
                    return Parameter.KEYWORD_ONLY
                if p == tree_param:
                    param_appeared = True
    # No marker found on either side: plain positional-or-keyword.
    return Parameter.POSITIONAL_OR_KEYWORD
def infer(self):
    """Infer this parameter's types, preferring annotations over docstrings."""
    annotated = self.infer_annotation()
    if annotated:
        return annotated
    # Fall back to types documented in the function's docstring.
    return docstrings.infer_param(self.function_value, self._get_param_node())
return doc_params
| _ActualTreeParamName |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.