language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | chroma-core__chroma | chromadb/utils/embedding_functions/huggingface_embedding_function.py | {
"start": 4646,
"end": 8690
} | class ____(EmbeddingFunction[Documents]):
"""
This class is used to get embeddings for a list of texts using the HuggingFace Embedding server
(https://github.com/huggingface/text-embeddings-inference).
The embedding model is configured in the server.
"""
def __init__(
self,
url: str,
api_key_env_var: Optional[str] = None,
api_key: Optional[str] = None,
):
"""
Initialize the HuggingFaceEmbeddingServer.
Args:
url (str): The URL of the HuggingFace Embedding Server.
api_key (Optional[str]): The API key for the HuggingFace Embedding Server.
api_key_env_var (str, optional): Environment variable name that contains your API key for the HuggingFace API.
"""
try:
import httpx
except ImportError:
raise ValueError(
"The httpx python package is not installed. Please install it with `pip install httpx`"
)
if api_key is not None:
warnings.warn(
"Direct api_key configuration will not be persisted. "
"Please use environment variables via api_key_env_var for persistent storage.",
DeprecationWarning,
)
self.url = url
self.api_key_env_var = api_key_env_var
if os.getenv("HUGGINGFACE_API_KEY") is not None:
self.api_key_env_var = "HUGGINGFACE_API_KEY"
if self.api_key_env_var is not None:
self.api_key = api_key or os.getenv(self.api_key_env_var)
else:
self.api_key = api_key
self._api_url = f"{url}"
self._session = httpx.Client()
if self.api_key is not None:
self._session.headers.update({"Authorization": f"Bearer {self.api_key}"})
def __call__(self, input: Documents) -> Embeddings:
"""
Get the embeddings for a list of texts.
Args:
input (Documents): A list of texts to get embeddings for.
Returns:
Embeddings: The embeddings for the texts.
Example:
>>> hugging_face = HuggingFaceEmbeddingServer(url="http://localhost:8080/embed")
>>> texts = ["Hello, world!", "How are you?"]
>>> embeddings = hugging_face(texts)
"""
# Call HuggingFace Embedding Server API for each document
response = self._session.post(self._api_url, json={"inputs": input}).json()
# Convert to numpy arrays
return [np.array(embedding, dtype=np.float32) for embedding in response]
@staticmethod
def name() -> str:
return "huggingface_server"
def default_space(self) -> Space:
return "cosine"
def supported_spaces(self) -> List[Space]:
return ["cosine", "l2", "ip"]
@staticmethod
def build_from_config(config: Dict[str, Any]) -> "EmbeddingFunction[Documents]":
url = config.get("url")
api_key_env_var = config.get("api_key_env_var")
if url is None:
raise ValueError("URL must be provided for HuggingFaceEmbeddingServer")
return HuggingFaceEmbeddingServer(url=url, api_key_env_var=api_key_env_var)
def get_config(self) -> Dict[str, Any]:
return {"url": self.url, "api_key_env_var": self.api_key_env_var}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
if "url" in new_config and new_config["url"] != self.url:
raise ValueError(
"The URL cannot be changed after the embedding function has been initialized."
)
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "huggingface_server")
| HuggingFaceEmbeddingServer |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0018_add_hidden_field_to_version.py | {
"start": 149,
"end": 683
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0017_builds_deterministic_order_index"),
]
operations = [
migrations.AddField(
model_name="version",
name="hidden",
field=models.BooleanField(
null=True,
default=False,
help_text="Hide this version from the version (flyout) menu and search results?",
verbose_name="Hidden",
),
),
]
| Migration |
python | python-markdown__markdown | markdown/extensions/meta.py | {
"start": 1000,
"end": 1364
} | class ____ (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Add `MetaPreprocessor` to Markdown instance. """
md.registerExtension(self)
self.md = md
md.preprocessors.register(MetaPreprocessor(md), 'meta', 27)
def reset(self) -> None:
self.md.Meta = {}
| MetaExtension |
python | ray-project__ray | python/ray/tune/tests/test_trial_scheduler_pbt.py | {
"start": 12989,
"end": 14129
} | class ____(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=2)
def tearDown(self):
ray.shutdown()
def testNoConfig(self):
def MockTrainingFunc(config):
a = config["a"]
b = config["b"]
c1 = config["c"]["c1"]
c2 = config["c"]["c2"]
while True:
tune.report({"mean_accuracy": a * b * (c1 + c2)})
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="mean_accuracy",
mode="max",
perturbation_interval=1,
hyperparam_mutations={
"a": tune.uniform(0, 0.3),
"b": [1, 2, 3],
"c": {
"c1": lambda: np.random.uniform(0.5),
"c2": tune.choice([2, 3, 4]),
},
},
)
tune.run(
MockTrainingFunc,
fail_fast=True,
num_samples=4,
scheduler=scheduler,
name="testNoConfig",
stop={"training_iteration": 3},
)
| PopulationBasedTrainingConfigTest |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 96238,
"end": 97068
} | class ____:
"""Test sl_SI address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in SlSiAddressProvider.cities
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
assert street_name in SlSiAddressProvider.streets
def test_administrative_unit(self, faker, num_samples):
for _ in range(num_samples):
administrative_unit = faker.administrative_unit()
assert isinstance(administrative_unit, str)
assert administrative_unit in SlSiAddressProvider.states
| TestSlSi |
python | kennethreitz__tablib | tests/test_tablib.py | {
"start": 38909,
"end": 43433
} | class ____(BaseTestCase):
def test_dbf_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_dbf = data.dbf
data.dbf = _dbf
# self.assertEqual(_dbf, data.dbf)
try:
self.assertEqual(_dbf, data.dbf)
except AssertionError:
index = 0
so_far = ''
for reg_char, data_char in zip(_dbf, data.dbf):
so_far += chr(data_char)
if reg_char != data_char and index not in [1, 2, 3]:
raise AssertionError('Failing at char {}: {} vs {} {}'.format(
index, reg_char, data_char, so_far))
index += 1
def test_dbf_export_set(self):
"""Test DBF import."""
data.append(self.john)
data.append(self.george)
data.append(self.tom)
data.headers = self.headers
_regression_dbf = (b'\x03r\x06\x06\x03\x00\x00\x00\x81\x00\xab\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00'
b'\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
)
_regression_dbf += b' John' + (b' ' * 75)
_regression_dbf += b' Adams' + (b' ' * 74)
_regression_dbf += b' 90.0000000'
_regression_dbf += b' George' + (b' ' * 73)
_regression_dbf += b' Washington' + (b' ' * 69)
_regression_dbf += b' 67.0000000'
_regression_dbf += b' Thomas' + (b' ' * 73)
_regression_dbf += b' Jefferson' + (b' ' * 70)
_regression_dbf += b' 50.0000000'
_regression_dbf += b'\x1a'
# If in python3, decode regression string to binary.
# _regression_dbf = bytes(_regression_dbf, 'utf-8')
# _regression_dbf = _regression_dbf.replace(b'\n', b'\r')
try:
self.assertEqual(_regression_dbf, data.dbf)
except AssertionError:
index = 0
found_so_far = ''
for reg_char, data_char in zip(_regression_dbf, data.dbf):
# found_so_far += chr(data_char)
if reg_char != data_char and index not in [1, 2, 3]:
raise AssertionError(
'Failing at char {}: {} vs {} (found {})'.format(
index, reg_char, data_char, found_so_far))
index += 1
def test_dbf_format_detect(self):
"""Test the DBF format detection."""
_dbf = (b'\x03r\x06\x03\x03\x00\x00\x00\x81\x00\xab\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00'
b'\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
)
_dbf += b' John' + (b' ' * 75)
_dbf += b' Adams' + (b' ' * 74)
_dbf += b' 90.0000000'
_dbf += b' George' + (b' ' * 73)
_dbf += b' Washington' + (b' ' * 69)
_dbf += b' 67.0000000'
_dbf += b' Thomas' + (b' ' * 73)
_dbf += b' Jefferson' + (b' ' * 70)
_dbf += b' 50.0000000'
_dbf += b'\x1a'
_dbf = BytesIO(_dbf)
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
fmt = registry.get_format('dbf')
self.assertTrue(fmt.detect(_dbf))
self.assertFalse(fmt.detect(_yaml))
self.assertFalse(fmt.detect(_tsv))
self.assertFalse(fmt.detect(_csv))
self.assertFalse(fmt.detect(_json))
self.assertFalse(fmt.detect(_bunk))
| DBFTests |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/tests/test_ipython_config_dialog.py | {
"start": 575,
"end": 1032
} | class ____(QMainWindow):
register_shortcut = MagicMock()
editor = MagicMock()
def __getattr__(self, attr):
return MagicMock()
@pytest.mark.parametrize(
'config_dialog',
# [[MainWindowMock, [ConfigPlugins], [Plugins]]]
[[MainWindowMock, [], [IPythonConsole]]],
indirect=True)
def test_config_dialog(config_dialog):
configpage = config_dialog.get_page()
assert configpage
configpage.save_to_conf()
| MainWindowMock |
python | pytest-dev__pytest | src/_pytest/fixtures.py | {
"start": 48789,
"end": 59028
} | class ____:
def __init__(
self,
*,
function: Callable[..., Any],
fixture_function_marker: FixtureFunctionMarker,
instance: object | None = None,
_ispytest: bool = False,
) -> None:
check_ispytest(_ispytest)
self.name = fixture_function_marker.name or function.__name__
# In order to show the function that this fixture contains in messages.
# Set the __name__ to be same as the function __name__ or the given fixture name.
self.__name__ = self.name
self._fixture_function_marker = fixture_function_marker
if instance is not None:
self._fixture_function = cast(
Callable[..., Any], function.__get__(instance)
)
else:
self._fixture_function = function
functools.update_wrapper(self, function)
def __repr__(self) -> str:
return f"<pytest_fixture({self._fixture_function})>"
def __get__(self, instance, owner=None):
"""Behave like a method if the function it was applied to was a method."""
return FixtureFunctionDefinition(
function=self._fixture_function,
fixture_function_marker=self._fixture_function_marker,
instance=instance,
_ispytest=True,
)
def __call__(self, *args: Any, **kwds: Any) -> Any:
message = (
f'Fixture "{self.name}" called directly. Fixtures are not meant to be called directly,\n'
"but are created automatically when test functions request them as parameters.\n"
"See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n"
"https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly"
)
fail(message, pytrace=False)
def _get_wrapped_function(self) -> Callable[..., Any]:
return self._fixture_function
@overload
def fixture(
fixture_function: Callable[..., object],
*,
scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
params: Iterable[object] | None = ...,
autouse: bool = ...,
ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
name: str | None = ...,
) -> FixtureFunctionDefinition: ...
@overload
def fixture(
fixture_function: None = ...,
*,
scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
params: Iterable[object] | None = ...,
autouse: bool = ...,
ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
name: str | None = None,
) -> FixtureFunctionMarker: ...
def fixture(
fixture_function: FixtureFunction | None = None,
*,
scope: _ScopeName | Callable[[str, Config], _ScopeName] = "function",
params: Iterable[object] | None = None,
autouse: bool = False,
ids: Sequence[object | None] | Callable[[Any], object | None] | None = None,
name: str | None = None,
) -> FixtureFunctionMarker | FixtureFunctionDefinition:
"""Decorator to mark a fixture factory function.
This decorator can be used, with or without parameters, to define a
fixture function.
The name of the fixture function can later be referenced to cause its
invocation ahead of running tests: test modules or classes can use the
``pytest.mark.usefixtures(fixturename)`` marker.
Test functions can directly use fixture names as input arguments in which
case the fixture instance returned from the fixture function will be
injected.
Fixtures can provide their values to test functions using ``return`` or
``yield`` statements. When using ``yield`` the code block after the
``yield`` statement is executed as teardown code regardless of the test
outcome, and must yield exactly once.
:param scope:
The scope for which this fixture is shared; one of ``"function"``
(default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
This parameter may also be a callable which receives ``(fixture_name, config)``
as parameters, and must return a ``str`` with one of the values mentioned above.
See :ref:`dynamic scope` in the docs for more information.
:param params:
An optional list of parameters which will cause multiple invocations
of the fixture function and all of the tests using it. The current
parameter is available in ``request.param``.
:param autouse:
If True, the fixture func is activated for all tests that can see it.
If False (the default), an explicit reference is needed to activate
the fixture.
:param ids:
Sequence of ids each corresponding to the params so that they are
part of the test id. If no ids are provided they will be generated
automatically from the params.
:param name:
The name of the fixture. This defaults to the name of the decorated
function. If a fixture is used in the same module in which it is
defined, the function name of the fixture will be shadowed by the
function arg that requests the fixture; one way to resolve this is to
name the decorated function ``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
"""
fixture_marker = FixtureFunctionMarker(
scope=scope,
params=tuple(params) if params is not None else None,
autouse=autouse,
ids=None if ids is None else ids if callable(ids) else tuple(ids),
name=name,
_ispytest=True,
)
# Direct decoration.
if fixture_function:
return fixture_marker(fixture_function)
return fixture_marker
def yield_fixture(
fixture_function=None,
*args,
scope="function",
params=None,
autouse=False,
ids=None,
name=None,
):
"""(Return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
warnings.warn(YIELD_FIXTURE, stacklevel=2)
return fixture(
fixture_function,
*args,
scope=scope,
params=params,
autouse=autouse,
ids=ids,
name=name,
)
@fixture(scope="session")
def pytestconfig(request: FixtureRequest) -> Config:
"""Session-scoped fixture that returns the session's :class:`pytest.Config`
object.
Example::
def test_foo(pytestconfig):
if pytestconfig.get_verbosity() > 0:
...
"""
return request.config
def pytest_addoption(parser: Parser) -> None:
parser.addini(
"usefixtures",
type="args",
default=[],
help="List of default fixtures to be used with this project",
)
group = parser.getgroup("general")
group.addoption(
"--fixtures",
"--funcargs",
action="store_true",
dest="showfixtures",
default=False,
help="Show available fixtures, sorted by plugin appearance "
"(fixtures with leading '_' are only shown with '-v')",
)
group.addoption(
"--fixtures-per-test",
action="store_true",
dest="show_fixtures_per_test",
default=False,
help="Show fixtures per test",
)
def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
if config.option.showfixtures:
showfixtures(config)
return 0
if config.option.show_fixtures_per_test:
show_fixtures_per_test(config)
return 0
return None
def _resolve_args_directness(
argnames: Sequence[str],
indirect: bool | Sequence[str],
nodeid: str,
) -> dict[str, Literal["indirect", "direct"]]:
"""Resolve if each parametrized argument must be considered an indirect
parameter to a fixture of the same name, or a direct parameter to the
parametrized function, based on the ``indirect`` parameter of the
parametrize() call.
:param argnames:
List of argument names passed to ``parametrize()``.
:param indirect:
Same as the ``indirect`` parameter of ``parametrize()``.
:param nodeid:
Node ID to which the parametrization is applied.
:returns:
A dict mapping each arg name to either "indirect" or "direct".
"""
arg_directness: dict[str, Literal["indirect", "direct"]]
if isinstance(indirect, bool):
arg_directness = dict.fromkeys(argnames, "indirect" if indirect else "direct")
elif isinstance(indirect, Sequence):
arg_directness = dict.fromkeys(argnames, "direct")
for arg in indirect:
if arg not in argnames:
fail(
f"In {nodeid}: indirect fixture '{arg}' doesn't exist",
pytrace=False,
)
arg_directness[arg] = "indirect"
else:
fail(
f"In {nodeid}: expected Sequence or boolean for indirect, got {type(indirect).__name__}",
pytrace=False,
)
return arg_directness
def _get_direct_parametrize_args(node: nodes.Node) -> set[str]:
"""Return all direct parametrization arguments of a node, so we don't
mistake them for fixtures.
Check https://github.com/pytest-dev/pytest/issues/5036.
These things are done later as well when dealing with parametrization
so this could be improved.
"""
parametrize_argnames: set[str] = set()
for marker in node.iter_markers(name="parametrize"):
indirect = marker.kwargs.get("indirect", False)
p_argnames, _ = ParameterSet._parse_parametrize_args(
*marker.args, **marker.kwargs
)
p_directness = _resolve_args_directness(p_argnames, indirect, node.nodeid)
parametrize_argnames.update(
argname
for argname, directness in p_directness.items()
if directness == "direct"
)
return parametrize_argnames
def deduplicate_names(*seqs: Iterable[str]) -> tuple[str, ...]:
"""De-duplicate the sequence of names while keeping the original order."""
# Ideally we would use a set, but it does not preserve insertion order.
return tuple(dict.fromkeys(name for seq in seqs for name in seq))
| FixtureFunctionDefinition |
python | milvus-io__pymilvus | pymilvus/orm/future.py | {
"start": 1669,
"end": 1777
} | class ____(BaseFuture):
def on_response(self, res: Any):
return MutationResult(res)
| MutationFuture |
python | huggingface__transformers | src/transformers/models/edgetam_video/modular_edgetam_video.py | {
"start": 2218,
"end": 19092
} | class ____(Sam2VideoConfig):
r"""
[`EdgeTamVideoConfig`] is the configuration class to store the configuration of a [`EdgeTamVideoModel`]. It is used to instantiate a
EDGETAM model according to the specified arguments, defining the memory attention, memory encoder, and image encoder
configs. Instantiating a configuration defaults will yield a similar configuration to that of the SAM 2.1 Hiera-tiny
[facebook/EdgeTAM](https://huggingface.co/facebook/EdgeTAM) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (Union[`dict`, `EdgeTamVideoVisionConfig`], *optional*):
Dictionary of configuration options used to initialize [`EdgeTamVideoVisionConfig`].
prompt_encoder_config (Union[`dict`, `EdgeTamVideoPromptEncoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`EdgeTamVideoPromptEncoderConfig`].
mask_decoder_config (Union[`dict`, `EdgeTamVideoMaskDecoderConfig`], *optional*):
Dictionary of configuration options used to initialize [`EdgeTamMaskDecoderConfig`].
initializer_range (`float`, *optional*, defaults to 0.02):
Standard deviation for parameter initialization.
num_maskmem (`int`, *optional*, defaults to 7):
The number of memory slots for the mask memory.
image_size (`int`, *optional*, defaults to 1024):
The size of the input images.
sigmoid_scale_for_mem_enc (`float`, *optional*, defaults to 20.0):
Scale factor for the sigmoid function in the memory encoder.
sigmoid_bias_for_mem_enc (`float`, *optional*, defaults to -10.0):
Bias for the sigmoid function in the memory encoder.
enable_occlusion_spatial_embedding (`bool`, *optional*, defaults to `True`):
Whether to enable spatial embedding for occlusions.
multimask_output_in_sam (`bool`, *optional*, defaults to `True`):
Whether to output multiple masks from the SAM head.
multimask_min_pt_num (`int`, *optional*, defaults to 0):
The minimum number of points to trigger multimask output.
multimask_max_pt_num (`int`, *optional*, defaults to 1):
The maximum number of points to trigger multimask output.
multimask_output_for_tracking (`bool`, *optional*, defaults to `True`):
Whether to use multimask output for tracking.
max_object_pointers_in_encoder (`int`, *optional*, defaults to 16):
The maximum number of object pointers in the encoder.
max_cond_frame_num (`int`, *optional*, defaults to -1):
Maximum number of conditioning frames to use in memory attention. Set to -1 to use all conditioning frames.
enable_temporal_pos_encoding_for_object_pointers (`bool`, *optional*, defaults to `True`):
Whether to enable temporal positional encoding for object pointers.
memory_attention_hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the memory attention hidden states.
memory_attention_num_layers (`int`, *optional*, defaults to 2):
The number of layers in the memory attention module.
memory_attention_num_attention_heads (`int`, *optional*, defaults to 1):
Number of attention heads for each attention layer in the memory attention.
memory_attention_downsample_rate (`int`, *optional*, defaults to 1):
The downsample rate for the attention layers.
memory_attention_mlp_hidden_size (`int`, *optional*, defaults to 2048):
The dimension of the feedforward network in the memory attention module.
memory_attention_mlp_hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in the feedforward network in the memory attention module.
memory_attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout rate for the memory attention module.
memory_attention_rope_theta (`float`, *optional*, defaults to 10000):
The Rope theta parameter.
memory_attention_rope_feat_sizes (`Tuple[int, int]`, *optional*, defaults to `[64, 64]`):
The feature sizes for the Rope positional encoding.
memory_attention_rope_k_sizes (`List[int]`, *optional*, defaults to `[16, 16]`):
The key feature sizes for the RoPE positional encoding in memory attention.
memory_attention_rope_dropout (`float`, *optional*, defaults to 0.1):
The dropout rate for the Rope positional encoding.
perceiver_resampler_num_latents (`int`, *optional*, defaults to 256):
The number of 1D latent tokens in the perceiver resampler.
perceiver_resampler_num_latents_2d (`int`, *optional*, defaults to 256):
The number of 2D latent tokens in the perceiver resampler.
perceiver_resampler_hidden_size (`int`, *optional*, defaults to 64):
The hidden size of the perceiver resampler.
perceiver_resampler_mlp_intermediate_size (`int`, *optional*, defaults to 256):
The intermediate size of the feedforward network in the perceiver resampler.
perceiver_resampler_num_attention_heads (`int`, *optional*, defaults to 1):
The number of attention heads in the perceiver resampler.
perceiver_resampler_attention_head_dim (`int`, *optional*, defaults to 64):
The dimension of each attention head in the perceiver resampler.
perceiver_resampler_num_layers (`int`, *optional*, defaults to 2):
The number of layers in the perceiver resampler.
perceiver_resampler_hidden_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate for the hidden layers in the perceiver resampler.
perceiver_resampler_attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate for the attention layers in the perceiver resampler.
memory_encoder_hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the memory encoder hidden states.
memory_encoder_output_channels (`int`, *optional*, defaults to 64):
The number of output channels for the memory encoder.
mask_downsampler_embed_dim (`int`, *optional*, defaults to 256):
The dimension of the mask downsampler embedding.
memory_fuser_intermediate_dim (`int`, *optional*, defaults to 1024):
The intermediate dimension of the memory fuser feedforward network.
mask_downsampler_kernel_size (`int`, *optional*, defaults to 3):
The kernel size for the mask downsampler.
mask_downsampler_stride (`int`, *optional*, defaults to 2):
The stride for the mask downsampler.
mask_downsampler_padding (`int`, *optional*, defaults to 1):
The padding for the mask downsampler.
mask_downsampler_total_stride (`int`, *optional*, defaults to 16):
The total stride for the mask downsampler.
mask_downsampler_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the mask downsampler.
memory_fuser_num_layers (`int`, *optional*, defaults to 2):
The number of layers in the memory fuser.
memory_fuser_embed_dim (`int`, *optional*, defaults to 256):
The dimension of the memory fuser embedding.
memory_fuser_kernel_size (`int`, *optional*, defaults to 7):
The kernel size for the memory fuser.
memory_fuser_padding (`int`, *optional*, defaults to 3):
The padding for the memory fuser.
memory_fuser_layer_scale_init_value (`float`, *optional*, defaults to 1e-06):
The initial value for the layer scale in the memory fuser.
memory_fuser_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the memory fuser.
Example:
```python
>>> from transformers import (
... EdgeTamVisionConfig,
... EdgeTamVideoPromptEncoderConfig,
... EdgeTamVideoMaskDecoderConfig,
... EdgeTamVideoModel,
... EdgeTamVideoConfig,
... )
>>> # Initializing a EdgeTamVideoConfig with `"facebook/edgetam.1_hiera_tiny"` style configuration
>>> configuration = EdgeTamVideoConfig()
>>> # Initializing a EdgeTamVideoModel (with random weights) from the `"facebook/edgetam.1_hiera_tiny"` style configuration
>>> model = EdgeTamVideoModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a EdgeTamConfig from a EdgeTamVisionConfig, EdgeTamPromptEncoderConfig, and EdgeTamMaskDecoderConfig
>>> # Initializing EDGETAM vision encoder, memory attention, and memory encoder configurations
>>> vision_config = EdgeTamVisionConfig()
>>> prompt_encoder_config = EdgeTamVideoPromptEncoderConfig()
>>> mask_decoder_config = EdgeTamVideoMaskDecoderConfig()
>>> config = EdgeTamVideoConfig(vision_config, prompt_encoder_config, mask_decoder_config)
```"""
model_type = "edgetam_video"
sub_configs = {
"vision_config": AutoConfig,
"prompt_encoder_config": EdgeTamVideoPromptEncoderConfig,
"mask_decoder_config": EdgeTamVideoMaskDecoderConfig,
}
def __init__(
self,
vision_config=None,
prompt_encoder_config=None,
mask_decoder_config=None,
initializer_range=0.02,
num_maskmem=7,
image_size=1024,
sigmoid_scale_for_mem_enc=20.0,
sigmoid_bias_for_mem_enc=-10.0,
enable_occlusion_spatial_embedding=True,
multimask_output_in_sam=True,
multimask_min_pt_num=0,
multimask_max_pt_num=1,
multimask_output_for_tracking=True,
max_object_pointers_in_encoder=16,
max_cond_frame_num=-1,
enable_temporal_pos_encoding_for_object_pointers=True,
# memory attention
memory_attention_hidden_size=256,
memory_attention_num_layers=2,
memory_attention_num_attention_heads=1,
memory_attention_downsample_rate=1,
memory_attention_mlp_hidden_size=2048,
memory_attention_mlp_hidden_act="relu",
memory_attention_dropout=0.1,
memory_attention_rope_theta=10000,
memory_attention_rope_feat_sizes=None,
memory_attention_rope_k_sizes=None,
memory_attention_rope_dropout=0.1,
# spatial perceiver resampler
perceiver_resampler_num_latents=256,
perceiver_resampler_num_latents_2d=256,
perceiver_resampler_hidden_size=64,
perceiver_resampler_mlp_intermediate_size=256,
perceiver_resampler_num_attention_heads=1,
perceiver_resampler_attention_head_dim=64,
perceiver_resampler_num_layers=2,
perceiver_resampler_hidden_dropout=0.0,
perceiver_resampler_attention_dropout=0.0,
# memory encoder
memory_encoder_hidden_size=256,
memory_encoder_output_channels=64,
mask_downsampler_embed_dim=256,
memory_fuser_intermediate_dim=1024,
mask_downsampler_kernel_size=3,
mask_downsampler_stride=2,
mask_downsampler_padding=1,
mask_downsampler_total_stride=16,
mask_downsampler_hidden_act="gelu",
memory_fuser_num_layers=2,
memory_fuser_embed_dim=256,
memory_fuser_kernel_size=7,
memory_fuser_padding=3,
memory_fuser_layer_scale_init_value=1e-6,
memory_fuser_hidden_act="gelu",
**kwargs,
):
PreTrainedConfig.__init__(**kwargs)
vision_config = vision_config if vision_config is not None else {}
prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
memory_attention_rope_feat_sizes = (
[64, 64] if memory_attention_rope_feat_sizes is None else memory_attention_rope_feat_sizes
)
memory_attention_rope_k_sizes = (
[16, 16] if memory_attention_rope_k_sizes is None else memory_attention_rope_k_sizes
)
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "sam2_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
if isinstance(prompt_encoder_config, EdgeTamVideoPromptEncoderConfig):
prompt_encoder_config = prompt_encoder_config.to_dict()
if isinstance(mask_decoder_config, EdgeTamVideoMaskDecoderConfig):
mask_decoder_config = mask_decoder_config.to_dict()
self.vision_config = vision_config
self.prompt_encoder_config = EdgeTamVideoPromptEncoderConfig(**prompt_encoder_config)
self.mask_decoder_config = EdgeTamVideoMaskDecoderConfig(**mask_decoder_config)
self.initializer_range = initializer_range
self.num_maskmem = num_maskmem # default 1 input frame + 6 previous frames
self.image_size = image_size
self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc # scale factor for mask sigmoid prob
self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc # bias factor for mask sigmoid prob
self.enable_occlusion_spatial_embedding = enable_occlusion_spatial_embedding
self.multimask_output_in_sam = multimask_output_in_sam
self.multimask_min_pt_num = multimask_min_pt_num
self.multimask_max_pt_num = multimask_max_pt_num
self.multimask_output_for_tracking = multimask_output_for_tracking
self.max_object_pointers_in_encoder = max_object_pointers_in_encoder
self.max_cond_frame_num = max_cond_frame_num
self.enable_temporal_pos_encoding_for_object_pointers = enable_temporal_pos_encoding_for_object_pointers
# memory attention
self.memory_attention_hidden_size = memory_attention_hidden_size
self.memory_attention_num_layers = memory_attention_num_layers
self.memory_attention_num_attention_heads = memory_attention_num_attention_heads
self.memory_attention_downsample_rate = memory_attention_downsample_rate
self.memory_attention_mlp_hidden_size = memory_attention_mlp_hidden_size
self.memory_attention_mlp_hidden_act = memory_attention_mlp_hidden_act
self.memory_attention_dropout = memory_attention_dropout
self.memory_attention_rope_theta = memory_attention_rope_theta
self.memory_attention_rope_feat_sizes = memory_attention_rope_feat_sizes
self.memory_attention_rope_k_sizes = memory_attention_rope_k_sizes
self.memory_attention_rope_dropout = memory_attention_rope_dropout
# spatial perceiver resampler
self.perceiver_resampler_num_latents = perceiver_resampler_num_latents
self.perceiver_resampler_num_latents_2d = perceiver_resampler_num_latents_2d
self.perceiver_resampler_hidden_size = perceiver_resampler_hidden_size
self.perceiver_resampler_mlp_intermediate_size = perceiver_resampler_mlp_intermediate_size
self.perceiver_resampler_attention_head_dim = perceiver_resampler_attention_head_dim
self.perceiver_resampler_num_attention_heads = perceiver_resampler_num_attention_heads
self.perceiver_resampler_num_layers = perceiver_resampler_num_layers
self.perceiver_resampler_hidden_dropout = perceiver_resampler_hidden_dropout
self.perceiver_resampler_attention_dropout = perceiver_resampler_attention_dropout
# memory encoder
self.memory_encoder_hidden_size = memory_encoder_hidden_size
self.memory_encoder_output_channels = memory_encoder_output_channels
self.mask_downsampler_embed_dim = mask_downsampler_embed_dim
self.mask_downsampler_kernel_size = mask_downsampler_kernel_size
self.mask_downsampler_stride = mask_downsampler_stride
self.mask_downsampler_padding = mask_downsampler_padding
self.mask_downsampler_total_stride = mask_downsampler_total_stride
self.mask_downsampler_hidden_act = mask_downsampler_hidden_act
self.memory_fuser_num_layers = memory_fuser_num_layers
self.memory_fuser_embed_dim = memory_fuser_embed_dim
self.memory_fuser_intermediate_dim = memory_fuser_intermediate_dim
self.memory_fuser_kernel_size = memory_fuser_kernel_size
self.memory_fuser_padding = memory_fuser_padding
self.memory_fuser_layer_scale_init_value = memory_fuser_layer_scale_init_value
self.memory_fuser_hidden_act = memory_fuser_hidden_act
| EdgeTamVideoConfig |
python | PrefectHQ__prefect | src/prefect/_experimental/plugins/spec.py | {
"start": 1000,
"end": 1513
} | class ____:
"""
Result returned by a plugin's setup_environment hook.
Attributes:
env: Environment variables to set (e.g., AWS_* variables)
note: Short, non-secret human-readable hint about what was configured
required: If True and hook fails, abort in strict mode
"""
env: Mapping[str, str] # e.g. AWS_* variables
note: Optional[str] = None # short, non-secret human hint
required: bool = False # if True and hook fails -> abort (in strict mode)
| SetupResult |
python | openai__openai-python | src/openai/types/realtime/realtime_audio_formats_param.py | {
"start": 289,
"end": 492
} | class ____(TypedDict, total=False):
rate: Literal[24000]
"""The sample rate of the audio. Always `24000`."""
type: Literal["audio/pcm"]
"""The audio format. Always `audio/pcm`."""
| AudioPCM |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 8710,
"end": 11640
} | class ____(_ColumnLoader):
def __init__(self, parent, strategy_key):
super().__init__(parent, strategy_key)
# compare to the "default" expression that is mapped in
# the column. If it's sql.null, we don't need to render
# unless an expr is passed in the options.
null = sql.null().label(None)
self._have_default_expression = any(
not c.compare(null) for c in self.parent_property.columns
)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs,
):
columns = None
if loadopt and loadopt._extra_criteria:
columns = loadopt._extra_criteria
elif self._have_default_expression:
columns = self.parent_property.columns
if columns is None:
return
for c in columns:
if adapter:
c = adapter.columns[c]
compile_state._append_dedupe_col_collection(c, column_collection)
fetch = columns[0]
if adapter:
fetch = adapter.columns[fetch]
if fetch is None:
# None is not expected to be the result of any
# adapter implementation here, however there may be theoretical
# usages of returning() with context.DMLReturningColFilter
return
memoized_populators[self.parent_property] = fetch
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# look through list of columns represented here
# to see which, if any, is present in the row.
if loadopt and loadopt._extra_criteria:
columns = loadopt._extra_criteria
for col in columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
accepts_scalar_loader=False,
default_scalar_value=self.parent_property._default_scalar_value,
)
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
@properties.ColumnProperty.strategy_for(
deferred=True, instrument=True, raiseload=True
)
@properties.ColumnProperty.strategy_for(do_nothing=True)
| _ExpressionColumnLoader |
python | django__django | tests/auth_tests/test_migrations.py | {
"start": 9768,
"end": 10864
} | class ____(TransactionTestCase):
databases = {"default", "other"}
available_apps = [
"auth_tests",
"django.contrib.auth",
"django.contrib.contenttypes",
]
def setUp(self):
ContentType.objects.all().delete()
Permission.objects.using("other").delete()
concrete_content_type = ContentType.objects.db_manager("other").get_for_model(
Proxy
)
self.permission = Permission.objects.using("other").create(
content_type=concrete_content_type,
codename="add_proxy",
name="Can add proxy",
)
def test_migrate_other_database(self):
proxy_model_content_type = ContentType.objects.db_manager(
"other"
).get_for_model(Proxy, for_concrete_model=False)
with connections["other"].schema_editor() as editor:
update_proxy_permissions.update_proxy_model_permissions(apps, editor)
self.permission.refresh_from_db()
self.assertEqual(self.permission.content_type, proxy_model_content_type)
| MultiDBProxyModelAppLabelTests |
python | fluentpython__example-code-2e | 14-inheritance/uppermixin.py | {
"start": 2878,
"end": 3028
} | class ____(UpperCaseMixin, collections.Counter): # <2>
"""Specialized 'Counter' that uppercases string keys""" # <3>
# end::UPPERDICT[]
| UpperCounter |
python | jazzband__django-simple-history | simple_history/tests/tests/test_utils.py | {
"start": 1172,
"end": 3629
} | class ____(unittest.TestCase):
def test__get_m2m_field_name__returns_expected_value(self):
def field_names(model):
history_model = get_history_model_for_model(model)
# Sort the fields, to prevent flaky tests
fields = sorted(history_model._history_m2m_fields, key=lambda f: f.name)
return [get_m2m_field_name(field) for field in fields]
self.assertListEqual(field_names(PollWithManyToMany), ["pollwithmanytomany"])
self.assertListEqual(
field_names(PollWithManyToManyCustomHistoryID),
["pollwithmanytomanycustomhistoryid"],
)
self.assertListEqual(
field_names(PollWithManyToManyWithIPAddress),
["pollwithmanytomanywithipaddress"],
)
self.assertListEqual(
field_names(PollWithSeveralManyToMany), ["pollwithseveralmanytomany"] * 3
)
self.assertListEqual(
field_names(PollChildBookWithManyToMany),
["pollchildbookwithmanytomany"] * 2,
)
self.assertListEqual(
field_names(PollChildRestaurantWithManyToMany),
["pollchildrestaurantwithmanytomany"] * 2,
)
self.assertListEqual(
field_names(PollWithSelfManyToMany), ["from_pollwithselfmanytomany"]
)
def test__get_m2m_reverse_field_name__returns_expected_value(self):
def field_names(model):
history_model = get_history_model_for_model(model)
# Sort the fields, to prevent flaky tests
fields = sorted(history_model._history_m2m_fields, key=lambda f: f.name)
return [get_m2m_reverse_field_name(field) for field in fields]
self.assertListEqual(field_names(PollWithManyToMany), ["place"])
self.assertListEqual(field_names(PollWithManyToManyCustomHistoryID), ["place"])
self.assertListEqual(field_names(PollWithManyToManyWithIPAddress), ["place"])
self.assertListEqual(
field_names(PollWithSeveralManyToMany), ["book", "place", "restaurant"]
)
self.assertListEqual(
field_names(PollChildBookWithManyToMany), ["book", "place"]
)
self.assertListEqual(
field_names(PollChildRestaurantWithManyToMany), ["place", "restaurant"]
)
self.assertListEqual(
field_names(PollWithSelfManyToMany), ["to_pollwithselfmanytomany"]
)
| GetM2MFieldNamesTestCase |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 50444,
"end": 56587
} | class ____(Response):
"""
Response of workers.get_all endpoint.
:param workers:
:type workers: Sequence[Worker]
"""
_service = "workers"
_action = "get_all"
_version = "2.13"
_schema = {
"definitions": {
"current_task_entry": {
"properties": {
"id": {"description": "ID", "type": ["string", "null"]},
"last_iteration": {
"description": "Last task iteration",
"type": ["integer", "null"],
},
"name": {"description": "Name", "type": ["string", "null"]},
"running_time": {
"description": "Task running time",
"type": ["integer", "null"],
},
},
"type": "object",
},
"id_name_entry": {
"properties": {
"id": {"description": "ID", "type": ["string", "null"]},
"name": {"description": "Name", "type": ["string", "null"]},
},
"type": "object",
},
"queue_entry": {
"properties": {
"id": {"description": "ID", "type": ["string", "null"]},
"name": {"description": "Name", "type": ["string", "null"]},
"next_task": {
"description": "Next task in the queue",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
"num_tasks": {
"description": "Number of task entries in the queue",
"type": ["integer", "null"],
},
},
"type": "object",
},
"worker": {
"properties": {
"company": {
"description": "Associated company",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
"id": {"description": "Worker ID", "type": ["string", "null"]},
"ip": {
"description": "IP of the worker",
"type": ["string", "null"],
},
"last_activity_time": {
"description": "Last activity time (even if an error occurred)",
"format": "date-time",
"type": ["string", "null"],
},
"last_report_time": {
"description": "Last successful report time",
"format": "date-time",
"type": ["string", "null"],
},
"project": {
"description": "Project in which currently executing task resides",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
"queue": {
"description": "Queue from which running task was taken",
"oneOf": [
{"$ref": "#/definitions/queue_entry"},
{"type": "null"},
],
},
"queues": {
"description": "List of queues on which the worker is listening",
"items": {"$ref": "#/definitions/queue_entry"},
"type": ["array", "null"],
},
"register_time": {
"description": "Registration time",
"format": "date-time",
"type": ["string", "null"],
},
"tags": {
"description": "User tags for the worker",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task currently being run by the worker",
"oneOf": [
{"$ref": "#/definitions/current_task_entry"},
{"type": "null"},
],
},
"user": {
"description": "Associated user (under whose credentials are used by the worker daemon)",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
},
"type": "object",
},
},
"properties": {
"workers": {
"items": {"$ref": "#/definitions/worker"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, workers: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.workers = workers
@schema_property("workers")
def workers(self) -> Optional[List[Any]]:
return self._property_workers
@workers.setter
def workers(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_workers = None
return
self.assert_isinstance(value, "workers", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Worker.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "workers", Worker, is_array=True)
self._property_workers = value
| GetAllResponse |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_run_cancellation.py | {
"start": 2709,
"end": 7612
} | class ____(QueuedRunCoordinatorTestSuite):
def test_cancel_queued_run(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "infinite_loop_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"ops": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
run = graphql_context.instance.get_run_by_id(run_id)
assert run and run.status == DagsterRunStatus.QUEUED
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": run_id}
)
assert (
result.data["terminatePipelineExecution"]["__typename"] == "TerminateRunSuccess"
), str(result.data)
def test_cancel_runs(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "infinite_loop_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"ops": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
run = graphql_context.instance.get_run_by_id(run_id)
assert run and run.status == DagsterRunStatus.QUEUED
result = execute_dagster_graphql(
graphql_context,
RUNS_CANCELLATION_QUERY,
variables={"runIds": [run_id, "nonexistent_id"]},
)
assert result.data["terminateRuns"]["__typename"] == "TerminateRunsResult"
assert (
result.data["terminateRuns"]["terminateRunResults"][0]["__typename"]
== "TerminateRunSuccess"
)
assert (
result.data["terminateRuns"]["terminateRunResults"][1]["__typename"]
== "RunNotFoundError"
)
assert (
result.data["terminateRuns"]["terminateRunResults"][1]["runId"] == "nonexistent_id"
)
def test_force_cancel_queued_run(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "infinite_loop_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"ops": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
run = graphql_context.instance.get_run_by_id(run_id)
assert run and run.status == DagsterRunStatus.QUEUED
result = execute_dagster_graphql(
graphql_context,
RUN_CANCELLATION_QUERY,
variables={
"runId": run_id,
"terminatePolicy": "MARK_AS_CANCELED_IMMEDIATELY",
},
)
assert result.data["terminatePipelineExecution"]["__typename"] == "TerminateRunSuccess"
RunTerminationTestSuite: Any = make_graphql_context_test_suite(
context_variants=[GraphQLContextVariant.sqlite_with_default_run_launcher_managed_grpc_env()]
)
def _exception_terminate(_run_id):
raise Exception("FAILED TO TERMINATE")
def _return_fail_terminate(_run_id):
return False
| TestQueuedRunTermination |
python | pennersr__django-allauth | allauth/socialaccount/providers/lichess/provider.py | {
"start": 328,
"end": 534
} | class ____(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("url")
def get_avatar_url(self):
return self.account.extra_data.get("avatar")
| LichessAccount |
python | gevent__gevent | src/gevent/testing/openfiles.py | {
"start": 7983,
"end": 8739
} | class ____(object): # pragma: no cover
"""
A test case mixin that helps find a method that's leaking an
open file.
Only mix this in when needed to debug, it slows tests down.
"""
def setUp(self):
self.__open_files_count = get_number_open_files()
super(DoesNotLeakFilesMixin, self).setUp()
def tearDown(self):
super(DoesNotLeakFilesMixin, self).tearDown()
after = get_number_open_files()
if after > self.__open_files_count:
raise AssertionError(
"Too many open files. Before: %s < After: %s.\n%s" % (
self.__open_files_count,
after,
get_open_files()
)
)
| DoesNotLeakFilesMixin |
python | Pylons__pyramid | tests/pkgs/eventonly/__init__.py | {
"start": 667,
"end": 1899
} | class ____:
pass
@subscriber(Foo)
def foo(event):
event.response.text += 'foo '
@subscriber(Foo, yup=True)
def fooyup(event):
event.response.text += 'fooyup '
@subscriber([Foo, Bar])
def foobar(event):
event.response.text += 'foobar '
@subscriber([Foo, Bar])
def foobar2(event, context):
event.response.text += 'foobar2 '
@subscriber([Foo, Bar], yup=True)
def foobaryup(event):
event.response.text += 'foobaryup '
@subscriber([Foo, Bar], yup=True)
def foobaryup2(event, context):
event.response.text += 'foobaryup2 '
@subscriber([Foo, Bar], yup=True, yup_with_all_args=True)
def foobaryup3(event, context):
event.response.text += 'foobaryup3 '
@view_config(name='sendfoo')
def sendfoo(request):
response = request.response
response.yup = True
request.registry.notify(Foo(response))
return response
@view_config(name='sendfoobar')
def sendfoobar(request):
response = request.response
response.yup = True
request.registry.notify(Foo(response), Bar())
return response
def includeme(config):
config.add_subscriber_predicate('yup', Yup)
config.add_subscriber_predicate('yup_with_all_args', YupWithAllArgs)
config.scan('tests.pkgs.eventonly')
| Bar |
python | ray-project__ray | python/ray/util/client/server/server_pickler.py | {
"start": 2416,
"end": 4450
} | class ____(pickle.Unpickler):
def __init__(self, server, *args, **kwargs):
super().__init__(*args, **kwargs)
self.server = server
def persistent_load(self, pid):
assert isinstance(pid, PickleStub)
if pid.type == "Ray":
return ray
elif pid.type == "Object":
return self.server.object_refs[pid.client_id][pid.ref_id]
elif pid.type == "Actor":
return self.server.actor_refs[pid.ref_id]
elif pid.type == "RemoteFuncSelfReference":
return ClientReferenceFunction(pid.client_id, pid.ref_id)
elif pid.type == "RemoteFunc":
return self.server.lookup_or_register_func(
pid.ref_id, pid.client_id, pid.baseline_options
)
elif pid.type == "RemoteActorSelfReference":
return ClientReferenceActor(pid.client_id, pid.ref_id)
elif pid.type == "RemoteActor":
return self.server.lookup_or_register_actor(
pid.ref_id, pid.client_id, pid.baseline_options
)
elif pid.type == "RemoteMethod":
actor = self.server.actor_refs[pid.ref_id]
return getattr(actor, pid.name)
else:
raise NotImplementedError("Uncovered client data type")
def dumps_from_server(
obj: Any, client_id: str, server_instance: "RayletServicer", protocol=None
) -> bytes:
with io.BytesIO() as file:
sp = ServerPickler(client_id, server_instance, file, protocol=protocol)
sp.dump(obj)
return file.getvalue()
def loads_from_client(
data: bytes,
server_instance: "RayletServicer",
*,
fix_imports=True,
encoding="ASCII",
errors="strict"
) -> Any:
with disable_client_hook():
if isinstance(data, str):
raise TypeError("Can't load pickle from unicode string")
file = io.BytesIO(data)
return ClientUnpickler(
server_instance, file, fix_imports=fix_imports, encoding=encoding
).load()
| ClientUnpickler |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 56898,
"end": 66096
} | class ____(system_info):
# LAPACK_SRC is deprecated, please do not use this!
# Build or install a BLAS library via your package manager or from
# source separately.
section = 'lapack_src'
dir_env_var = 'LAPACK_SRC'
notfounderror = LapackSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'dgesv.f')):
src_dir = d
break
if not src_dir:
#XXX: Get sources from netlib. May be ask first.
return
# The following is extracted from LAPACK-3.0/SRC/Makefile.
# Added missing names from lapack-lite-3.1.1/SRC/Makefile
# while keeping removed names for Lapack-3.0 compatibility.
allaux = '''
ilaenv ieeeck lsame lsamen xerbla
iparmq
''' # *.f
laux = '''
bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
stebz stedc steqr sterf
larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
lazq3 lazq4
''' # [s|d]*.f
lasrc = '''
gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
tzrqf tzrzf
lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
''' # [s|c|d|z]*.f
sd_lasrc = '''
laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
sygvx sytd2 sytrd
''' # [s|d]*.f
cz_lasrc = '''
bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
''' # [c|z]*.f
#######
sclaux = laux + ' econd ' # s*.f
dzlaux = laux + ' secnd ' # d*.f
slasrc = lasrc + sd_lasrc # s*.f
dlasrc = lasrc + sd_lasrc # d*.f
clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
oclasrc = ' icmax1 scsum1 ' # *.f
ozlasrc = ' izmax1 dzsum1 ' # *.f
sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ ['c%s.f' % f for f in (clasrc).split()] \
+ ['z%s.f' % f for f in (zlasrc).split()] \
+ ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
sources = [os.path.join(src_dir, f) for f in sources]
# Lapack 3.1:
src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
# Lapack 3.2.1:
sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
# Should we check here actual existence of source files?
# Yes, the file listing is different between 3.0 and 3.1
# versions.
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
ATL_buildinfo();
return 0;
}
'''
_cached_atlas_version = {}
def get_atlas_version(**config):
libraries = config.get('libraries', [])
library_dirs = config.get('library_dirs', [])
key = (tuple(libraries), tuple(library_dirs))
if key in _cached_atlas_version:
return _cached_atlas_version[key]
c = cmd_config(Distribution())
atlas_version = None
info = {}
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
)
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
)
if not s:
warnings.warn(textwrap.dedent("""
*****************************************************
Linkage with ATLAS requires gfortran. Use
python setup.py config_fc --fcompiler=gnu95 ...
when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
"""), stacklevel=2)
dict_append(info, language='f90',
define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
except Exception: # failed to get version from file -- maybe on Windows
# look at directory name
for o in library_dirs:
m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
if m:
atlas_version = m.group('version')
if atlas_version is not None:
break
# final choice --- look at ATLAS_VERSION environment
# variable
if atlas_version is None:
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
'ATLAS_INFO', _c_string_literal(atlas_version))
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
return atlas_version or '?.?.?', info
if not s:
m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
if m:
atlas_version = m.group('version')
if atlas_version is None:
if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
atlas_version = '3.2.1_pre3.3.6'
else:
log.info('Status: %d', s)
log.info('Output: %s', o)
elif atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
'ATLAS_INFO', _c_string_literal(atlas_version))
])
result = _cached_atlas_version[key] = atlas_version, info
return result
| lapack_src_info |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sqlite_datasource.py | {
"start": 3406,
"end": 3601
} | class ____(pydantic.AnyUrl):
allowed_schemes = {
"sqlite",
"sqlite+pysqlite",
"sqlite+aiosqlite",
"sqlite+pysqlcipher",
}
host_required = False
| SqliteDsn |
python | gevent__gevent | src/greentest/3.10/test_selectors.py | {
"start": 17240,
"end": 18421
} | class ____(BaseSelectorTestCase, ScalableSelectorMixIn,
unittest.TestCase):
SELECTOR = getattr(selectors, 'KqueueSelector', None)
def test_register_bad_fd(self):
# a file descriptor that's been closed should raise an OSError
# with EBADF
s = self.SELECTOR()
bad_f = os_helper.make_bad_fd()
with self.assertRaises(OSError) as cm:
s.register(bad_f, selectors.EVENT_READ)
self.assertEqual(cm.exception.errno, errno.EBADF)
# the SelectorKey has been removed
with self.assertRaises(KeyError):
s.get_key(bad_f)
def test_empty_select_timeout(self):
# Issues #23009, #29255: Make sure timeout is applied when no fds
# are registered.
s = self.SELECTOR()
self.addCleanup(s.close)
t0 = time()
self.assertEqual(s.select(1), [])
t1 = time()
dt = t1 - t0
# Tolerate 2.0 seconds for very slow buildbots
self.assertTrue(0.8 <= dt <= 2.0, dt)
@unittest.skipUnless(hasattr(selectors, 'DevpollSelector'),
"Test needs selectors.DevpollSelector")
| KqueueSelectorTestCase |
python | scikit-learn__scikit-learn | sklearn/_loss/link.py | {
"start": 4894,
"end": 5197
} | class ____(BaseLink):
"""The logit link function g(x)=logit(x)."""
interval_y_pred = Interval(0, 1, False, False)
def link(self, y_pred, out=None):
return logit(y_pred, out=out)
def inverse(self, raw_prediction, out=None):
return expit(raw_prediction, out=out)
| LogitLink |
python | getsentry__sentry | src/sentry/plugins/providers/integration_repository.py | {
"start": 1359,
"end": 2464
} | class ____(SentryAPIException):
status_code = status.HTTP_400_BAD_REQUEST
code = "repo_exists"
message = "A repository with that configuration already exists"
def __init__(
self,
code=None,
message=None,
detail=None,
repos: list[RepositoryConfig] | None = None,
**kwargs,
):
super().__init__(code=code, message=message, detail=detail, **kwargs)
self.repos = repos
def __str__(self) -> str:
if self.repos:
return f"Repositories already exist: {', '.join(repo['name'] for repo in self.repos)}"
return "Repositories already exist."
def get_integration_repository_provider(integration):
from sentry.plugins.base import bindings # circular import
binding_key = "integration-repository.provider"
provider_key = (
integration.provider
if integration.provider.startswith("integrations:")
else "integrations:" + integration.provider
)
provider_cls = bindings.get(binding_key).get(provider_key)
return provider_cls(id=provider_key)
| RepoExistsError |
python | huggingface__transformers | src/transformers/models/idefics/image_processing_idefics.py | {
"start": 1285,
"end": 2366
} | class ____(ImagesKwargs, total=False):
"""
transform (`Callable`, *optional*):
A custom transform function that accepts a single image can be passed for training. For example,
`torchvision.Compose` can be used to compose multiple transforms. If `None` - an inference mode is
assumed - and then a preset of inference-specific transforms will be applied to the images
image_size (`dict[str, int]`, *optional*):
Resize to image size
"""
transform: Optional[Callable]
image_size: dict[str, int]
def convert_to_rgb(image):
# `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
# for transparent images. The call to `alpha_composite` handles this case
if image.mode == "RGB":
return image
image_rgba = image.convert("RGBA")
background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
alpha_composite = Image.alpha_composite(background, image_rgba)
alpha_composite = alpha_composite.convert("RGB")
return alpha_composite
| IdeficsImageProcessorKwargs |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1141875,
"end": 1142138
} | class ____(VegaLiteSchema):
"""ScaleInvalidDataShowAsstroke schema wrapper."""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAs<"stroke">'}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ScaleInvalidDataShowAsstroke |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/definitions_class.py | {
"start": 12402,
"end": 48651
} | class ____(IHaveNew):
"""A set of definitions explicitly available and loadable by Dagster tools.
Parameters:
assets (Optional[Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]]):
A list of assets. Assets can be created by annotating
a function with :py:func:`@asset <asset>` or
:py:func:`@observable_source_asset <observable_source_asset>`.
Or they can by directly instantiating :py:class:`AssetsDefinition`,
:py:class:`SourceAsset`, or :py:class:`CacheableAssetsDefinition`.
asset_checks (Optional[Iterable[AssetChecksDefinition]]):
A list of asset checks.
schedules (Optional[Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]]):
List of schedules.
sensors (Optional[Iterable[SensorDefinition]]):
List of sensors, typically created with :py:func:`@sensor <sensor>`.
jobs (Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]]):
List of jobs. Typically created with :py:func:`define_asset_job <define_asset_job>`
or with :py:func:`@job <job>` for jobs defined in terms of ops directly.
Jobs created with :py:func:`@job <job>` must already have resources bound
at job creation time. They do not respect the `resources` argument here.
resources (Optional[Mapping[str, Any]]): Dictionary of resources to bind to assets.
The resources dictionary takes raw Python objects,
not just instances of :py:class:`ResourceDefinition`. If that raw object inherits from
:py:class:`IOManager`, it gets coerced to an :py:class:`IOManagerDefinition`.
Any other object is coerced to a :py:class:`ResourceDefinition`.
These resources will be automatically bound
to any assets passed to this Definitions instance using
:py:func:`with_resources <with_resources>`. Assets passed to Definitions with
resources already bound using :py:func:`with_resources <with_resources>` will
override this dictionary.
executor (Optional[Union[ExecutorDefinition, Executor]]):
Default executor for jobs. Individual jobs can override this and define their own executors
by setting the executor on :py:func:`@job <job>` or :py:func:`define_asset_job <define_asset_job>`
explicitly. This executor will also be used for materializing assets directly
outside of the context of jobs. If an :py:class:`Executor` is passed, it is coerced into
an :py:class:`ExecutorDefinition`.
loggers (Optional[Mapping[str, LoggerDefinition]):
Default loggers for jobs. Individual jobs
can define their own loggers by setting them explictly.
metadata (Optional[MetadataMapping]):
Arbitrary metadata for the Definitions. Not displayed in the UI but accessible on
the Definitions instance at runtime.
component_tree (Optional[ComponentTree]):
Information about the Components that were used to construct part of this
Definitions object.
Example usage:
.. code-block:: python
Definitions(
assets=[asset_one, asset_two],
schedules=[a_schedule],
sensors=[a_sensor],
jobs=[a_job],
resources={
"a_resource": some_resource,
},
asset_checks=[asset_one_check_one]
)
Dagster separates user-defined code from system tools such the web server and
the daemon. Rather than loading code directly into process, a tool such as the
webserver interacts with user-defined code over a serialization boundary.
These tools must be able to locate and load this code when they start. Via CLI
arguments or config, they specify a Python module to inspect.
A Python module is loadable by Dagster tools if there is a top-level variable
that is an instance of :py:class:`Definitions`.
"""
assets: TAssets = None
schedules: TSchedules = None
sensors: TSensors = None
jobs: TJobs = None
resources: Optional[Mapping[str, Any]] = None
executor: Optional[Union[ExecutorDefinition, Executor]] = None
loggers: Optional[Mapping[str, LoggerDefinition]] = None
# There's a bug that means that sometimes it's Dagster's fault when AssetsDefinitions are
# passed here instead of AssetChecksDefinitions: https://github.com/dagster-io/dagster/issues/22064.
# After we fix the bug, we should remove AssetsDefinition from the set of accepted types.
asset_checks: TAssetChecks = None
metadata: Mapping[str, MetadataValue]
component_tree: Optional[
Annotated["ComponentTree", ImportFrom("dagster.components.core.component_tree")]
]
def __new__(
    cls,
    assets: TAssets = None,
    schedules: TSchedules = None,
    sensors: TSensors = None,
    jobs: TJobs = None,
    resources: Optional[Mapping[str, Any]] = None,
    executor: Optional[Union[ExecutorDefinition, Executor]] = None,
    loggers: Optional[Mapping[str, LoggerDefinition]] = None,
    asset_checks: TAssetChecks = None,
    metadata: Optional[RawMetadataMapping] = None,
    component_tree: Optional["ComponentTree"] = None,
):
    # Definitions is a record-style (immutable) type, so construction flows
    # through __new__ on the superclass rather than __init__.
    instance = super().__new__(
        cls,
        assets=assets,
        schedules=schedules,
        sensors=sensors,
        jobs=jobs,
        resources=resources,
        executor=executor,
        loggers=loggers,
        asset_checks=asset_checks,
        # Raw metadata is normalized into MetadataValue objects up front.
        metadata=normalize_metadata(check.opt_mapping_param(metadata, "metadata")),
        component_tree=component_tree,
    )
    # A freshly constructed Definitions must not already carry a cached
    # RepositoryDefinition; resolution is expected to happen lazily later.
    check.invariant(
        not instance.has_resolved_repository_def(),
        "Definitions object should not have been resolved",
    )
    return instance
@public
def get_job_def(self, name: str) -> JobDefinition:
    """Get a job definition by name.

    Only returns directly without warning when a concrete `JobDefinition` with
    this name was passed straight into the `Definitions` object. Otherwise a
    deprecation warning is emitted and the name is resolved through the
    repository (which also covers jobs attached to sensors and schedules).
    After dagster 1.11 that fallback resolution will become an error.
    """
    # Was a concrete JobDefinition with this name passed directly?
    passed_directly = any(
        isinstance(candidate, JobDefinition) and candidate.name == name
        for candidate in self.jobs or []
    )
    if not passed_directly:
        specific_warning = self.dig_for_warning(name)
        if specific_warning:
            warnings.warn(specific_warning)
        else:
            warnings.warn(
                f"JobDefinition with name {name} directly passed to Definitions not found, "
                "will attempt to resolve to a JobDefinition. "
                "This will be an error in a future release and will require a call to "
                "resolve_job_def in dagster 1.11. "
            )
    return self.resolve_job_def(name)
def resolve_job_def(self, name: str) -> JobDefinition:
    """Resolve a job definition by name against the underlying repository.

    An :py:class:`UnresolvedAssetJobDefinition` (the return value of
    :py:func:`define_asset_job`) is resolved into a :py:class:`JobDefinition`
    with all resource dependencies fully resolved.
    """
    check.str_param(name, "name")
    repository = self.get_repository_def()
    return repository.get_job(name)
def dig_for_warning(self, name: str) -> Optional[str]:
    """Build a deprecation warning explaining where a job named ``name`` was
    found (jobs param, a sensor, or a schedule), or None when the job was
    passed directly as a resolved JobDefinition or not found at all.
    """
    for candidate in self.jobs or []:
        if candidate.name != name:
            continue
        if isinstance(candidate, JobDefinition):
            # Passed directly as a resolved job: no warning needed.
            return None
        return (
            f"Found asset job named {candidate.name} of type {type(candidate)} passed to `jobs` parameter. Starting in "
            "dagster 1.11, you must now use Definitions.resolve_job_def to correctly "
            "retrieve this job definition."
        )
    for sensor in self.sensors or []:
        matching = [j for j in sensor.jobs if j.name == name]
        if matching:
            return (
                f"Found job or graph named {matching[0].name} passed to sensor named {sensor.name} "
                "that was passed to Definitions in the sensors param. Starting in dagster 1.11, "
                "you must call Definitions.resolve_job_def to retrieve this job definition."
            )
    for schedule in self.schedules or []:
        if schedule.job.name == name:
            return (
                f"Found job named {schedule.job.name} passed to schedule named {schedule.name} "
                "that was passed to Definitions in the schedules param. Starting in dagster 1.11, "
                "you must call Definitions.resolve_job_def to retrieve this job definition."
            )
    return None
@public
def get_sensor_def(self, name: str) -> SensorDefinition:
    """Get a :py:class:`SensorDefinition` by name.

    Resource dependencies on the sensor (and on the job it targets) are fully
    resolved on the returned object. Emits a warning: after dagster 1.11 this
    method will stop resolving resource dependencies.
    """
    warnings.warn(
        "Starting in dagster 1.11, get_sensor_def will return a SensorDefinition without resolving resource dependencies on it or its target."
    )
    return self.resolve_sensor_def(name)
# TODO: after dagster 1.11, this will become the implementation of get_sensor_def -- schrockn 2025-06-02
def get_unresolved_sensor_def(self, name: str) -> SensorDefinition:
    """Return the sensor with the given name exactly as it was passed in,
    without resolving resource dependencies.

    Raises:
        ValueError: If no sensor with that name was passed in.
    """
    found = next((s for s in self.sensors or [] if s.name == name), None)
    if found is None:
        raise ValueError(f"SensorDefinition with name {name} not found")
    return found
def resolve_sensor_def(self, name: str) -> SensorDefinition:
    """Resolve a sensor by name via the repository, with resource
    dependencies fully resolved."""
    check.str_param(name, "name")
    repository = self.get_repository_def()
    return repository.get_sensor_def(name)
@public
def get_schedule_def(self, name: str) -> ScheduleDefinition:
    """Get a :py:class:`ScheduleDefinition` by name.

    Resource dependencies on the schedule (and on the job it targets) are
    fully resolved on the returned object. Emits a warning: after dagster
    1.11 this method will stop resolving resource dependencies.
    """
    warnings.warn(
        "Starting in dagster 1.11, get_schedule_def will return a ScheduleDefinition without resolving resource dependencies on it or its target."
    )
    return self.resolve_schedule_def(name)
# TODO: after dagster 1.11, this will become the implementation of get_schedule_def -- schrockn 2025-06-02
def get_unresolved_schedule_def(self, name: str) -> ScheduleDefinition:
    """Return the schedule with the given name exactly as passed in, without
    resolving resource dependencies.

    Raises:
        ValueError: If the named schedule is an
            UnresolvedPartitionedAssetScheduleDefinition, or was not found.
    """
    for candidate in self.schedules or []:
        if candidate.name != name:
            continue
        if isinstance(candidate, ScheduleDefinition):
            return candidate
        raise ValueError(
            f"ScheduleDefinition with name {name} is an UnresolvedPartitionedAssetScheduleDefinition, which is not supported in get_unresolved_schedule_def"
        )
    raise ValueError(f"ScheduleDefinition with name {name} not found")
def resolve_schedule_def(self, name: str) -> ScheduleDefinition:
    """Resolve a schedule by name via the repository, with resource
    dependencies fully resolved."""
    check.str_param(name, "name")
    repository = self.get_repository_def()
    return repository.get_schedule_def(name)
@public
def load_asset_value(
    self,
    asset_key: CoercibleToAssetKey,
    *,
    python_type: Optional[type] = None,
    instance: Optional[DagsterInstance] = None,
    partition_key: Optional[str] = None,
    metadata: Optional[dict[str, Any]] = None,
) -> object:
    """Load the contents of an asset as a Python object.

    Invokes `load_input` on the :py:class:`IOManager` associated with the asset.

    If you want to load the values of multiple assets, it's more efficient to use
    :py:meth:`~dagster.Definitions.get_asset_value_loader`, which avoids spinning up
    resources separately for each asset.

    Args:
        asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.
        python_type (Optional[Type]): The python type to load the asset as. This is what will
            be returned inside `load_input` by `context.dagster_type.typing_type`.
        instance (Optional[DagsterInstance]): The Dagster instance to load the asset from.
        partition_key (Optional[str]): The partition of the asset to load.
        metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`
            (is equivalent to setting the metadata argument in `In` or `AssetIn`).

    Returns:
        The contents of an asset as a Python object.
    """
    # Pure delegation: the resolved repository owns I/O-manager wiring.
    return self.get_repository_def().load_asset_value(
        asset_key=asset_key,
        python_type=python_type,
        instance=instance,
        partition_key=partition_key,
        metadata=metadata,
    )
@public
def get_asset_value_loader(
    self, instance: Optional[DagsterInstance] = None
) -> "AssetValueLoader":
    """Returns an object that can load the contents of assets as Python objects.

    Invokes `load_input` on the :py:class:`IOManager` associated with the assets. Avoids
    spinning up resources separately for each asset.

    Args:
        instance (Optional[DagsterInstance]): The Dagster instance to load assets from.

    Usage:
        .. code-block:: python

            with defs.get_asset_value_loader() as loader:
                asset1 = loader.load_asset_value("asset1")
                asset2 = loader.load_asset_value("asset2")
    """
    return self.get_repository_def().get_asset_value_loader(
        instance=instance,
    )
def resolve_all_job_defs(self) -> Sequence[JobDefinition]:
    """Get all the Job definitions in the project.

    This covers jobs passed into the Definitions object as well as any
    implicitly created jobs; every returned job has its resource
    dependencies fully resolved.
    """
    repository = self.get_repository_def()
    return repository.get_all_jobs()
def has_implicit_global_asset_job_def(self) -> bool:
    """Whether the resolved repository defines the implicit global asset job."""
    repository = self.get_repository_def()
    return repository.has_implicit_global_asset_job_def()
def get_implicit_global_asset_job_def(self) -> JobDefinition:
    """Deprecated accessor for the single implicit global asset job; use
    resolve_implicit_global_asset_job_def instead."""
    warnings.warn(
        "This will be renamed to resolve_implicit_global_asset_job_def in dagster 1.11"
    )
    repository = self.get_repository_def()
    return repository.get_implicit_global_asset_job_def()
def resolve_implicit_global_asset_job_def(self) -> JobDefinition:
    """A useful convenience method when there is a single defined global asset job.

    This occurs when all assets in the project use a single partitioning scheme.
    If there are multiple partitioning schemes you must use
    get_implicit_job_def_for_assets instead to access the correct implicit job.
    """
    return self.get_repository_def().get_implicit_global_asset_job_def()
def resolve_implicit_job_def_def_for_assets(
    self, asset_keys: Iterable[AssetKey]
) -> Optional[JobDefinition]:
    """Return the implicit job definition that materializes the given asset
    keys, or None if there is no such job.

    NOTE(review): the doubled "def_def" in the name looks like a typo, but it
    is public-facing and cannot be renamed without breaking callers.
    """
    repository = self.get_repository_def()
    return repository.get_implicit_job_def_for_assets(asset_keys)
def get_assets_def(self, key: CoercibleToAssetKey) -> AssetsDefinition:
    """Return the AssetsDefinition passed directly to this Definitions object
    that produces ``key``, falling back (with a deprecation warning) to
    resolving the full asset graph."""
    asset_key = AssetKey.from_coercible(key)
    # Both `assets` and `asset_checks` may contain AssetsDefinition objects;
    # AssetChecksDefinition instances must be excluded from the search.
    for candidate in [*(self.assets or []), *(self.asset_checks or [])]:
        if not isinstance(candidate, AssetsDefinition):
            continue
        if isinstance(candidate, AssetChecksDefinition):
            continue
        if asset_key in candidate.keys:
            return candidate
    warnings.warn(
        f"Could not find assets_def with key {asset_key} directly passed to Definitions. This will be an error starting in 1.11 and will require a call to resolve_assets_def in dagster 1.11."
    )
    return self.resolve_assets_def(asset_key)
def get_asset_checks_def(self, key: AssetCheckKey) -> AssetChecksDefinition:
    """Return the AssetChecksDefinition passed directly to this Definitions
    object that contains ``key``.

    Raises:
        DagsterInvariantViolationError: If no matching checks definition exists.
    """
    for candidate in [*(self.assets or []), *(self.asset_checks or [])]:
        if not isinstance(candidate, AssetChecksDefinition):
            continue
        if key in candidate.asset_and_check_keys:
            return candidate
    raise DagsterInvariantViolationError(f"Could not find asset checks defs for {key}")
def resolve_assets_def(self, key: CoercibleToAssetKey) -> AssetsDefinition:
    """Resolve the asset graph and return the AssetsDefinition producing ``key``.

    Raises:
        DagsterInvariantViolationError: If no asset with that key exists.
    """
    asset_key = AssetKey.from_coercible(key)
    match = next(
        (
            assets_def
            for assets_def in self.resolve_asset_graph().assets_defs
            if asset_key in assets_def.keys
        ),
        None,
    )
    if match is None:
        raise DagsterInvariantViolationError(f"Could not find asset {asset_key}")
    return match
@cached_method
def get_repository_def(self) -> RepositoryDefinition:
    """Definitions is implemented by wrapping RepositoryDefinition. Get that underlying object
    in order to access any functionality which is not exposed on Definitions.

    The result is memoized by @cached_method, so repeated calls return the
    same RepositoryDefinition instance (see has_resolved_repository_def).
    """
    return _create_repository_using_definitions_args(
        name=SINGLETON_REPOSITORY_NAME,
        assets=self.assets,
        schedules=self.schedules,
        sensors=self.sensors,
        jobs=self.jobs,
        resources=self.resources,
        executor=self.executor,
        loggers=self.loggers,
        asset_checks=self.asset_checks,
        metadata=self.metadata,
        component_tree=self.component_tree,
    )
def resolve_asset_graph(self) -> AssetGraph:
    """Get the AssetGraph for this set of definitions."""
    repository = self.get_repository_def()
    return repository.asset_graph
@public
@staticmethod
def validate_loadable(defs: "Definitions") -> None:
    """Validates that the enclosed definitions will be loadable by Dagster:
    - No assets have conflicting keys.
    - No jobs, sensors, or schedules have conflicting names.
    - All asset jobs can be resolved.
    - All resource requirements are satisfied.
    - All partition mappings are valid.

    Meant to be used in unit tests.

    Raises an error if any of the above are not true.
    """
    # Resolving the repository definition performs all the checks listed above.
    defs.get_repository_def().validate_loadable()
@staticmethod
def merge_unbound_defs(*def_sets: "Definitions") -> "Definitions":
    """Merges multiple Definitions objects into a single Definitions object.

    Asserts that none of the inputs has already resolved its asset graph;
    intended for internal use-cases to safeguard against unnecessarily
    resolving subgraphs.
    """
    for index, candidate in enumerate(def_sets):
        check.invariant(
            not candidate.has_resolved_repository_def(),
            f"Definitions object {index} has previously been resolved."
            " merge_unbound_defs should only be used on definitions that have not been resolved.",
        )
    return Definitions.merge(*def_sets)
@public
@staticmethod
def merge(*def_sets: "Definitions") -> "Definitions":
    """Merges multiple Definitions objects into a single Definitions object.

    The returned Definitions object has the union of all the definitions in the input
    Definitions objects.

    Raises an error if the Definitions objects to be merged contain conflicting values for the
    same resource key or logger key, or if they have different executors defined.

    Examples:
        .. code-block:: python

            import submodule1
            import submodule2

            defs = Definitions.merge(submodule1.defs, submodule2.defs)

    Returns:
        Definitions: The merged definitions.
    """
    check.sequence_param(def_sets, "def_sets", of_type=Definitions)
    assets = []
    schedules = []
    sensors = []
    jobs = []
    asset_checks = []
    metadata = {}
    component_tree = None
    resources = {}
    # Tracks which input (by index) supplied each resource/logger key, purely
    # to produce clearer conflict error messages below.
    resource_key_indexes: dict[str, int] = {}
    loggers = {}
    logger_key_indexes: dict[str, int] = {}
    executor = None
    executor_index: Optional[int] = None
    for i, def_set in enumerate(def_sets):
        assets.extend(def_set.assets or [])
        asset_checks.extend(def_set.asset_checks or [])
        schedules.extend(def_set.schedules or [])
        sensors.extend(def_set.sensors or [])
        jobs.extend(def_set.jobs or [])
        metadata.update(def_set.metadata)
        for resource_key, resource_value in (def_set.resources or {}).items():
            # Identity comparison: a key may repeat only when bound to the
            # very same resource object.
            if resource_key in resources and resources[resource_key] is not resource_value:
                raise DagsterInvariantViolationError(
                    f"Definitions objects {resource_key_indexes[resource_key]} and {i} have "
                    f"different resources with same key '{resource_key}'"
                )
            resources[resource_key] = resource_value
            resource_key_indexes[resource_key] = i
        for logger_key, logger_value in (def_set.loggers or {}).items():
            if logger_key in loggers and loggers[logger_key] is not logger_value:
                raise DagsterInvariantViolationError(
                    f"Definitions objects {logger_key_indexes[logger_key]} and {i} have "
                    f"different loggers with same key '{logger_key}'"
                )
            loggers[logger_key] = logger_value
            logger_key_indexes[logger_key] = i
        if def_set.executor is not None:
            # At most one distinct executor may be supplied across all inputs.
            if executor is not None and executor is not def_set.executor:
                raise DagsterInvariantViolationError(
                    f"Definitions objects {executor_index} and {i} both have an executor"
                )
            executor = def_set.executor
            executor_index = i
        if def_set.component_tree:
            # Component trees cannot be merged; only one input may carry one.
            if component_tree is not None:
                raise DagsterInvariantViolationError(
                    "Can not merge Definitions that both contain component_tree."
                )
            component_tree = def_set.component_tree
    return Definitions(
        assets=assets,
        schedules=schedules,
        sensors=sensors,
        jobs=jobs,
        resources=resources,
        executor=executor,
        loggers=loggers,
        asset_checks=asset_checks,
        metadata=metadata,
        component_tree=component_tree,
    )
@public
@deprecated(
    breaking_version="1.11",
    additional_warn_text="Use resolve_all_asset_specs instead",
    subject="get_all_asset_specs",
)
def get_all_asset_specs(self) -> Sequence[AssetSpec]:
    """Returns an AssetSpec object for AssetsDefinitions and AssetSpec passed directly to the Definitions object."""
    # Deprecated alias; delegates to the non-deprecated resolving variant.
    return self.resolve_all_asset_specs()
@public
def resolve_all_asset_specs(self) -> Sequence[AssetSpec]:
    """Returns an AssetSpec object for every asset contained inside the resolved Definitions object."""
    graph = self.resolve_asset_graph()
    return [node.to_asset_spec() for node in graph.asset_nodes]
@public
def resolve_all_asset_keys(self) -> Sequence[AssetKey]:
    """Returns an AssetKey object for every asset contained inside the resolved Definitions object."""
    all_specs = self.resolve_all_asset_specs()
    return [asset_spec.key for asset_spec in all_specs]
@preview
def with_reconstruction_metadata(self, reconstruction_metadata: Mapping[str, str]) -> Self:
    """Add reconstruction metadata to the Definitions object. This is typically used to cache data
    loaded from some external API that is computed during initialization of a code server.

    The cached data is then made available on the DefinitionsLoadContext during
    reconstruction of the same project context (such as a run worker), allowing use of the
    cached data to avoid additional external API queries. Values are expected to be serialized
    in advance and must be strings.
    """
    check.mapping_param(reconstruction_metadata, "reconstruction_metadata", key_type=str)
    # Enforce pre-serialized values; raising early gives a clearer error than
    # a downstream serialization failure would.
    for k, v in reconstruction_metadata.items():
        if not isinstance(v, str):
            raise DagsterInvariantViolationError(
                f"Reconstruction metadata values must be strings. State-representing values are"
                f" expected to be serialized before being passed as reconstruction metadata."
                f" Got for key {k}:\n\n{v}"
            )
    # Wrap each value so it round-trips through code-location reconstruction.
    normalized_metadata = {
        k: CodeLocationReconstructionMetadataValue(v)
        for k, v in reconstruction_metadata.items()
    }
    # Merge onto existing metadata; reconstruction entries win on key clashes.
    return copy(
        self,
        metadata={
            **(self.metadata or {}),
            **normalized_metadata,
        },
    )
@public
@preview
def map_asset_specs(
    self,
    *,
    func: Callable[[AssetSpec], AssetSpec],
    selection: Optional[CoercibleToAssetSelection] = None,
) -> "Definitions":
    """Map a function over the included AssetSpecs or AssetsDefinitions in this Definitions object, replacing specs in the sequence
    or specs in an AssetsDefinitions with the result of the function.

    Args:
        func (Callable[[AssetSpec], AssetSpec]): The function to apply to each AssetSpec.
        selection (Optional[Union[str, Sequence[str], Sequence[AssetKey], Sequence[Union[AssetsDefinition, SourceAsset]], AssetSelection]]): No longer supported; must be None. Use map_resolved_asset_specs for selections.

    Returns:
        Definitions: A Definitions object where the AssetSpecs have been replaced with the result of the function where the selection applies.

    Examples:
        .. code-block:: python

            import dagster as dg

            my_spec = dg.AssetSpec("asset1")

            @dg.asset
            def asset1(_): ...

            @dg.asset
            def asset2(_): ...

            defs = Definitions(
                assets=[asset1, asset2]
            )

            # Applies to asset1 and asset2
            mapped_defs = defs.map_asset_specs(
                func=lambda s: s.merge_attributes(metadata={"new_key": "new_value"}),
            )
    """
    # The selection parameter is retained only for signature compatibility;
    # passing anything but None is an error.
    check.invariant(
        selection is None,
        "The selection parameter is no longer supported for map_asset_specs, Please use map_resolved_asset_specs instead",
    )
    return self.map_resolved_asset_specs(func=func, selection=None)
@public
@preview
def map_resolved_asset_specs(
    self,
    *,
    func: Callable[[AssetSpec], AssetSpec],
    selection: Optional[CoercibleToAssetSelection] = None,
) -> "Definitions":
    """Map a function over the included AssetSpecs or AssetsDefinitions in this Definitions object, replacing specs in the sequence.

    See map_asset_specs for more details.

    Supports selection and therefore requires resolving the Definitions object to a RepositoryDefinition when there is a selection.

    Examples:
        .. code-block:: python

            import dagster as dg

            my_spec = dg.AssetSpec("asset1")

            @dg.asset
            def asset1(_): ...

            @dg.asset
            def asset2(_): ...

            # Applies only to asset1
            mapped_defs = defs.map_resolved_asset_specs(
                func=lambda s: s.replace_attributes(metadata={"new_key": "new_value"}),
                selection="asset1",
            )
    """
    # Reject deprecated asset types (SourceAsset, CacheableAssetsDefinition)
    # up front; the permissive variant below would silently pass them through.
    non_spec_asset_types = {
        type(d) for d in self.assets or [] if not isinstance(d, (AssetsDefinition, AssetSpec))
    }
    if non_spec_asset_types:
        raise DagsterInvariantViolationError(
            "Can only map over AssetSpec or AssetsDefinition objects. "
            "Received objects of types: "
            f"{non_spec_asset_types}."
        )
    return self.permissive_map_resolved_asset_specs(
        func=func,
        selection=selection,
    )
def permissive_map_resolved_asset_specs(
    self,
    func: Callable[[AssetSpec], AssetSpec],
    selection: Optional[CoercibleToAssetSelection],
) -> "Definitions":
    """This is a permissive version of map_resolved_asset_specs that allows for non-spec asset types, i.e. SourceAssets and CacheableAssetsDefinitions."""
    target_keys = None
    if selection:
        # Coerce the selection and resolve it against the asset graph; this
        # forces repository resolution, which map_asset_specs avoids.
        if isinstance(selection, str):
            selection = AssetSelection.from_string(selection, include_sources=True)
        else:
            selection = AssetSelection.from_coercible(selection)
        target_keys = selection.resolve(self.resolve_asset_graph())
    # Only AssetsDefinition/AssetSpec entries are mappable; others pass through.
    mappable = iter(
        d for d in self.assets or [] if isinstance(d, (AssetsDefinition, AssetSpec))
    )
    # Apply func only to specs inside the selection (or all, if no selection).
    mapped_assets = map_asset_specs(
        lambda spec: func(spec) if (target_keys is None or spec.key in target_keys) else spec,
        mappable,
    )
    # Re-append the non-mappable (deprecated) asset types unchanged.
    assets = [
        *mapped_assets,
        *[d for d in self.assets or [] if not isinstance(d, (AssetsDefinition, AssetSpec))],
    ]
    return replace(self, assets=assets)
def with_resources(self, resources: Optional[Mapping[str, Any]]) -> "Definitions":
    """Return a merged copy of this Definitions with ``resources`` added;
    returns self unchanged when no resources are given."""
    if not resources:
        return self
    return Definitions.merge(self, Definitions(resources=resources))
def has_resolved_repository_def(self) -> bool:
    """Whether get_repository_def has already been called (and cached) on this object."""
    cache = get_cached_method_cache(self, self.get_repository_def.__name__)
    return len(cache) > 0
def with_definition_metadata_update(
    self, update: Callable[[RawMetadataMapping], RawMetadataMapping]
):
    """Run a provided update function on every contained definition that supports it
    to updated its metadata. Return a new Definitions object containing the updated objects.
    """
    # Jobs are updated first and keyed by identity so that schedules/sensors
    # referencing the same job objects reuse the exact updated instances.
    updated_jobs = _update_jobs_metadata(self.jobs, update)
    updated_schedules = _update_schedules_metadata(self.schedules, update, updated_jobs)
    updated_sensors = _update_sensors_metadata(self.sensors, update, updated_jobs)
    updated_assets = _update_assets_metadata(self.assets, update)
    updated_asset_checks = _update_checks_metadata(self.asset_checks, update)
    return replace(
        self,
        jobs=updated_jobs.values(),
        schedules=updated_schedules,
        sensors=updated_sensors,
        assets=updated_assets,
        asset_checks=updated_asset_checks,
    )
def _update_assets_metadata(
    assets: TAssets,
    update: Callable[[RawMetadataMapping], RawMetadataMapping],
) -> TAssets:
    """Apply ``update`` to the metadata of each asset-like object, returning a new list."""
    if not assets:
        return assets
    result = []
    for item in assets:
        if isinstance(item, AssetsDefinition):
            result.append(_update_assets_def_metadata(item, update))
        elif isinstance(item, AssetSpec):
            result.append(item.replace_attributes(metadata=update(item.metadata)))
        elif isinstance(item, (SourceAsset, CacheableAssetsDefinition)):
            # These types are deprecated and do not support metadata updates;
            # pass them through unchanged.
            result.append(item)
        else:
            check.assert_never(item)
    return result
def _update_schedules_metadata(
    schedules: TSchedules,
    update: Callable[[RawMetadataMapping], RawMetadataMapping],
    updated_jobs: Mapping[int, TJob],
) -> TSchedules:
    """Apply ``update`` to each schedule's metadata (and its target job's),
    reusing already-updated jobs from ``updated_jobs`` (keyed by id) to
    preserve object identity with the jobs list."""
    if not schedules:
        return schedules
    updated_schedules = []
    for schedule in schedules:
        if isinstance(schedule, ScheduleDefinition):
            # updated schedule
            new_attrs: dict[str, Any] = {"metadata": update(schedule.metadata)}
            if schedule.has_job:
                # use the already updated job if possible to ensure obj equality
                if id(schedule.job) in updated_jobs:
                    new_attrs["job"] = updated_jobs[id(schedule.job)]
                else:  # otherwise, update the job metadata too
                    new_attrs["job"] = schedule.job.with_metadata(
                        update(schedule.job.metadata or {})
                    )
            updated_schedules.append(schedule.with_attributes(**new_attrs))
        elif isinstance(schedule, UnresolvedPartitionedAssetScheduleDefinition):
            updated_schedules.append(schedule.with_metadata(update(schedule.metadata or {})))
        else:
            check.assert_never(schedule)
    return updated_schedules
def _update_sensors_metadata(
    sensors: TSensors,
    update: Callable[[RawMetadataMapping], RawMetadataMapping],
    updated_jobs: Mapping[int, TJob],
) -> TSensors:
    """Apply ``update`` to each sensor's metadata (and its target jobs'),
    reusing already-updated jobs from ``updated_jobs`` (keyed by id) to
    preserve object identity with the jobs list."""
    if not sensors:
        return sensors
    updated_sensors = []
    for sensor in sensors:
        if isinstance(sensor, SensorDefinition):
            new_attrs: dict[str, Any] = {"metadata": update(sensor.metadata)}
            if sensor.has_jobs:
                new_sensor_jobs = []
                for sensor_job in sensor.jobs:
                    if isinstance(sensor_job, (JobDefinition, UnresolvedAssetJobDefinition)):
                        # use the already updated job if possible to ensure obj equality
                        if id(sensor_job) in updated_jobs:
                            new_sensor_jobs.append(updated_jobs[id(sensor_job)])
                        else:  # otherwise, update the job metadata too
                            new_sensor_jobs.append(
                                sensor_job.with_metadata(update(sensor_job.metadata or {}))
                            )
                    else:
                        new_sensor_jobs.append(sensor_job)  # other types are not updated
                new_attrs["jobs"] = new_sensor_jobs
            updated_sensors.append(sensor.with_attributes(**new_attrs))
        else:
            check.assert_never(sensor)
    return updated_sensors
def _update_jobs_metadata(
    jobs: TJobs,
    update: Callable[[RawMetadataMapping], RawMetadataMapping],
) -> Mapping[int, TJob]:
    """Return ``{id(job): updated_job}`` for each job, keyed by identity so
    callers can reuse the exact updated objects for schedules/sensors."""
    if not jobs:
        return {}
    updated = {}
    for job_def in jobs:
        if isinstance(job_def, (JobDefinition, UnresolvedAssetJobDefinition)):
            updated[id(job_def)] = job_def.with_metadata(update(job_def.metadata or {}))
        else:
            check.assert_never(job_def)
    return updated
def _update_checks_metadata(
    asset_checks: TAssetChecks,
    update: Callable[[RawMetadataMapping], RawMetadataMapping],
) -> TAssetChecks:
    """Apply ``update`` to the metadata of each asset-checks definition."""
    if not asset_checks:
        return asset_checks
    updated = []
    for checks_def in asset_checks:
        if isinstance(checks_def, AssetsDefinition):
            updated.append(_update_assets_def_metadata(checks_def, update))
        else:
            check.assert_never(checks_def)
    return updated
def _update_assets_def_metadata(
    assets_def: AssetsDefinition,
    update: Callable[[RawMetadataMapping], RawMetadataMapping],
) -> AssetsDefinition:
    """Apply ``update`` to the metadata of every asset key and check spec on
    ``assets_def``, returning a new AssetsDefinition."""
    return assets_def.with_attributes(
        metadata_by_key={
            # Check-spec entries are merged last, so they win on key clashes.
            **{key: update(metadata) for key, metadata in assets_def.metadata_by_key.items()},
            **{c.key: update(c.metadata) for c in assets_def.check_specs},
        }
    )
| Definitions |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 26723,
"end": 29532
class ____(FieldValues):
    """
    Valid and invalid values for `CharField`.
    """
    # FieldValues drives table-based tests off these three mappings.
    # Non-string scalars are coerced to their string representation.
    valid_inputs = {
        1: '1',
        'abc': 'abc'
    }
    # Containers and booleans are rejected; the empty string is blocked by
    # the default allow_blank=False.
    invalid_inputs = {
        (): ['Not a valid string.'],
        True: ['Not a valid string.'],
        '': ['This field may not be blank.']
    }
    outputs = {
        1: '1',
        'abc': 'abc'
    }
    field = serializers.CharField()

    def test_trim_whitespace_default(self):
        # Whitespace trimming is on by default.
        field = serializers.CharField()
        assert field.to_internal_value(' abc ') == 'abc'

    def test_trim_whitespace_disabled(self):
        # trim_whitespace=False preserves surrounding whitespace verbatim.
        field = serializers.CharField(trim_whitespace=False)
        assert field.to_internal_value(' abc ') == ' abc '

    def test_disallow_blank_with_trim_whitespace(self):
        # A whitespace-only value trims down to '' and is rejected as blank.
        field = serializers.CharField(allow_blank=False, trim_whitespace=True)
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation('   ')
        assert exc_info.value.detail == ['This field may not be blank.']

    def test_null_bytes(self):
        # NUL characters are rejected wherever they appear in the string.
        field = serializers.CharField()
        for value in ('\0', 'foo\0', '\0foo', 'foo\0foo'):
            with pytest.raises(serializers.ValidationError) as exc_info:
                field.run_validation(value)
            assert exc_info.value.detail == [
                'Null characters are not allowed.'
            ]

    def test_surrogate_characters(self):
        # Lone surrogates (U+D800..U+DFFF) are invalid; the code points just
        # outside that range validate fine.
        field = serializers.CharField()
        for code_point, expected_message in (
            (0xD800, 'Surrogate characters are not allowed: U+D800.'),
            (0xDFFF, 'Surrogate characters are not allowed: U+DFFF.'),
        ):
            with pytest.raises(serializers.ValidationError) as exc_info:
                field.run_validation(chr(code_point))
            assert exc_info.value.detail[0].code == 'surrogate_characters_not_allowed'
            assert str(exc_info.value.detail[0]) == expected_message
        for code_point in (0xD800 - 1, 0xDFFF + 1):
            field.run_validation(chr(code_point))

    def test_iterable_validators(self):
        """
        Ensure `validators` parameter is compatible with reasonable iterables.
        """
        value = 'example'
        for validators in ([], (), set()):
            field = serializers.CharField(validators=validators)
            field.run_validation(value)

        def raise_exception(value):
            raise exceptions.ValidationError('Raised error')

        for validators in ([raise_exception], (raise_exception,), {raise_exception}):
            field = serializers.CharField(validators=validators)
            with pytest.raises(serializers.ValidationError) as exc_info:
                field.run_validation(value)
            assert exc_info.value.detail == ['Raised error']
| TestCharField |
python | ray-project__ray | python/ray/train/_internal/backend_executor.py | {
"start": 2375,
"end": 29694
} | class ____:
"""Main execution class for training backends.
This class holds a worker group and is responsible for executing the
training function on the workers, and collecting intermediate results
from ``session.report()``.
Args:
backend_config: The configurations for this
specific backend.
num_workers: Number of workers to use for training.
resources_per_worker (Optional[Dict[str, float]]):
Dictionary specifying the resources that will be
requested for each worker. Defaults to {"CPU": 1}.
max_retries: Number of retries when Ray actors fail.
Defaults to 3. Set to -1 for unlimited retries.
"""
def __init__(
    self,
    backend_config: BackendConfig,
    # TODO(xwjiang): Legacy Ray Train trainer clean up!
    trial_info: Optional[TrialInfo] = None,
    num_workers: int = 1,
    resources_per_worker: Optional[Dict[str, float]] = None,
    max_retries: int = 3,
):
    # Copy the caller's dict so later mutation on either side is isolated;
    # default to 1 CPU per worker.
    if resources_per_worker is None:
        self._resources_per_worker = {"CPU": 1}
    else:
        self._resources_per_worker = resources_per_worker.copy()
    self._backend_config = backend_config
    self._backend = backend_config.backend_cls()
    self._num_workers = num_workers
    self._max_failures = max_retries
    # A negative max_retries means "retry forever".
    if self._max_failures < 0:
        self._max_failures = float("inf")
    self._num_failures = 0
    self._last_failure = None
    self._initialization_hook = None
    self._placement_group = None
    self._trial_info = trial_info
    # Placeholder worker group until start() is called.
    self.worker_group = InactiveWorkerGroup()
    self.dataset_shards = None
    # Accelerator resources whose device-visibility env vars may be shared
    # across co-located workers; each entry pairs the resource name with its
    # enable-sharing env var and the visible-devices env var to populate.
    self._resource_configs = [
        ResourceConfig(
            ray_constants.NEURON_CORES,
            ENABLE_SHARE_NEURON_CORES_ACCELERATOR_ENV,
            NEURON_RT_VISIBLE_CORES_ENV_VAR,
        ),
        ResourceConfig(
            ray_constants.NPU,
            ENABLE_SHARE_NPU_RT_VISIBLE_DEVICES_ENV,
            ASCEND_RT_VISIBLE_DEVICES_ENV_VAR,
        ),
        # For AMD GPUs, they are using HIP_VISIBLE_DEVICES env var.
        ResourceConfig(
            ray_constants.GPU,
            ENABLE_SHARE_HIP_VISIBLE_DEVICES_ENV,
            HIP_VISIBLE_DEVICES_ENV_VAR,
        ),
    ]
    # Record the initialization time of BackendExecutor, which is
    # after trainer.fit() and before worker_group executes the training function.
    self._start_time_ms = int(time.time() * 1000)
    self.state_tracking_enabled = env_integer(RAY_TRAIN_ENABLE_STATE_TRACKING, 0)
def start(
    self,
    initialization_hook: Optional[Callable[[], None]] = None,
    train_cls: Optional[Type] = None,
    train_cls_args: Optional[Tuple] = None,
    train_cls_kwargs: Optional[Dict] = None,
):
    """Starts the worker group.

    Args:
        initialization_hook: Optional callable run on every worker before
            training starts; stored so it can be re-run on restart.
        train_cls: Optional actor class to instantiate on each worker.
        train_cls_args: Positional args for ``train_cls``.
        train_cls_kwargs: Keyword args for ``train_cls``.
    """
    self._create_placement_group()
    # Fall back to Ray's default scheduling if no group was created
    # (e.g. when running under Tune's existing placement group).
    placement_group = self._placement_group or "default"
    self.worker_group = WorkerGroup(
        num_workers=self._num_workers,
        resources_per_worker=self._resources_per_worker,
        actor_cls=train_cls,
        actor_cls_args=train_cls_args,
        actor_cls_kwargs=train_cls_kwargs,
        placement_group=placement_group,
    )
    # Hack to avoid OOMs.
    # This is just a temporary solution for Train loading entire checkpoints
    # into memory by ensuring that the rank 0 worker is on the same node as
    # trainable, thus allowing for lazy checkpoint transfer to be used.
    # See https://github.com/ray-project/ray/issues/33073
    # for more context.
    # TODO remove passing in trial_driver_ip.
    trial_driver_node_id = (
        self._trial_info.driver_node_id if self._trial_info else None
    )
    self.worker_group.sort_workers_by_node_id_and_gpu_id(trial_driver_node_id)
    try:
        if initialization_hook:
            # Keep the hook so a restart can re-apply it.
            self._initialization_hook = initialization_hook
            self.worker_group.execute(initialization_hook)

        # Always propagate the driver's DataContext to each worker in the group.
        from ray.data import DataContext

        def _set_driver_dataset_context(ctx: DataContext):
            DataContext._set_current(ctx)

        self.worker_group.execute(
            _set_driver_dataset_context,
            DataContext.get_current(),
        )

        # Env var overrides the backend's default for CUDA device sharing.
        share_cuda_visible_devices_enabled = bool(
            env_integer(
                ENABLE_SHARE_CUDA_VISIBLE_DEVICES_ENV,
                self._backend.share_cuda_visible_devices,
            )
        )
        if (
            self._resources_per_worker.get("GPU", 0) > 0
            and share_cuda_visible_devices_enabled
        ):
            self._share_cuda_visible_devices()
        # Apply the same sharing logic for the other accelerator types
        # (Neuron cores, NPUs, AMD GPUs) when their env vars enable it.
        for resource_config in self._resource_configs:
            if self._is_share_resources_enabled(
                resource_config.resource_name,
                resource_config.resource_enable_sharing_env_var,
            ):
                self._share_resource_ids(
                    resource_config.resource_name,
                    resource_config.share_resource_ids_env_var,
                )
        self._backend.on_start(self.worker_group, self._backend_config)
    except RayActorError as exc:
        # Worker actor died during startup: count the failure and retry by
        # tearing down and restarting the whole group.
        logger.exception(str(exc))
        logger.warning(
            "Failure occurred during startup. Restarting all workers and "
            "attempting to startup again."
        )
        self._increment_failures()
        self._restart()

    if self.state_tracking_enabled:
        # Imported lazily so state tracking stays optional.
        from ray.train._internal.state import TrainRunStateManager
        from ray.train._internal.state.state_actor import get_state_actor

        self.state_manager = TrainRunStateManager(state_actor=get_state_actor())
def _create_placement_group(self):
"""Creates a placement group if it does not exist.
If a placement group is already detected (Tune) this will be a no-op.
By default the placement group will be created with PACK strategy.
This is optimized for colocating GPUs on a minimal number of nodes.
This behavior can be overridden to use the SPREAD strategy by defining
``TRAIN_ENABLE_WORKER_SPREAD_ENV``
If a placement group is created it will be stored as
self._placement_group.
"""
current_placement_group = get_current_placement_group()
worker = ray._private.worker.global_worker
should_capture_child_tasks_in_placement_group = (
worker.should_capture_child_tasks_in_placement_group
)
should_create_placement_group = (
current_placement_group is None
or not should_capture_child_tasks_in_placement_group
)
if should_create_placement_group:
bundles = [
self._resources_per_worker.copy() for _ in range(self._num_workers)
]
use_spread = bool(env_integer(TRAIN_ENABLE_WORKER_SPREAD_ENV, 0))
strategy = "SPREAD" if use_spread else "PACK"
placement_group = ray.util.placement_group(bundles, strategy=strategy)
logger.debug("Waiting for placement group to start.")
timeout = env_integer(TRAIN_PLACEMENT_GROUP_TIMEOUT_S_ENV, 100)
ready, _ = ray.wait([placement_group.ready()], timeout=timeout)
if ready:
logger.debug("Placement group has started.")
else:
raise TimeoutError(
"Placement group creation timed out. Make sure your "
"cluster either has enough resources or use an "
"autoscaling cluster. If you are running on a cluster, "
"make sure you specify an address in `ray.init()`, for example, "
'`ray.init("auto")`. You can also increase the timeout by setting '
"the TRAIN_PLACEMENT_GROUP_TIMEOUT_S environment variable. "
"Current resources available: {}, resources requested by the "
"placement group: {}".format(
ray.available_resources(), placement_group.bundle_specs
)
)
self._placement_group = placement_group
def _share_cuda_visible_devices(self):
"""Sets CUDA_VISIBLE_DEVICES on all workers.
For each worker, CUDA_VISIBLE_DEVICES will be set to the GPU IDs
visible to all workers on that worker's node.
This allows GPU workers on the same node to communicate with one
another.
Example:
Setup:
- Node1:
- Worker1: {0, 1}
- Worker2: {2, 3}
- Node2:
- Worker3: {0, 1}
CUDA_VISIBLE_DEVICES:
- Worker1: "0,1,2,3"
- Worker2: "0,1,2,3"
- Worker3: "0,1"
"""
self._share_resource_ids(ray_constants.GPU, CUDA_VISIBLE_DEVICES_ENV_VAR)
def _share_resource_ids(self, resource: str, env_var: str):
"""Sets the given env_var on all workers.
For each worker, the cores/devices are visible to all the
workers on that worker's node.This allows workers on the
same node to communicate with one another.
Example:
Setup:
- Node1:
- Worker1: {0, 1}
- Worker2: {2, 3}
- Node2:
- Worker3: {0, 1}
NEURON_RT_VISIBLE_CORES/TPU_VISIBLE_CHIPS/...:
- Worker1: "0,1,2,3"
- Worker2: "0,1,2,3"
- Worker2: "0,1"
Args:
resource: The name of the resource/accelerator.
env_var: The name of the environment variable to set.
"""
node_ids_and_resource_ids = [
(
w.metadata.node_id,
w.metadata.resource_ids[resource],
)
for w in self.worker_group.workers
]
node_id_to_worker_id = defaultdict(set)
node_id_to_resource_ids = defaultdict(set)
for worker_id, (node_id, resource_ids) in enumerate(node_ids_and_resource_ids):
node_id_to_worker_id[node_id].add(worker_id)
node_id_to_resource_ids[node_id].update(resource_ids)
futures = []
for node_id, resource_ids in node_id_to_resource_ids.items():
resource_ids = sorted(resource_ids)
all_resource_ids = ",".join(resource_ids)
def set_resource_ids():
os.environ[env_var] = all_resource_ids
for worker_id in node_id_to_worker_id[node_id]:
futures.append(
self.worker_group.execute_single_async(worker_id, set_resource_ids)
)
ray.get(futures)
def _is_share_resources_enabled(self, resource_name: str, enable_sharing_env: str):
"""Whether to share resource IDs on all workers
based on enable_sharing_env.
This will return true if resources are requested and greater than 0.
Also, user can disable by configuring the `enable_sharing_env` to "0".
Args:
resource_name: The name of the resource/accelerator.
enable_sharing_env: The name of the environment variable
to check.
"""
has_resource_requested = self._resources_per_worker.get(resource_name, 0) > 0
return has_resource_requested and ray_constants.env_bool(
enable_sharing_env, True
)
def _create_rank_world_size_mappings(self) -> List[Dict]:
"""Create rank and world size mappings for workers.
There are three maps returned:
- local_rank_map, which maps from worker world_rank to local_rank.
- local_world_size_map, which maps from world_rank to local_world_size
- node_rank_map, which maps from world rank to node rank
Example:
Worker 0: node 0
Worker 1: node 0
Worker 2: node 1
Worker 3: node 0
Worker 4: node 1
Workers 0, 1, 3 are on node 0.
Workers 2, 4 are on node 1.
Expected local_rank_map:
{
0 -> 0,
1 -> 1,
2 -> 0,
3 -> 2,
4 -> 1
}
Expected local_world_size_map:
{
0 -> 3,
1 -> 3,
2 -> 2,
3 -> 3,
4 -> 2
}
Expected node_rank_map:
{
0 -> 0,
1 -> 0,
2 -> 1,
3 -> 0,
4 -> 1
}
"""
local_rank_map = {} # map from world rank to local rank
local_world_size_map = {} # map from world rank to local world size
node_rank_map = {} # map from world rank to node rank
node_ids = {} # map from node id to node index
node_cnt = 0 # count the number of nodes
node_id_dict = defaultdict(
int
) # map from node id to the number of workers on it.
for world_rank in range(len(self.worker_group)):
worker = self.worker_group.workers[world_rank]
node_id = worker.metadata.node_id
local_rank_map[world_rank] = node_id_dict[node_id]
node_id_dict[node_id] += 1
if node_id not in node_ids:
node_ids[node_id] = node_cnt
node_cnt += 1
node_rank_map[world_rank] = node_ids[node_id]
for world_rank in range(len(self.worker_group)):
worker = self.worker_group.workers[world_rank]
node_id = worker.metadata.node_id
local_world_size_map[world_rank] = node_id_dict[node_id]
workers_info = "\n".join(
[
f"- (node_id={w.metadata.node_id}, ip={w.metadata.node_ip}, "
f"pid={w.metadata.pid}) world_rank={i}, "
f"local_rank={local_rank_map[i]}, node_rank={node_rank_map[i]}"
for i, w in enumerate(self.worker_group.workers)
]
)
logger.info(f"Started distributed worker processes: \n{workers_info}")
return local_rank_map, local_world_size_map, node_rank_map
def start_training(
self,
train_func: Callable[[], T],
datasets: Dict[str, Dataset],
metadata: Dict[str, Any],
data_config: DataConfig,
storage: StorageContext,
checkpoint: Optional[Checkpoint] = None,
) -> None:
"""Executes a training function on all workers in a separate thread.
``finish_training`` should be called after this.
Args:
train_func: The training function to run on each worker.
datasets: The base datasets.
data_config: The config object for creating dataset shards for workers.
checkpoint: The checkpoint data that
should be loaded onto each worker and accessed by the
training function via ``session.get_checkpoint()``. If this
is ``None`` then no checkpoint will be loaded.
"""
use_detailed_autofilled_metrics = env_integer(
ENABLE_DETAILED_AUTOFILLED_METRICS_ENV, 0
)
# First initialize the session.
def initialize_session(
train_func,
world_rank,
local_rank,
node_rank,
local_world_size,
world_size,
trial_info,
checkpoint,
dataset_shard,
metadata,
storage,
):
try:
init_session(
training_func=train_func,
world_rank=world_rank,
local_rank=local_rank,
node_rank=node_rank,
local_world_size=local_world_size,
world_size=world_size,
trial_info=trial_info,
dataset_shard=dataset_shard,
metadata=metadata,
checkpoint=checkpoint,
detailed_autofilled_metrics=use_detailed_autofilled_metrics,
storage=storage,
)
except ValueError:
raise TrainBackendError(
"Attempting to start training but a "
"previous training run is still ongoing. "
"You must call `finish_training` before "
"calling `start_training` again."
)
if self.dataset_shards is None:
actors = [worker.actor for worker in self.worker_group.workers]
node_ids = [worker.metadata.node_id for worker in self.worker_group.workers]
self.dataset_shards = data_config.configure(
datasets,
world_size=len(self.worker_group),
worker_handles=actors,
worker_node_ids=node_ids,
)
(
local_rank_map,
local_world_size_map,
node_rank_map,
) = self._create_rank_world_size_mappings()
futures = []
for index in range(len(self.worker_group)):
futures.append(
self.worker_group.execute_single_async(
index,
initialize_session,
world_rank=index,
local_rank=local_rank_map[index],
node_rank=node_rank_map[index],
local_world_size=local_world_size_map[index],
world_size=len(self.worker_group),
trial_info=self._trial_info,
train_func=train_func,
dataset_shard=self.dataset_shards[index],
metadata=metadata,
checkpoint=checkpoint,
storage=storage,
)
)
self._backend.on_training_start(self.worker_group, self._backend_config)
self.get_with_failure_handling(futures)
# Register Train Run before training starts
if self.state_tracking_enabled:
from ray.train._internal.state.schema import RunStatusEnum
core_context = ray.runtime_context.get_runtime_context()
self.state_manager.register_train_run(
run_id=self._trial_info.run_id,
run_name=self._trial_info.experiment_name,
job_id=core_context.get_job_id(),
controller_actor_id=core_context.get_actor_id(),
datasets=datasets,
worker_group=self.worker_group,
start_time_ms=self._start_time_ms,
run_status=RunStatusEnum.RUNNING,
resources=[self._resources_per_worker] * self._num_workers,
)
# Run the training function asynchronously in its own thread.
def train_async():
session = get_session()
session.start()
self.worker_group.execute_async(train_async)
def get_next_results(self) -> Optional[List[_TrainingResult]]:
"""Fetches the next ``_TrainingResult`` from each worker.
Each ``_TrainingResult`` is expected to correspond to the same step from
each worker (e.g. the same call to ``train.report()``).
Returns:
A list of ``_TrainingResult``s or ``None`` if there are no more results
since the training function has exited on all workers.
"""
def get_next():
session = _get_session("get_next_results")
try:
result = session.get_next()
except RuntimeError:
# Training thread has not been started yet.
raise TrainBackendError(
"`get_next_results` has been called "
"before `start_training`. Please call "
"`start_training` before "
"`get_next_results`."
)
return result
# Get next result from each worker.
futures = self.worker_group.execute_async(get_next)
results = self.get_with_failure_handling(futures)
# Check if any worker returned None.
if any(r is None for r in results):
# Either all workers have results or none of them do.
if not all(r is None for r in results):
raise RuntimeError(
"Some workers returned results while "
"others didn't. Make sure that "
"`session.report()` are called the "
"same number of times on all workers."
)
else:
# Return None if all results are None.
return None
return results
def pause_reporting(self):
"""Disable workers from enqueuing results from ``session.report()``.
Note: Already reported results may still be enqueued at this point,
and should be handled appropriately.
"""
def pause_session_reporting():
session = _get_session("pause_reporting")
return session.pause_reporting()
futures = self.worker_group.execute_async(pause_session_reporting)
self.get_with_failure_handling(futures)
def finish_training(self):
"""Finish training and return final results. Propagate any exceptions.
Blocks until training is finished on all workers.
Assumes `start_training` has already been called.
Returns:
A list of return values from calling ``train_func`` on each worker.
Each item corresponds to the return value from a single worker.
"""
def end_training():
session = _get_session("finish_training")
try:
# session.finish raises any Exceptions from training.
output = session.finish()
finally:
# Shutdown session even if session.finish() raises an
# Exception.
shutdown_session()
return output
futures = self.worker_group.execute_async(end_training)
results = self.get_with_failure_handling(futures)
return results
def report_final_run_status(
self,
errored: bool = False,
failed_rank: Optional[int] = None,
stack_trace: Optional[str] = None,
):
"""Report the final train run status, error, and end time to TrainStateActor."""
if self.state_tracking_enabled:
from ray.train._internal.state.schema import (
MAX_ERROR_STACK_TRACE_LENGTH,
RunStatusEnum,
)
if errored:
run_status = RunStatusEnum.ERRORED
status_detail = ""
if failed_rank is not None:
status_detail += f"Rank {failed_rank} worker raised an error. \n"
if stack_trace is not None:
# Keep only the last part of the stack trace if it's too long.
status_detail += stack_trace[-MAX_ERROR_STACK_TRACE_LENGTH:]
else:
run_status = RunStatusEnum.FINISHED
status_detail = ""
self.state_manager.end_train_run(
run_id=self._trial_info.run_id,
run_status=run_status,
status_detail=status_detail,
end_time_ms=int(time.time() * 1000),
)
def get_with_failure_handling(self, remote_values):
"""Gets the remote values while handling for worker failures.
This method should be called instead of ``ray.get()`` directly in
order to handle worker failures.
If a worker failure is identified, backend specific failure handling
is executed and a ``TrainingWorkerError`` is raised.
Args:
remote_values: List of object refs representing functions
that may fail in the middle of execution. For example, running
a Train training loop in multiple parallel actor calls.
Returns:
The resolved objects represented by the passed in ObjectRefs.
"""
success, exception = check_for_failure(remote_values)
if success:
return ray.get(remote_values)
else:
self._last_failure = exception
self._increment_failures()
logger.warning(
"Failure identified during training. Restarting all workers and "
"continuing training from latest checkpoint."
)
self._restart()
raise TrainingWorkerError
def shutdown(self, graceful_termination: bool = True):
"""Shuts down the workers in the worker group.
Args:
graceful_termination: If set to True, attempt to clean up the backend
before terminating the Ray actors.
"""
if graceful_termination:
try:
self._backend.on_shutdown(self.worker_group, self._backend_config)
except RayActorError:
logger.warning(
"Graceful shutdown of backend failed. This is "
"expected if one of the workers has crashed."
)
if graceful_termination:
self.worker_group.shutdown()
else:
self.worker_group.shutdown(patience_s=0)
self.worker_group = InactiveWorkerGroup()
if self._placement_group:
remove_placement_group(self._placement_group)
self._placement_group = None
self.dataset_shards = None
def is_started(self):
return not isinstance(self.worker_group, InactiveWorkerGroup)
def _restart(self):
self.worker_group.shutdown()
if self._initialization_hook is not None:
initialization_hook = self._initialization_hook
else:
initialization_hook = None
if self._placement_group:
remove_placement_group(self._placement_group)
self._placement_group = None
self.start(initialization_hook=initialization_hook)
def _increment_failures(self):
self._num_failures += 1
if self._num_failures >= self._max_failures:
failure = self._last_failure
self._last_failure = None
if self._max_failures > 0:
exc = RuntimeError(
f"Training has failed after {self._num_failures} attempts."
)
raise exc.with_traceback(None) from failure
else:
raise failure
def get_worker_group(self):
return self.worker_group
def _get_num_failures(self):
return self._num_failures
| BackendExecutor |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 28049,
"end": 30941
} | class ____(NonStrictDataModel):
"""
:param id: ID
:type id: str
:param name: Name
:type name: str
:param next_task: Next task in the queue
:type next_task: IdNameEntry
:param num_tasks: Number of task entries in the queue
:type num_tasks: int
"""
_schema = {
"properties": {
"id": {"description": "ID", "type": ["string", "null"]},
"name": {"description": "Name", "type": ["string", "null"]},
"next_task": {
"description": "Next task in the queue",
"oneOf": [{"$ref": "#/definitions/id_name_entry"}, {"type": "null"}],
},
"num_tasks": {
"description": "Number of task entries in the queue",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
next_task: Any = None,
num_tasks: Optional[int] = None,
**kwargs: Any
) -> None:
super(QueueEntry, self).__init__(**kwargs)
self.id = id
self.name = name
self.next_task = next_task
self.num_tasks = num_tasks
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("next_task")
def next_task(self) -> Any:
return self._property_next_task
@next_task.setter
def next_task(self, value: Any) -> None:
if value is None:
self._property_next_task = None
return
if isinstance(value, dict):
value = IdNameEntry.from_dict(value)
else:
self.assert_isinstance(value, "next_task", IdNameEntry)
self._property_next_task = value
@schema_property("num_tasks")
def num_tasks(self) -> Optional[int]:
return self._property_num_tasks
@num_tasks.setter
def num_tasks(self, value: Optional[int]) -> None:
if value is None:
self._property_num_tasks = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "num_tasks", six.integer_types)
self._property_num_tasks = value
| QueueEntry |
python | kamyu104__LeetCode-Solutions | Python/linked-list-in-binary-tree.py | {
"start": 1603,
"end": 2280
} | class ____(object):
def isSubPath(self, head, root):
"""
:type head: ListNode
:type root: TreeNode
:rtype: bool
"""
def dfs(head, root):
if not head:
return True
if not root:
return False
return root.val == head.val and \
(dfs(head.next, root.left) or
dfs(head.next, root.right))
if not head:
return True
if not root:
return False
return dfs(head, root) or \
self.isSubPath(head, root.left) or \
self.isSubPath(head, root.right)
| Solution2 |
python | weaviate__weaviate-python-client | weaviate/debug/sync.py | {
"start": 161,
"end": 216
} | class ____(_DebugExecutor[ConnectionSync]):
pass
| _Debug |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_extended_resource_claim_status.py | {
"start": 383,
"end": 5654
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'request_mappings': 'list[V1ContainerExtendedResourceRequest]',
'resource_claim_name': 'str'
}
attribute_map = {
'request_mappings': 'requestMappings',
'resource_claim_name': 'resourceClaimName'
}
def __init__(self, request_mappings=None, resource_claim_name=None, local_vars_configuration=None): # noqa: E501
"""V1PodExtendedResourceClaimStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._request_mappings = None
self._resource_claim_name = None
self.discriminator = None
self.request_mappings = request_mappings
self.resource_claim_name = resource_claim_name
@property
def request_mappings(self):
"""Gets the request_mappings of this V1PodExtendedResourceClaimStatus. # noqa: E501
RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request in the generated ResourceClaim. # noqa: E501
:return: The request_mappings of this V1PodExtendedResourceClaimStatus. # noqa: E501
:rtype: list[V1ContainerExtendedResourceRequest]
"""
return self._request_mappings
@request_mappings.setter
def request_mappings(self, request_mappings):
"""Sets the request_mappings of this V1PodExtendedResourceClaimStatus.
RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request in the generated ResourceClaim. # noqa: E501
:param request_mappings: The request_mappings of this V1PodExtendedResourceClaimStatus. # noqa: E501
:type: list[V1ContainerExtendedResourceRequest]
"""
if self.local_vars_configuration.client_side_validation and request_mappings is None: # noqa: E501
raise ValueError("Invalid value for `request_mappings`, must not be `None`") # noqa: E501
self._request_mappings = request_mappings
@property
def resource_claim_name(self):
"""Gets the resource_claim_name of this V1PodExtendedResourceClaimStatus. # noqa: E501
ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. # noqa: E501
:return: The resource_claim_name of this V1PodExtendedResourceClaimStatus. # noqa: E501
:rtype: str
"""
return self._resource_claim_name
@resource_claim_name.setter
def resource_claim_name(self, resource_claim_name):
"""Sets the resource_claim_name of this V1PodExtendedResourceClaimStatus.
ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. # noqa: E501
:param resource_claim_name: The resource_claim_name of this V1PodExtendedResourceClaimStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and resource_claim_name is None: # noqa: E501
raise ValueError("Invalid value for `resource_claim_name`, must not be `None`") # noqa: E501
self._resource_claim_name = resource_claim_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodExtendedResourceClaimStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodExtendedResourceClaimStatus):
return True
return self.to_dict() != other.to_dict()
| V1PodExtendedResourceClaimStatus |
python | joke2k__faker | faker/providers/job/es/__init__.py | {
"start": 71,
"end": 18940
} | class ____(BaseProvider):
# Source:
# https://www.ilo.org/public/spanish/bureau/stat/isco/docs/struct08.xls
jobs: ElementsType[str] = (
"Abogado",
"Acarreador de agua",
"Recolector de leña",
"Ayudante de cámara",
"Actor",
"Administrador de sistemas",
"Agente de administración tributaria",
"Agente de aduanas",
"Inspector de fronteras",
"Agente de bolsa",
"Agente de compras",
"Consignatario",
"Agente de empleo",
"Agente de seguros",
"Agente de servicios de expedición de licencias y permisos",
"Agente de servicios de seguridad social",
"Agente inmobiliario",
"Agricultor",
"Agrónomo",
"Albañil",
"Alfarero",
"Analista de gestión y organización",
"Analista de sistemas",
"Analista financiero",
"Aparejador",
"Empalmador de cables",
"Curtidor",
"Apicultor",
"Sericultor",
"Archivista",
"Curador de museos",
"Arquitecto",
"Paisajista",
"Artesano",
"Artista plástico",
"Asesor financiero y en inversiones",
"Asesor de inversiones",
"Asistente de venta",
"Astrólogo",
"Adivinador",
"Deportista",
"Audiólogo",
"Escritor",
"Auxiliar de maestro",
"Auxiliar de servicio de abordo",
"Auxiliar laico de las religión",
"Avicultor",
"Ayudante de ambulancia",
"Ayudante de cocina",
"Bailarín",
"Coreógrafo",
"Barnizador",
"Barrendero",
"Bibliotecarios",
"Focumentalista",
"Biólogo",
"Botánico",
"Zoólogo",
"Zoólogo",
"Bombero",
"Buzo",
"Cajero de banco",
"Cajero",
"Tipógrafo",
"Camarero de barra",
"Camarero de mesa",
"Capitán decubierta",
"Oficial de cubierta",
"Carnicero",
"Pescadero",
"Carpintero",
"Cartógrafo",
"Agrimensor",
"Catador de alimentos y bebidas",
"Catador de bebidas",
"Cazador",
"Tramper",
"Chapista",
"Calderero",
"Chef",
"Clasificador de desechos",
"Clasificador de productos",
"Cobrador",
"Cocinero",
"Cocinero de comidas rápidas",
"Codificador de datos",
"Corrector de pruebas de imprenta",
"Comerciante de tiendas",
"Conductor de autobús",
"Conductor de tranvía",
"Conductor de automóviles",
"Conductor de taxis",
"Conductor de camiones pesados",
"Conductor de motocicletas",
"Conductor de vehículos accionados a pedal o a brazo",
"Conductor de vehículos y máquinas de tracción animal",
"Conserje",
"Constructor de casas",
"Contable",
"Controlador de instalaciones de procesamiento de productos químicos",
"Controlador de procesos",
"Controlador de tráfico aéreo",
"Costurero",
"Bordador",
"Criador de ganado",
"Cristalero",
"Cuidador de animales",
"Cuidador de niños",
"Declarante de aduana",
"Gestor de aduana",
"Delineante",
"Dibujante técnico",
"Demostrador de tiendas",
"Dentista",
"Ayudante de odontología",
"Desarrollador de software",
"Desarrollador Web y multimedia",
"Nutricionista",
"Dinamitero",
"Director de servicios de bienestar social",
"Director de cine",
"Director de teatro",
"Director de empresas de abastecimiento, distribución y afines",
"Director de empresas de construcción",
"Director de explotaciones de minería",
"Director de industrias manufactureras",
"Director de investigación y desarrollo",
"Director de políticas y planificación",
"Director de producción agropecuaria y silvicultura",
"Director de producción de piscicultura y pesca",
"Director de publicidad y relaciones públicas",
"Director de recursos humanos",
"Director de servicios de cuidado de las personas de edad",
"Director de servicios de cuidados infantiles",
"Director de servicios de educación",
"Director de servicios de salud",
"Director de servicios de tecnología de la información y las comunicaciones",
"Director de ventas y comercialización",
"Director financiero",
"Gerente general",
"Diseñador de productos",
"Diseñador de prendas",
"Diseñador gráfico",
"Diseñador multimedia",
"Diseñador de bases de datos",
"Administrador de bases de datos",
"Diseñador de interior",
"Decorador de interior",
"Ebanista",
"Economista",
"Ecónomo y mayordomos domésticos",
"Mayordomo doméstico",
"Educador para necesidades especiales",
"Electricista de obras",
"Electrotécnico",
"Empacador manual",
"Empleado de agencia de viajes",
"Empleado de archivos",
"Empleado de biblioteca",
"Empleado de centro de llamadas",
"Empleado de contabilidad y cálculo de costos",
"Empleado de control de abastecimientos e inventario",
"Empleado de servicios de apoyo a la producción",
"Empleado de servicios de correos",
"Empleado de servicios de transporte",
"Empleado de servicios estadísticos, financieros y de seguros",
"Empleado de ventanillas de informaciones",
"Empleado del servicio de personal",
"Empleado encargado de las nóminas",
"Encuadernador",
"Ensamblador de equipos eléctricos",
"Ensamblador de equipos electrónicos",
"Ensamblador de maquinaria mecánica",
"Entrenador deportivo",
"Árbitro deportivo",
"Entrevistador de encuestas",
"Entrevistador de investigaciones de mercados",
"Escribiente público",
"Especialista en formación del personal",
"Especialista en métodos pedagógicos",
"Especialista en políticas de administración",
"Especialista en políticas y servicios de personal",
"Especialista en tratamientos de belleza",
"Expendedor de gasolineras",
"Fabricante de instrumentos musicales",
"Afinador de instrumentos musicales",
"Farmacéutico",
"Filósofo",
"Historiador",
"Especialista en ciencias políticas",
"Físico",
"Astrónomos",
"Fisioterapeuta",
"Fontanero",
"Fotógrafo",
"Fumigador",
"Controlador de plagas y malas hierbas",
"Geólogo",
"Ggeofísico",
"Gerente de centros deportivos, de esparcimiento y culturales",
"Gerente de comercios al por mayor y al por menor",
"Gerente de hoteles o restaurantes",
"Gerente de sucursales de bancos, de servicios financieros y de seguros",
"Grabador de datos",
"Guardafrenos",
"Guardagujas",
"Agente de maniobras",
"Guardián de prisión",
"Guardia de protección",
"Guía de turismo",
"Herramentista",
"Herrero",
"Gorjadore",
"Impresor",
"Ingeniero civil",
"Ingeniero de minas",
"Ingeniero metalúrgico",
"Ingeniero electricista",
"Ingeniero electrónico",
"Ingeniero en telecomunicaciones",
"Ingeniero industrial",
"Ingeniero mecánico",
"Ingeniero medioambiental",
"Ingeniero químico",
"Inspector de la salud laboral",
"Inspector medioambiental y afines",
"Inspector de policía",
"Detective",
"Instalador de material aislante y de insonorización",
"Instalador y reparador de líneas eléctricas",
"Instalador y reparador en tecnología de la información y las comunicaciones",
"Instructor de autoescuela",
"Instructor de educación física y actividades recreativas",
"Instructor en tecnologías de la información",
"Jefe de pequeñas poblaciones",
"Joyero",
"Orfebre",
"Platero",
"Juez",
"Lavador de vehículos",
"Lavador de ventanas",
"Lavandero",
"Planchador manuales",
"Limpiador de fachadas",
"Deshollinador",
"Limpiador y asistente de oficinas, hoteles y otros establecimientos",
"Limpiador y asistente doméstico",
"Locutor de radio",
"Locutor de televisión",
"Maestro de enseñanza primaria",
"Maestro preescolar",
"Mampostero",
"Labrante",
"Tronzador",
"Grabador de piedra",
"Maquinista de locomotoras",
"Marinero de cubierta",
"Matemático",
"Actuario",
"Estadístico",
"Mecánico y ajustador electricista",
"Mecánico y reparador de instrumentos de precisión",
"Mecánico y reparador de máquinas agrícolas e industriales",
"Mecánico y reparador de motores de avión",
"Mecánico y reparador de vehículos de motor",
"Mecánico y reparador en electrónica",
"Mecánico-montador de instalaciones de refrigeración y climatización",
"Médico especialista",
"Médico general",
"Mensajero",
"Mandader",
"Maleter",
"Repartidor",
"Meteorólogo",
"Minero",
"Operador de instalaciones mineras",
"Modelo de moda, arte y publicidad",
"Moldeador y machero",
"Montador de estructuras metálicas",
"Músico",
"Cantante",
"Compositor",
"Oficial de las fuerzas armadas",
"Oficial de préstamos y créditos",
"Oficial maquinistas en navegación",
"Oficinista general",
"Operador de autoelevadoras",
"Operador de grúas y aparatos elevadores",
"Operador de incineradores, instalaciones de tratamiento de agua",
"Operador de instalaciones de tratamiento de agua",
"Operador de instalaciones de procesamiento de la madera",
"Operador de instalaciones de procesamiento de metales",
"Operador de instalaciones de procesamiento de minerales y rocas",
"Operador de instalaciones de producción de energía",
"Operador de instalaciones de refinación de petróleo y gas natural",
"Operador de instalaciones de vidriería y cerámica",
"Operador de instalaciones para la preparación de pasta para papel y papel",
"Operador de maquinaria agrícola y forestal móvil",
"Operador de máquinas de blanqueamiento, teñido y limpieza de tejidos",
"Operador de máquinas de coser",
"Operador de máquinas de embalaje, embotellamiento y etiquetado ",
"Operador de máquinas de movimiento de tierras",
"Operador de máquinas de preparación de fibras, hilado y devanado",
"Operador de máquinas de procesamiento de texto y mecanógrafos",
"Operador de máquinas de tratamiento de pieles y cueros",
"Operador de máquinas de vapor y calderas",
"Operador de máquinas lavarropas",
"Operador de máquinas para elaborar alimentos y productos afines",
"Operador de máquinas para fabricar cemento y otros productos minerales",
"Operador de máquinas para fabricar productos de caucho",
"Operador de máquinas para fabricar productos de material plástico",
"Operador de máquinas para fabricar productos de papel",
"Operador de máquinas para fabricar productos fotográficos",
"Operador de máquinas para la fabricación de calzado",
"Operador de máquinas pulidoras, galvanizadoras y recubridoras de metales ",
"Operador de plantas y máquinas de productos químicos",
"Operador de telar y otras máquinas tejedoras",
"Operario de la conservación de frutas, legumbres y verduras",
"Operario de la elaboración de productos lácteos",
"Operario del tratamiento de la madera",
"Operario en cemento armado y enfoscador",
"Optometrista",
"Organizador de conferencias y eventos",
"Personal de limpieza",
"Miembro de las fuerzas armadas",
"Profesional de nivel medio en actividades culturales y artísticas",
"Profesor de artes",
"Profesor de idiomas",
"Profesor de música",
"Panaderos, pasteleros y confiteros",
"Parquetero y colocador de suelos",
"Patronista y cortador de tela",
"Peluqueros",
"Peón de carga",
"Peón de explotaciones agrícolas",
"Peón de explotaciones de cultivos mixtos y ganaderos",
"Peón de explotaciones ganaderas",
"Peón de jardinería y horticultura",
"Peón de la construcción de edificios",
"Peón de minas y canteras",
"Peón de obras públicas y mantenimiento",
"Peón de pesca y acuicultura",
"Peón forestales",
"Perforador y sondista de pozos",
"Periodista",
"Personal de pompas fúnebres y embalsamador",
"Personal directivo de la administración pública",
"Personas que realizan trabajos varios",
"Pescador, cazador, tramperos y recolector de subsistencia",
"Pescador de agua dulce y en aguas costeras",
"Pescador de alta mar",
"Piloto de aviación",
"Pintor y empapelador",
"Policías",
"Practicante paramédico",
"Practicante y asistente médico",
"Preparador y elaborador de tabaco y sus productos",
"Prestamista",
"Productor y trabajador calificado de explotaciones agropecuarias mixtas",
"Profesional de enfermería",
"Profesional de la protección medioambiental",
"Profesional de la publicidad y la comercialización",
"Profesional de la salud y la higiene laboral y ambiental",
"Profesional de medicina",
"Profesional de medicina alternativa",
"Profesional de nivel medio de enfermería",
"Profesional de nivel medio de medicina tradicional y alternativa",
"Profesional de nivel medio de medicina alternativa",
"Profesional de nivel medio de partería",
"Profesional de nivel medio de servicios estadísticos o matemáticos",
"Profesional de nivel medio del derecho y servicios legales",
"Profesional de partería",
"Profesional de relaciones públicas",
"Profesional de ventas de tecnología de la información y las comunicaciones",
"Profesional de ventas técnicas y médicas",
"Profesional del trabajo social",
"Profesional en redes de computadores",
"Profesional religioso",
"Profesor de enseñanza secundaria",
"Profesor de formación profesional",
"Profesor de universidades y de la enseñanza superior",
"Programador de aplicaciones",
"Psicólogo",
"Pulidor de metales y afilador de herramientas",
"Químico",
"Recepcionista de hoteles",
"Recepcionista",
"Receptor de apuestas",
"Recolector de basura y material reciclable",
"Recolector de dinero en aparatos de venta automática y lector de medidores",
"Redactor de carteles, pintor decorativos y grabador",
"Regulador y operador de máquinas de labrar madera",
"Regulador y operador de máquinas y herramientas",
"Reparador de bicicletas",
"Reponedor de estanterías",
"Representante comercial",
"Revisor y cobrador de los transportes públicos",
"Revocador",
"Modisto",
"Peletero",
"Sombrerero",
"Secretario administrativo",
"Secretario ejecutivo",
"Secretario (general)",
"Secretario jurídicos",
"Secretario médicos",
"Sociólogo",
"Antropólogo",
"Soldador y oxicortador",
"Soplador de vidrio",
"Modelador de vidrio",
"Laminador de vidrio",
"Cortador de vidrio",
"Pulidor de vidrio",
"Suboficial de las fuerzas armadas",
"Supervisor de industria manufacturera",
"Supervisor de la construcción",
"Supervisor de mantenimiento y limpieza en oficinas, hoteles y otros establecimientos",
"Supervisor de secretaría",
"Supervisor de tiendas y almacenes",
"Supervisor en ingeniería de minas",
"Tapicero",
"Colchonero",
"Tasador",
"Techador",
"Técnico agropecuario",
"Técnico de telecomunicaciones",
"Técnico de la Web",
"Técnico de laboratorio médico",
"Técnico de prótesis médicas y dentales",
"Técnico de radiodifusión y grabación audio visual",
"Técnico en aparatos de diagnóstico y tratamiento médico",
"Técnico en asistencia al usuario de tecnología de la información y las comunicaciones",
"Técnico en ciencias biológicas",
"Técnico en ciencias físicas y químicas",
"Técnico en documentación sanitaria",
"Técnico en electrónica",
"Técnico en galerías de arte, museos y bibliotecas",
"Técnico en ingeniería civil",
"Técnico en ingeniería de minas y metalurgia",
"Técnico en ingeniería mecánica",
"Técnico en operaciones de tecnología de la información y las comunicaciones",
"Técnico en optometría y ópticos",
"Técnico en química industrial",
"Técnico en redes y sistemas de computadores",
"Técnico en seguridad aeronáutica",
"Técnico forestal",
"Asistente farmacéutico",
"Asistente fisioterapeuta",
"Asistente veterinario",
"Telefonista",
"Tenedor de libros",
"Trabajador agrícola de subsistencia",
"Trabajador agropecuario de subsistencia",
"Trabajador ambulante de servicios",
"Trabajador comunitario de la salud",
"Trabajador de explotaciones de acuicultura",
"Trabajador de cuidados personales a domicilio",
"Trabajador de cuidados personales en instituciones",
"Trabajador forestal calificado",
"Trabajador pecuario de subsistencia",
"Trabajador social de nivel medio",
"Traductor e intérprete",
"Lingüista",
"Urbanistas e ingenieros de tránsito",
"Vendedor ambulantes de productos comestibles",
"Vendedor ambulantes (excluyendo de comida)",
"Vendedor de comidas al mostrador",
"Vendedor de quioscos y de puestos de mercado",
"Vendedor por teléfono",
"Vendedor puerta a puerta",
"Veterinario",
"Zapatero",
"Miembro del poder legislativo",
)
| Provider |
python | bokeh__bokeh | src/bokeh/core/types.py | {
"start": 2033,
"end": 2151
} | class ____(TypedDict):
type: Literal["rect"]
sx0: float
sx1: float
sy0: float
sy1: float
| RectGeometry |
python | getlogbook__logbook | src/logbook/compat.py | {
"start": 7739,
"end": 9840
} | class ____:
"""A context manager that copies and restores the warnings filter upon
exiting the context, and logs warnings using the logbook system.
The :attr:`~logbook.LogRecord.channel` attribute of the log record will be
the import name of the warning.
Example usage:
.. code-block:: python
from logbook.compat import redirected_warnings
from warnings import warn
with redirected_warnings():
warn(DeprecationWarning("logging should be deprecated"))
"""
def __init__(self):
self._entered = False
def message_to_unicode(self, message):
return str(message)
def make_record(self, message, exception, filename, lineno):
category = exception.__name__
if exception.__module__ not in ("exceptions", "builtins"):
category = exception.__module__ + "." + category
rv = logbook.LogRecord(category, logbook.WARNING, message)
# we don't know the caller, but we get that information from the
# warning system. Just attach them.
rv.filename = filename
rv.lineno = lineno
return rv
def start(self):
if self._entered: # pragma: no cover
raise RuntimeError("Cannot enter %r twice" % self) # noqa: UP031
self._entered = True
self._filters = warnings.filters
warnings.filters = self._filters[:]
self._showwarning = warnings.showwarning
def showwarning(message, category, filename, lineno, file=None, line=None):
message = self.message_to_unicode(message)
record = self.make_record(message, category, filename, lineno)
logbook.dispatch_record(record)
warnings.showwarning = showwarning
def end(self, etype=None, evalue=None, tb=None):
if not self._entered: # pragma: no cover
raise RuntimeError("Cannot exit %r without entering first" % self) # noqa: UP031
warnings.filters = self._filters
warnings.showwarning = self._showwarning
__enter__ = start
__exit__ = end
| redirected_warnings |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/layers.py | {
"start": 20368,
"end": 21400
} | class ____(Layer):
""" Nearest neighbor up sampling of the input. Repeats the rows and
columns of the data by size[0] and size[1] respectively.
Parameters:
-----------
size: tuple
(size_y, size_x) - The number of times each axis will be repeated.
"""
def __init__(self, size=(2,2), input_shape=None):
self.prev_shape = None
self.trainable = True
self.size = size
self.input_shape = input_shape
def forward_pass(self, X, training=True):
self.prev_shape = X.shape
# Repeat each axis as specified by size
X_new = X.repeat(self.size[0], axis=2).repeat(self.size[1], axis=3)
return X_new
def backward_pass(self, accum_grad):
# Down sample input to previous shape
accum_grad = accum_grad[:, :, ::self.size[0], ::self.size[1]]
return accum_grad
def output_shape(self):
channels, height, width = self.input_shape
return channels, self.size[0] * height, self.size[1] * width
| UpSampling2D |
python | cherrypy__cherrypy | cherrypy/test/benchmark.py | {
"start": 3918,
"end": 13634
} | class ____:
"""A session of 'ab', the Apache HTTP server benchmarking tool.
Example output from ab:
This is ApacheBench, Version 2.0.40-dev <$Revision: 1.121.2.1 $> apache-2.0
Copyright (c) 1996 Adam Twiss, Zeus Technology Ltd,
http://www.zeustech.net/
Copyright (c) 1998-2002 The Apache Software Foundation,
http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 100 requests
Completed 200 requests
Completed 300 requests
Completed 400 requests
Completed 500 requests
Completed 600 requests
Completed 700 requests
Completed 800 requests
Completed 900 requests
Server Software: CherryPy/3.1beta
Server Hostname: 127.0.0.1
Server Port: 54583
Document Path: /static/index.html
Document Length: 14 bytes
Concurrency Level: 10
Time taken for tests: 9.643867 seconds
Complete requests: 1000
Failed requests: 0
Write errors: 0
Total transferred: 189000 bytes
HTML transferred: 14000 bytes
Requests per second: 103.69 [#/sec] (mean)
Time per request: 96.439 [ms] (mean)
Time per request: 9.644 [ms] (mean, across all concurrent requests)
Transfer rate: 19.08 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 2.9 0 10
Processing: 20 94 7.3 90 130
Waiting: 0 43 28.1 40 100
Total: 20 95 7.3 100 130
Percentage of the requests served within a certain time (ms)
50% 100
66% 100
75% 100
80% 100
90% 100
95% 100
98% 100
99% 110
100% 130 (longest request)
Finished 1000 requests
"""
parse_patterns = [
('complete_requests', 'Completed', rb'^Complete requests:\s*(\d+)'),
('failed_requests', 'Failed', rb'^Failed requests:\s*(\d+)'),
(
'requests_per_second',
'req/sec',
rb'^Requests per second:\s*([0-9.]+)',
),
(
'time_per_request_concurrent',
'msec/req',
rb'^Time per request:\s*([0-9.]+).*concurrent requests\)$',
),
('transfer_rate', 'KB/sec', rb'^Transfer rate:\s*([0-9.]+)'),
]
def __init__(
self,
path=SCRIPT_NAME + '/hello',
requests=1000,
concurrency=10,
):
"""Initialize an Apache Benchmark session."""
self.path = path
self.requests = requests
self.concurrency = concurrency
def args(self):
"""Compute the Apache Benchmark CLI arguments."""
port = cherrypy.server.socket_port
assert self.concurrency > 0
assert self.requests > 0
# Don't use "localhost".
# Cf
# http://mail.python.org/pipermail/python-win32/2008-March/007050.html
return '-k -n %s -c %s http://127.0.0.1:%s%s' % (
self.requests,
self.concurrency,
port,
self.path,
)
def run(self):
"""Run an Apache Benchmark test."""
# Parse output of ab, setting attributes on self
try:
self.output = _cpmodpy.read_process(AB_PATH or 'ab', self.args())
except Exception:
print(_cperror.format_exc())
raise
for attr, name, pattern in self.parse_patterns:
val = re.search(pattern, self.output, re.MULTILINE)
if val:
val = val.group(1)
setattr(self, attr, val)
else:
setattr(self, attr, None)
safe_threads = (25, 50, 100, 200, 400)
if sys.platform in ('win32',):
# For some reason, ab crashes with > 50 threads on my Win2k laptop.
safe_threads = (10, 20, 30, 40, 50)
def thread_report(path=SCRIPT_NAME + '/hello', concurrency=safe_threads):
"""Report Apache Benchmark against a multi-threaded server."""
sess = ABSession(path)
attrs, names, patterns = list(zip(*sess.parse_patterns))
avg = dict.fromkeys(attrs, 0.0)
yield ('threads',) + names
for c in concurrency:
sess.concurrency = c
sess.run()
row = [c]
for attr in attrs:
val = getattr(sess, attr)
if val is None:
print(sess.output)
row = None
break
val = float(val)
avg[attr] += float(val)
row.append(val)
if row:
yield row
# Add a row of averages.
yield ['Average'] + [str(avg[attr] / len(concurrency)) for attr in attrs]
def size_report(
sizes=(10, 100, 1000, 10000, 100000, 100000000),
concurrency=50,
):
"""Report Apache Benchmark against different payload sizes."""
sess = ABSession(concurrency=concurrency)
attrs, names, patterns = list(zip(*sess.parse_patterns))
yield ('bytes',) + names
for sz in sizes:
sess.path = '%s/sizer?size=%s' % (SCRIPT_NAME, sz)
sess.run()
yield [sz] + [getattr(sess, attr) for attr in attrs]
def print_report(rows):
"""Print rows to standard out."""
for row in rows:
print('')
for val in row:
sys.stdout.write(str(val).rjust(10) + ' | ')
print('')
def run_standard_benchmarks():
"""Run Standard Benchmarks."""
print('')
print(
'Client Thread Report (1000 requests, 14 byte response body, '
'%s server threads):' % cherrypy.server.thread_pool,
)
print_report(thread_report())
print('')
print(
'Client Thread Report (1000 requests, 14 bytes via staticdir, '
'%s server threads):' % cherrypy.server.thread_pool,
)
print_report(thread_report('%s/static/index.html' % SCRIPT_NAME))
print('')
print(
'Size Report (1000 requests, 50 client threads, '
'%s server threads):' % cherrypy.server.thread_pool,
)
print_report(size_report())
# modpython and other WSGI #
def startup_modpython(req=None):
"""Start CherryPy app server in 'serverless' mode (for modpython/WSGI)."""
if cherrypy.engine.state == cherrypy._cpengine.STOPPED:
if req:
if 'nullreq' in req.get_options():
cherrypy.engine.request_class = NullRequest
cherrypy.engine.response_class = NullResponse
ab_opt = req.get_options().get('ab', '')
if ab_opt:
global AB_PATH
AB_PATH = ab_opt
cherrypy.engine.start()
if cherrypy.engine.state == cherrypy._cpengine.STARTING:
cherrypy.engine.wait()
return 0 # apache.OK
def run_modpython(use_wsgi=False):
print('Starting mod_python...')
pyopts = []
# Pass the null and ab=path options through Apache
if '--null' in opts:
pyopts.append(('nullreq', ''))
if '--ab' in opts:
pyopts.append(('ab', opts['--ab']))
s = _cpmodpy.ModPythonServer
if use_wsgi:
pyopts.append(('wsgi.application', 'cherrypy::tree'))
pyopts.append(
('wsgi.startup', 'cherrypy.test.benchmark::startup_modpython'),
)
handler = 'modpython_gateway::handler'
s = s(
port=54583,
opts=pyopts,
apache_path=APACHE_PATH,
handler=handler,
)
else:
pyopts.append(
('cherrypy.setup', 'cherrypy.test.benchmark::startup_modpython'),
)
s = s(port=54583, opts=pyopts, apache_path=APACHE_PATH)
try:
s.start()
run()
finally:
s.stop()
if __name__ == '__main__':
init()
longopts = [
'cpmodpy',
'modpython',
'null',
'notests',
'help',
'ab=',
'apache=',
]
try:
switches, args = getopt.getopt(sys.argv[1:], '', longopts)
opts = dict(switches)
except getopt.GetoptError:
print(__doc__)
sys.exit(2)
if '--help' in opts:
print(__doc__)
sys.exit(0)
if '--ab' in opts:
AB_PATH = opts['--ab']
if '--notests' in opts:
# Return without stopping the server, so that the pages
# can be tested from a standard web browser.
def run():
port = cherrypy.server.socket_port
print(
'You may now open http://127.0.0.1:%s%s/'
% (port, SCRIPT_NAME),
)
if '--null' in opts:
print('Using null Request object')
else:
def run():
end = time.time() - start
print('Started in %s seconds' % end)
if '--null' in opts:
print('\nUsing null Request object')
try:
try:
run_standard_benchmarks()
except Exception:
print(_cperror.format_exc())
raise
finally:
cherrypy.engine.exit()
print('Starting CherryPy app server...')
class NullWriter(object):
"""Suppresses the printing of socket errors."""
def write(self, data):
pass
sys.stderr = NullWriter()
start = time.time()
if '--cpmodpy' in opts:
run_modpython()
elif '--modpython' in opts:
run_modpython(use_wsgi=True)
else:
if '--null' in opts:
cherrypy.server.request_class = NullRequest
cherrypy.server.response_class = NullResponse
cherrypy.engine.start_with_callback(run)
cherrypy.engine.block()
| ABSession |
python | google__jax | tests/linalg_sharding_test.py | {
"start": 1588,
"end": 7539
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if jax.device_count() < 2:
self.skipTest("Requires multiple devices")
def get_fun_and_shapes(self, fun_and_shapes, grad=False):
if jtu.test_device_matches(["gpu"]):
if fun_and_shapes not in CPU_AND_GPU_FUN_AND_SHAPES:
self.skipTest(
f"Partitioning {fun_and_shapes[0].__name__} not supported on GPU.")
if (fun_and_shapes[0] in (lax.linalg.cholesky, lax.linalg.triangular_solve)
and not config.use_shardy_partitioner.value):
self.skipTest(
f"Partitioning {fun_and_shapes[0].__name__} only supported on GPU "
"when shardy is enabled.")
if fun_and_shapes[0] == lax.linalg.tridiagonal_solve:
self.skipTest(
f"Partitioning {fun_and_shapes[0].__name__} on GPU, requires a "
"more recent jaxlib version.")
if not grad:
return fun_and_shapes
fun, shapes = fun_and_shapes
if fun in (lax.linalg.schur, lax.linalg.hessenberg, lax.linalg.tridiagonal):
self.skipTest(f"{fun.__name__} does not support differentation")
if jtu.test_device_matches(["gpu"]) and fun in (
lax.linalg.eig, lax.linalg.lu, lax.linalg.qr
) and not config.use_shardy_partitioner.value:
self.skipTest(
f"JVP of {fun.__name__} uses triangular solve on GPU, which doesn't "
"support batch partitioning unless shardy is enabled.")
if fun == lax.linalg.eig:
fun = functools.partial(
fun,
compute_left_eigenvectors=False,
compute_right_eigenvectors=False,
)
if fun == lax.linalg.svd:
fun = functools.partial(fun, full_matrices=False)
return fun, shapes
def get_args(self, shapes, dtype, batch_size=None):
rng = jtu.rand_default(self.rng())
def arg_maker(shape):
if batch_size is not None:
x = rng((batch_size, *shape), dtype)
else:
x = rng(shape, dtype)
if len(shape) == 2 and shape[0] == shape[1]:
x = np.matmul(x, np.swapaxes(np.conj(x), -1, -2))
return x
return tuple(arg_maker(shape) for shape in shapes)
@parameterized.product(
fun_and_shapes=ALL_FUN_AND_SHAPES, dtype=float_types + complex_types
)
@jtu.run_on_devices("gpu", "cpu")
def test_batch_axis_sharding(self, fun_and_shapes, dtype):
fun, shapes = self.get_fun_and_shapes(fun_and_shapes)
args = self.get_args(shapes, dtype, batch_size=8)
mesh = jtu.create_mesh((2,), ("i",))
sharding = jax.NamedSharding(mesh, P("i"))
args_sharded = jax.device_put(args, sharding)
fun_jit = jax.jit(fun)
expected = fun(*args)
actual = fun_jit(*args_sharded)
self.assertAllClose(actual, expected)
self.assertNotIn("all-", fun_jit.lower(*args_sharded).compile().as_text())
vmap_fun = jax.vmap(fun)
vmap_fun_jit = jax.jit(vmap_fun)
actual = vmap_fun_jit(*args_sharded)
self.assertAllClose(actual, expected)
self.assertNotIn(
"all-", vmap_fun_jit.lower(*args_sharded).compile().as_text())
@parameterized.product(
fun_and_shapes=ALL_FUN_AND_SHAPES, dtype=float_types + complex_types
)
@jtu.run_on_devices("gpu", "cpu")
def test_non_batch_axis_sharding(self, fun_and_shapes, dtype):
fun, shapes = self.get_fun_and_shapes(fun_and_shapes)
args = self.get_args(shapes, dtype)
mesh = jtu.create_mesh((2,), ("i",))
sharding = jax.NamedSharding(mesh, P("i"))
args_sharded = jax.device_put(args, sharding)
fun_jit = jax.jit(fun)
expected = fun(*args)
actual = fun_jit(*args_sharded)
self.assertAllClose(actual, expected)
self.assertIn(
"all-gather", fun_jit.lower(*args_sharded).compile().as_text())
@parameterized.product(
fun_and_shapes=ALL_FUN_AND_SHAPES, dtype=float_types + complex_types
)
@jtu.run_on_devices("gpu", "cpu")
def test_batch_axis_sharding_jvp(self, fun_and_shapes, dtype):
fun, shapes = self.get_fun_and_shapes(fun_and_shapes, grad=True)
primals = self.get_args(shapes, dtype, batch_size=8)
tangents = tuple(map(jnp.ones_like, primals))
def jvp_fun(primals, tangents):
return jax.jvp(fun, primals, tangents)
mesh = jtu.create_mesh((2,), ("i",))
sharding = jax.NamedSharding(mesh, P("i"))
primals_sharded = jax.device_put(primals, sharding)
tangents_sharded = jax.device_put(tangents, sharding)
jvp_fun_jit = jax.jit(jvp_fun)
_, expected = jvp_fun(primals, tangents)
for args in [
(primals_sharded, tangents_sharded),
(primals, tangents_sharded),
(primals_sharded, tangents),
]:
_, actual = jvp_fun_jit(*args)
self.assertAllClose(actual, expected, rtol={
np.float32: 1e-4, np.float64: 2e-11, np.complex64: 1e-4,
np.complex128: 1e-11})
hlo = jvp_fun_jit.lower(primals_sharded, tangents_sharded).compile()
self.assertNotIn("all-", hlo.as_text())
@parameterized.product(
fun_and_shapes=ALL_FUN_AND_SHAPES, dtype=float_types + complex_types
)
@jtu.run_on_devices("gpu", "cpu")
def test_batch_axis_sharding_vjp(self, fun_and_shapes, dtype):
fun, shapes = self.get_fun_and_shapes(fun_and_shapes, grad=True)
primals = self.get_args(shapes, dtype, batch_size=8)
out, vjp_fun = jax.vjp(fun, *primals)
tangents = jax.tree.map(jnp.ones_like, out)
mesh = jtu.create_mesh((2,), ("i",))
sharding = jax.NamedSharding(mesh, P("i"))
tangents_sharded = jax.device_put(tangents, sharding)
vjp_fun_jit = jax.jit(vjp_fun)
expected = vjp_fun(tangents)
actual = vjp_fun_jit(tangents_sharded)
self.assertAllClose(actual, expected, rtol={
np.float32: 1e-4, np.float64: 1e-11, np.complex64: 1e-4,
np.complex128: 1e-11})
hlo = vjp_fun_jit.lower(tangents_sharded).compile()
self.assertNotIn("all-", hlo.as_text())
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| LinalgShardingTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 313629,
"end": 314282
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UnarchiveProjectV2Item"""
__schema__ = github_schema
__field_names__ = ("project_id", "item_id", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the Project to archive the item from."""
item_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="itemId")
"""The ID of the ProjectV2Item to unarchive."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UnarchiveProjectV2ItemInput |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/events.py | {
"start": 7529,
"end": 7999
} | class ____(FileSystemEvent):
"""File system event representing directory creation on the file system."""
event_type = EVENT_TYPE_CREATED
is_directory = True
def __init__(self, src_path):
super(DirCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
| DirCreatedEvent |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/llms.py | {
"start": 4628,
"end": 15356
} | class ____(LLM, _AnthropicCommon):
"""Anthropic text completion large language model (legacy LLM).
To use, you should have the environment variable `ANTHROPIC_API_KEY`
set with your API key, or pass it as a named parameter to the constructor.
Example:
```python
from langchain_anthropic import AnthropicLLM
model = AnthropicLLM(model="claude-sonnet-4-5")
```
"""
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
@model_validator(mode="before")
@classmethod
def raise_warning(cls, values: dict) -> Any:
"""Raise warning that this class is deprecated."""
warnings.warn(
"This Anthropic LLM is deprecated. "
"Please use `from langchain_anthropic import ChatAnthropic` "
"instead",
stacklevel=2,
)
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anthropic-llm"
@property
def lc_secrets(self) -> dict[str, str]:
"""Return a mapping of secret keys to environment variables."""
return {"anthropic_api_key": "ANTHROPIC_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Whether this class can be serialized by langchain."""
return True
@property
def _identifying_params(self) -> dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"model_kwargs": self.model_kwargs,
"streaming": self.streaming,
"default_request_timeout": self.default_request_timeout,
"max_retries": self.max_retries,
}
def _get_ls_params(
self,
stop: list[str] | None = None,
**kwargs: Any,
) -> LangSmithParams:
"""Get standard params for tracing."""
params = super()._get_ls_params(stop=stop, **kwargs)
identifying_params = self._identifying_params
if max_tokens := kwargs.get(
"max_tokens",
identifying_params.get("max_tokens"),
):
params["ls_max_tokens"] = max_tokens
return params
def _format_messages(self, prompt: str) -> list[dict[str, str]]:
"""Convert prompt to Messages API format."""
messages = []
# Handle legacy prompts that might have HUMAN_PROMPT/AI_PROMPT markers
if self.HUMAN_PROMPT and self.HUMAN_PROMPT in prompt:
# Split on human/assistant turns
parts = prompt.split(self.HUMAN_PROMPT)
for _, part in enumerate(parts):
if not part.strip():
continue
if self.AI_PROMPT and self.AI_PROMPT in part:
# Split human and assistant parts
human_part, assistant_part = part.split(self.AI_PROMPT, 1)
if human_part.strip():
messages.append({"role": "user", "content": human_part.strip()})
if assistant_part.strip():
messages.append(
{"role": "assistant", "content": assistant_part.strip()}
)
# Just human content
elif part.strip():
messages.append({"role": "user", "content": part.strip()})
else:
# Handle modern format or plain text
# Clean prompt for Messages API
content = re.sub(r"^\n*Human:\s*", "", prompt)
content = re.sub(r"\n*Assistant:\s*.*$", "", content)
if content.strip():
messages.append({"role": "user", "content": content.strip()})
# Ensure we have at least one message
if not messages:
messages = [{"role": "user", "content": prompt.strip() or "Hello"}]
return messages
def _call(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
r"""Call out to Anthropic's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager for LLM run.
kwargs: Additional keyword arguments to pass to the model.
Returns:
The string generated by the model.
Example:
```python
prompt = "What are the biggest risks facing humanity?"
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
response = model.invoke(prompt)
```
"""
if self.streaming:
completion = ""
for chunk in self._stream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
**kwargs,
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
# Remove parameters not supported by Messages API
params = {k: v for k, v in params.items() if k != "max_tokens_to_sample"}
response = self.client.messages.create(
messages=self._format_messages(prompt),
stop_sequences=stop if stop else None,
**params,
)
return response.content[0].text
def convert_prompt(self, prompt: PromptValue) -> str:
"""Convert a `PromptValue` to a string."""
return prompt.to_string()
async def _acall(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
"""Call out to Anthropic's completion endpoint asynchronously."""
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt=prompt,
stop=stop,
run_manager=run_manager,
**kwargs,
):
completion += chunk.text
return completion
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
# Remove parameters not supported by Messages API
params = {k: v for k, v in params.items() if k != "max_tokens_to_sample"}
response = await self.async_client.messages.create(
messages=self._format_messages(prompt),
stop_sequences=stop if stop else None,
**params,
)
return response.content[0].text
def _stream(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager for LLM run.
kwargs: Additional keyword arguments to pass to the model.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
```python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
```
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
# Remove parameters not supported by Messages API
params = {k: v for k, v in params.items() if k != "max_tokens_to_sample"}
with self.client.messages.stream(
messages=self._format_messages(prompt),
stop_sequences=stop if stop else None,
**params,
) as stream:
for event in stream:
if event.type == "content_block_delta" and hasattr(event.delta, "text"):
chunk = GenerationChunk(text=event.delta.text)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
r"""Call Anthropic completion_stream and return the resulting generator.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager for LLM run.
kwargs: Additional keyword arguments to pass to the model.
Returns:
A generator representing the stream of tokens from Anthropic.
Example:
```python
prompt = "Write a poem about a stream."
prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
generator = anthropic.stream(prompt)
for token in generator:
yield token
```
"""
stop = self._get_anthropic_stop(stop)
params = {**self._default_params, **kwargs}
# Remove parameters not supported by Messages API
params = {k: v for k, v in params.items() if k != "max_tokens_to_sample"}
async with self.async_client.messages.stream(
messages=self._format_messages(prompt),
stop_sequences=stop if stop else None,
**params,
) as stream:
async for event in stream:
if event.type == "content_block_delta" and hasattr(event.delta, "text"):
chunk = GenerationChunk(text=event.delta.text)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
msg = (
"Anthropic's legacy count_tokens method was removed in anthropic 0.39.0 "
"and langchain-anthropic 0.3.0. Please use "
"ChatAnthropic.get_num_tokens_from_messages instead."
)
raise NotImplementedError(
msg,
)
| AnthropicLLM |
python | huggingface__transformers | src/transformers/models/qwen2_moe/modular_qwen2_moe.py | {
"start": 3475,
"end": 3688
} | class ____(MixtralExperts):
def __init__(self, config):
super().__init__(config)
self.num_experts = config.num_experts
self.intermediate_dim = config.moe_intermediate_size
| Qwen2MoeExperts |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 70473,
"end": 71259
} | class ____(PrefectFilterBaseModel):
"""Filter by `Worker.status`."""
any_: Optional[list[schemas.statuses.WorkerStatus]] = Field(
default=None, description="A list of worker statuses to include"
)
not_any_: Optional[list[schemas.statuses.WorkerStatus]] = Field(
default=None, description="A list of worker statuses to exclude"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Worker.status.in_(self.any_))
if self.not_any_ is not None:
filters.append(db.Worker.status.notin_(self.not_any_))
return filters
| WorkerFilterStatus |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/fields.py | {
"start": 12706,
"end": 13674
} | class ____(StringField):
"""Date field."""
types = (datetime.date,)
default_format = "%Y-%m-%d"
def __init__(self, str_format: str = None, *args: Any, **kwargs: Any) -> None:
"""Init.
:param str str_format: Format to cast date to (if `None` - casting to
%Y-%m-%d format).
"""
self.str_format = str_format
super(DateField, self).__init__(*args, **kwargs)
def to_struct(self, value: datetime.date) -> str:
"""Cast `date` object to string."""
if self.str_format:
return value.strftime(self.str_format)
return value.strftime(self.default_format)
def parse_value(self, value: Union[None, datetime.date, str]) -> Union[None, datetime.date]:
"""Parse string into instance of `date`."""
if value is None:
return value
if isinstance(value, datetime.date):
return value
return parse(value).date()
| DateField |
python | python-openxml__python-docx | src/docx/oxml/numbering.py | {
"start": 1901,
"end": 2659
} | class ____(BaseOxmlElement):
"""A ``<w:numPr>`` element, a container for numbering properties applied to a
paragraph."""
ilvl = ZeroOrOne("w:ilvl", successors=("w:numId", "w:numberingChange", "w:ins"))
numId = ZeroOrOne("w:numId", successors=("w:numberingChange", "w:ins"))
# @ilvl.setter
# def _set_ilvl(self, val):
# """
# Get or add a <w:ilvl> child and set its ``w:val`` attribute to `val`.
# """
# ilvl = self.get_or_add_ilvl()
# ilvl.val = val
# @numId.setter
# def numId(self, val):
# """
# Get or add a <w:numId> child and set its ``w:val`` attribute to
# `val`.
# """
# numId = self.get_or_add_numId()
# numId.val = val
| CT_NumPr |
python | Netflix__metaflow | metaflow/_vendor/v3_7/typeguard/_transformer.py | {
"start": 10002,
"end": 15935
} | class ____(NodeTransformer):
type_substitutions: ClassVar[dict[str, tuple[str, str]]] = {
"builtins.dict": ("typing", "Dict"),
"builtins.list": ("typing", "List"),
"builtins.tuple": ("typing", "Tuple"),
"builtins.set": ("typing", "Set"),
"builtins.frozenset": ("typing", "FrozenSet"),
}
def __init__(self, transformer: TypeguardTransformer):
self.transformer = transformer
self._memo = transformer._memo
self._level = 0
def visit(self, node: AST) -> Any:
self._level += 1
new_node = super().visit(node)
self._level -= 1
if isinstance(new_node, Expression) and not hasattr(new_node, "body"):
return None
# Return None if this new node matches a variation of typing.Any
if (
self._level == 0
and isinstance(new_node, expr)
and self._memo.name_matches(new_node, *anytype_names)
):
return None
return new_node
def generic_visit(self, node: AST) -> AST:
if isinstance(node, expr) and self._memo.name_matches(node, *literal_names):
return node
return super().generic_visit(node)
def visit_BinOp(self, node: BinOp) -> Any:
self.generic_visit(node)
if isinstance(node.op, BitOr):
# Return Any if either side is Any
if self._memo.name_matches(node.left, *anytype_names):
return node.left
elif self._memo.name_matches(node.right, *anytype_names):
return node.right
if sys.version_info < (3, 10):
union_name = self.transformer._get_import("typing", "Union")
return Subscript(
value=union_name,
slice=Index(
Tuple(elts=[node.left, node.right], ctx=Load()), ctx=Load()
),
ctx=Load(),
)
return node
def visit_Attribute(self, node: Attribute) -> Any:
if self._memo.is_ignored_name(node):
return None
return node
def visit_Subscript(self, node: Subscript) -> Any:
if self._memo.is_ignored_name(node.value):
return None
# The subscript of typing(_extensions).Literal can be any arbitrary string, so
# don't try to evaluate it as code
if node.slice:
if isinstance(node.slice, Index):
# Python 3.7 and 3.8
slice_value = node.slice.value # type: ignore[attr-defined]
else:
slice_value = node.slice
if isinstance(slice_value, Tuple):
if self._memo.name_matches(node.value, *annotated_names):
# Only treat the first argument to typing.Annotated as a potential
# forward reference
items = cast(
typing.List[expr],
[self.generic_visit(slice_value.elts[0])]
+ slice_value.elts[1:],
)
else:
items = cast(
typing.List[expr],
[self.generic_visit(item) for item in slice_value.elts],
)
# If this is a Union and any of the items is Any, erase the entire
# annotation
if self._memo.name_matches(node.value, "typing.Union") and any(
isinstance(item, expr)
and self._memo.name_matches(item, *anytype_names)
for item in items
):
return None
# If all items in the subscript were Any, erase the subscript entirely
if all(item is None for item in items):
return node.value
for index, item in enumerate(items):
if item is None:
items[index] = self.transformer._get_import("typing", "Any")
slice_value.elts = items
else:
self.generic_visit(node)
# If the transformer erased the slice entirely, just return the node
# value without the subscript (unless it's Optional, in which case erase
# the node entirely
if self._memo.name_matches(node.value, "typing.Optional"):
return None
elif sys.version_info >= (3, 9) and not hasattr(node, "slice"):
return node.value
elif sys.version_info < (3, 9) and not hasattr(node.slice, "value"):
return node.value
return node
def visit_Name(self, node: Name) -> Any:
if self._memo.is_ignored_name(node):
return None
if sys.version_info < (3, 9):
for typename, substitute in self.type_substitutions.items():
if self._memo.name_matches(node, typename):
new_node = self.transformer._get_import(*substitute)
return copy_location(new_node, node)
return node
def visit_Call(self, node: Call) -> Any:
# Don't recurse into calls
return node
def visit_Constant(self, node: Constant) -> Any:
if isinstance(node.value, str):
expression = ast.parse(node.value, mode="eval")
new_node = self.visit(expression)
if new_node:
return copy_location(new_node.body, node)
else:
return None
return node
def visit_Str(self, node: Str) -> Any:
# Only used on Python 3.7
expression = ast.parse(node.s, mode="eval")
new_node = self.visit(expression)
if new_node:
return copy_location(new_node.body, node)
else:
return None
| AnnotationTransformer |
python | marshmallow-code__marshmallow | tests/test_decorators.py | {
"start": 8109,
"end": 13228
} | class ____:
def test_validates(self):
class VSchema(Schema):
s = fields.String()
@validates("s")
def validate_string(self, data, **kwargs):
raise ValidationError("nope")
with pytest.raises(ValidationError) as excinfo:
VSchema().load({"s": "bar"})
assert excinfo.value.messages == {"s": ["nope"]}
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/350
def test_validates_with_attribute(self):
class S1(Schema):
s = fields.String(attribute="string_name")
@validates("s")
def validate_string(self, data, **kwargs):
raise ValidationError("nope")
with pytest.raises(ValidationError) as excinfo:
S1().load({"s": "foo"})
assert excinfo.value.messages == {"s": ["nope"]}
with pytest.raises(ValidationError):
S1(many=True).load([{"s": "foo"}])
def test_validates_decorator(self):
schema = ValidatesSchema()
errors = schema.validate({"foo": 41})
assert "foo" in errors
assert errors["foo"][0] == "The answer to life the universe and everything."
errors = schema.validate({"foo": 42})
assert errors == {}
errors = schema.validate([{"foo": 42}, {"foo": 43}], many=True)
assert "foo" in errors[1]
assert len(errors[1]["foo"]) == 1
assert errors[1]["foo"][0] == "The answer to life the universe and everything."
errors = schema.validate([{"foo": 42}, {"foo": 42}], many=True)
assert errors == {}
errors = schema.validate({})
assert errors == {}
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": 41})
assert excinfo.value.messages
result = excinfo.value.valid_data
assert result == {}
with pytest.raises(ValidationError) as excinfo:
schema.load([{"foo": 42}, {"foo": 43}], many=True)
error_messages = excinfo.value.messages
result = excinfo.value.valid_data
assert isinstance(result, list)
assert len(result) == 2
assert result[0] == {"foo": 42}
assert result[1] == {}
assert 1 in error_messages
assert "foo" in error_messages[1]
assert error_messages[1]["foo"] == [
"The answer to life the universe and everything."
]
def test_field_not_present(self):
class BadSchema(ValidatesSchema):
@validates("bar")
def validate_bar(self, value, **kwargs):
raise ValidationError("Never raised.")
schema = BadSchema()
with pytest.raises(ValueError, match='"bar" field does not exist.'):
schema.validate({"foo": 42})
def test_precedence(self):
class Schema2(ValidatesSchema):
foo = fields.Int(validate=predicate(lambda n: n != 42))
bar = fields.Int(validate=validate.Equal(1))
@validates("bar")
def validate_bar(self, value, **kwargs):
if value != 2:
raise ValidationError("Must be 2")
schema = Schema2()
errors = schema.validate({"foo": 42})
assert "foo" in errors
assert len(errors["foo"]) == 1
assert "Invalid value." in errors["foo"][0]
errors = schema.validate({"bar": 3})
assert "bar" in errors
assert len(errors["bar"]) == 1
assert "Must be equal to 1." in errors["bar"][0]
errors = schema.validate({"bar": 1})
assert "bar" in errors
assert len(errors["bar"]) == 1
assert errors["bar"][0] == "Must be 2"
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/748
def test_validates_with_data_key(self):
class BadSchema(Schema):
foo = fields.String(data_key="foo-name")
@validates("foo")
def validate_string(self, data, **kwargs):
raise ValidationError("nope")
schema = BadSchema()
errors = schema.validate({"foo-name": "data"})
assert "foo-name" in errors
assert errors["foo-name"] == ["nope"]
schema = BadSchema()
errors = schema.validate(
[{"foo-name": "data"}, {"foo-name": "data2"}], many=True
)
assert errors == {0: {"foo-name": ["nope"]}, 1: {"foo-name": ["nope"]}}
def test_validates_accepts_multiple_fields(self):
class BadSchema(Schema):
foo = fields.String()
bar = fields.String(data_key="Bar")
@validates("foo", "bar")
def validate_string(self, data: str, data_key: str):
raise ValidationError(f"'{data}' is invalid for {data_key}.")
schema = BadSchema()
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": "data", "Bar": "data2"})
assert excinfo.value.messages == {
"foo": ["'data' is invalid for foo."],
"Bar": ["'data2' is invalid for Bar."],
}
| TestValidatesDecorator |
python | huggingface__transformers | src/transformers/models/sam2/modeling_sam2.py | {
"start": 28417,
"end": 29581
} | class ____(nn.Module):
def __init__(self, config: Sam2PromptEncoderConfig):
super().__init__()
self.scale = config.scale
positional_embedding = self.scale * torch.randn((2, config.hidden_size // 2))
self.register_buffer("positional_embedding", positional_embedding)
def forward(self, input_coords, input_shape=None):
"""Positionally encode points that are normalized to [0,1]."""
coordinates = input_coords.clone()
if input_shape is not None:
coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
coordinates.to(torch.float32)
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coordinates = 2 * coordinates - 1
coordinates = coordinates.to(self.positional_embedding.dtype)
coordinates = coordinates @ self.positional_embedding
coordinates = 2 * np.pi * coordinates
# outputs d_1 x ... x d_n x channel shape
return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
| Sam2PositionalEmbedding |
python | google__jax | tests/mosaic/gpu_test_distributed.py | {
"start": 1464,
"end": 2470
} | class ____(parameterized.TestCase):
def setUp(self):
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_at_least("9.0")):
self.skipTest("Only works on GPU with capability >= sm90")
if not mgpu.supports_cross_device_collectives():
self.skipTest("NVSHMEM library unavailable.")
if os.environ.get("XLA_PYTHON_CLIENT_ALLOCATOR", "") == "platform":
self.skipTest("NVSHMEM doesn't work with the platform allocator.")
if jax.process_count() == 1:
self.skipTest("Test requires multiple processes.")
if jax.device_count() != jax.process_count():
self.skipTest("Need 1 device per process")
super().setUp()
self.prng = np.random.default_rng(1234)
self.context = mlir.make_ir_context()
if mgpu_dialect is not None:
mgpu_dialect.register_dialect(self.context)
self.enter_context(config.traceback_filtering("off"))
self.enter_context(self.context)
self.enter_context(ir.Location.unknown())
| TestCase |
python | apache__airflow | providers/slack/tests/unit/slack/operators/test_slack_webhook.py | {
"start": 1033,
"end": 4494
} | class ____:
def setup_method(self):
self.default_op_kwargs = {
"slack_webhook_conn_id": "test_conn_id",
"channel": None,
"username": None,
"icon_emoji": None,
"icon_url": None,
}
@mock.patch("airflow.providers.slack.operators.slack_webhook.SlackWebhookHook")
@pytest.mark.parametrize(
("slack_op_kwargs", "hook_extra_kwargs"),
[
pytest.param({}, DEFAULT_HOOKS_PARAMETERS, id="default-hook-parameters"),
pytest.param(
{"timeout": 42, "proxy": "http://spam.egg", "retry_handlers": []},
{"timeout": 42, "proxy": "http://spam.egg", "retry_handlers": []},
id="with-extra-hook-parameters",
),
],
)
def test_hook(self, mock_slackwebhook_cls, slack_op_kwargs, hook_extra_kwargs):
"""Test get cached ``SlackWebhookHook`` hook."""
op = SlackWebhookOperator(
task_id="test_hook", slack_webhook_conn_id="test_conn_id", **slack_op_kwargs
)
hook = op.hook
assert hook is op.hook, "Expected cached hook"
mock_slackwebhook_cls.assert_called_once_with(
slack_webhook_conn_id="test_conn_id", **hook_extra_kwargs
)
def test_assert_templated_fields(self):
"""Test expected templated fields."""
operator = SlackWebhookOperator(task_id="test_assert_templated_fields", **self.default_op_kwargs)
template_fields = (
"message",
"attachments",
"blocks",
"channel",
"username",
"proxy",
)
assert operator.template_fields == template_fields
@pytest.mark.parametrize(
("message", "blocks", "attachments"),
[
("Test Text", ["Dummy Block"], ["Test Attachments"]),
("Test Text", ["Dummy Block"], None),
("Test Text", None, None),
(None, ["Dummy Block"], None),
(None, ["Dummy Block"], ["Test Attachments"]),
(None, None, ["Test Attachments"]),
],
)
@pytest.mark.parametrize(
("channel", "username", "icon_emoji", "icon_url"),
[
(None, None, None, None),
("legacy-channel", "legacy-username", "legacy-icon_emoji", "legacy-icon-url"),
],
ids=["webhook-attrs", "legacy-webhook-attrs"],
)
@mock.patch("airflow.providers.slack.operators.slack_webhook.SlackWebhookHook")
def test_execute_operator(
self, mock_slackwebhook_cls, message, blocks, attachments, channel, username, icon_emoji, icon_url
):
mock_slackwebhook = mock_slackwebhook_cls.return_value
mock_slackwebhook_send = mock_slackwebhook.send
op = SlackWebhookOperator(
task_id="test_execute",
slack_webhook_conn_id="test_conn_id",
message=message,
blocks=blocks,
attachments=attachments,
channel=channel,
username=username,
icon_emoji=icon_emoji,
icon_url=icon_url,
)
op.execute(mock.MagicMock())
mock_slackwebhook_send.assert_called_once_with(
text=message,
blocks=blocks,
attachments=attachments,
channel=channel,
username=username,
icon_emoji=icon_emoji,
icon_url=icon_url,
)
| TestSlackWebhookOperator |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_str.py | {
"start": 515,
"end": 588
} | class ____:
def __str__(self):
x = "ruff"
return x
| Str2 |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/log/test_s3_task_handler.py | {
"start": 7738,
"end": 14618
} | class ____:
def clear_db(self):
clear_db_dags()
clear_db_runs()
if AIRFLOW_V_3_0_PLUS:
clear_db_dag_bundles()
@pytest.fixture(autouse=True)
def setup_tests(self, create_log_template, tmp_path_factory, session, testing_dag_bundle):
with conf_vars({("logging", "remote_log_conn_id"): "aws_default"}):
self.remote_log_base = "s3://bucket/remote/log/location"
self.remote_log_location = "s3://bucket/remote/log/location/1.log"
self.remote_log_key = "remote/log/location/1.log"
self.local_log_location = str(tmp_path_factory.mktemp("local-s3-log-location"))
create_log_template("{try_number}.log")
self.s3_task_handler = S3TaskHandler(self.local_log_location, self.remote_log_base)
# Verify the hook now with the config override
assert self.s3_task_handler.io.hook is not None
date = datetime(2016, 1, 1)
self.dag = DAG("dag_for_testing_s3_task_handler", schedule=None, start_date=date)
task = EmptyOperator(task_id="task_for_testing_s3_log_handler", dag=self.dag)
if AIRFLOW_V_3_0_PLUS:
scheduler_dag = sync_dag_to_db(self.dag)
dag_run = DagRun(
dag_id=self.dag.dag_id,
logical_date=date,
run_id="test",
run_type="manual",
)
else:
scheduler_dag = self.dag
dag_run = DagRun(
dag_id=self.dag.dag_id,
execution_date=date,
run_id="test",
run_type="manual",
)
session.add(dag_run)
session.commit()
session.refresh(dag_run)
if AIRFLOW_V_3_0_PLUS:
from airflow.models.dag_version import DagVersion
dag_version = DagVersion.get_latest_version(self.dag.dag_id)
self.ti = TaskInstance(task=task, run_id=dag_run.run_id, dag_version_id=dag_version.id)
else:
self.ti = TaskInstance(task=task, run_id=dag_run.run_id)
self.ti.dag_run = dag_run
self.ti.try_number = 1
self.ti.state = State.RUNNING
session.add(self.ti)
session.commit()
self.conn = boto3.client("s3")
self.conn.create_bucket(Bucket="bucket")
yield
scheduler_dag.clear()
self.clear_db()
if self.s3_task_handler.handler:
with contextlib.suppress(Exception):
os.remove(self.s3_task_handler.handler.baseFilename)
def test_set_context_raw(self):
self.ti.raw = True
mock_open = mock.mock_open()
with mock.patch("airflow.providers.amazon.aws.log.s3_task_handler.open", mock_open):
self.s3_task_handler.set_context(self.ti)
assert not self.s3_task_handler.upload_on_close
mock_open.assert_not_called()
def test_set_context_not_raw(self):
mock_open = mock.mock_open()
with mock.patch("airflow.providers.amazon.aws.log.s3_task_handler.open", mock_open):
self.s3_task_handler.set_context(self.ti)
assert self.s3_task_handler.upload_on_close
mock_open.assert_called_once_with(os.path.join(self.local_log_location, "1.log"), "w")
mock_open().write.assert_not_called()
def test_read(self):
# Test what happens when we have two log files to read
self.conn.put_object(Bucket="bucket", Key=self.remote_log_key, Body=b"Log line\nLine 2\n")
self.conn.put_object(
Bucket="bucket", Key=self.remote_log_key + ".trigger.log", Body=b"Log line 3\nLine 4\n"
)
ti = copy.copy(self.ti)
ti.state = TaskInstanceState.SUCCESS
log, metadata = self.s3_task_handler.read(ti)
expected_s3_uri = f"s3://bucket/{self.remote_log_key}"
if AIRFLOW_V_3_0_PLUS:
log = list(log)
assert log[0].event == "::group::Log message source details"
assert expected_s3_uri in log[0].sources
assert log[1].event == "::endgroup::"
assert log[2].event == "Log line"
assert log[3].event == "Line 2"
assert log[4].event == "Log line 3"
assert log[5].event == "Line 4"
assert metadata == {"end_of_log": True, "log_pos": 4}
else:
actual = log[0][0][-1]
assert f"*** Found logs in s3:\n*** * {expected_s3_uri}\n" in actual
assert actual.endswith("Line 4")
assert metadata == [{"end_of_log": True, "log_pos": 33}]
def test_read_when_s3_log_missing(self):
ti = copy.copy(self.ti)
ti.state = TaskInstanceState.SUCCESS
self.s3_task_handler._read_from_logs_server = mock.Mock(return_value=([], []))
log, metadata = self.s3_task_handler.read(ti)
if AIRFLOW_V_3_0_PLUS:
log = list(log)
assert len(log) == 2
assert metadata == {"end_of_log": True, "log_pos": 0}
else:
assert len(log) == 1
assert len(log) == len(metadata)
actual = log[0][0][-1]
expected = "*** No logs found on s3 for ti=<TaskInstance: dag_for_testing_s3_task_handler.task_for_testing_s3_log_handler test [success]>\n"
assert expected in actual
assert metadata[0] == {"end_of_log": True, "log_pos": 0}
def test_close(self):
self.s3_task_handler.set_context(self.ti)
assert self.s3_task_handler.upload_on_close
self.s3_task_handler.close()
# Should not raise
boto3.resource("s3").Object("bucket", self.remote_log_key).get()
def test_close_no_upload(self):
self.ti.raw = True
self.s3_task_handler.set_context(self.ti)
assert not self.s3_task_handler.upload_on_close
self.s3_task_handler.close()
with pytest.raises(ClientError):
boto3.resource("s3").Object("bucket", self.remote_log_key).get()
@pytest.mark.parametrize(
("delete_local_copy", "expected_existence_of_local_copy"),
[(True, False), (False, True)],
)
def test_close_with_delete_local_logs_conf(self, delete_local_copy, expected_existence_of_local_copy):
with conf_vars({("logging", "delete_local_logs"): str(delete_local_copy)}):
handler = S3TaskHandler(self.local_log_location, self.remote_log_base)
handler.log.info("test")
handler.set_context(self.ti)
assert handler.upload_on_close
handler.close()
assert os.path.exists(handler.handler.baseFilename) == expected_existence_of_local_copy
def test_filename_template_for_backward_compatibility(self):
# filename_template arg support for running the latest provider on airflow 2
S3TaskHandler(self.local_log_location, self.remote_log_base, filename_template=None)
| TestS3TaskHandler |
python | huggingface__transformers | src/transformers/models/dinat/modeling_dinat.py | {
"start": 17550,
"end": 19073
} | class ____(nn.Module):
def __init__(self, config, dim, depth, num_heads, dilations, drop_path_rate, downsample):
super().__init__()
self.config = config
self.dim = dim
self.layers = nn.ModuleList(
[
DinatLayer(
config=config,
dim=dim,
num_heads=num_heads,
dilation=dilations[i],
drop_path_rate=drop_path_rate[i],
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
_, height, width, _ = hidden_states.size()
for i, layer_module in enumerate(self.layers):
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
hidden_states = self.downsample(hidden_states_before_downsampling)
stage_outputs = (hidden_states, hidden_states_before_downsampling)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
| DinatStage |
python | kamyu104__LeetCode-Solutions | Python/minimum-depth-of-binary-tree.py | {
"start": 181,
"end": 550
} | class ____(object):
# @param root, a tree node
# @return an integer
def minDepth(self, root):
if root is None:
return 0
if root.left and root.right:
return min(self.minDepth(root.left), self.minDepth(root.right)) + 1
else:
return max(self.minDepth(root.left), self.minDepth(root.right)) + 1
| Solution |
python | django__django | django/tasks/exceptions.py | {
"start": 437,
"end": 525
} | class ____(TaskException):
"""The requested TaskResult is invalid."""
| TaskResultMismatch |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/plugin.py | {
"start": 1489,
"end": 42589
} | class ____(SpyderDockablePlugin, RunExecutor):
"""
IPython Console plugin
This is a widget with tabs where each one is a ClientWidget
"""
NAME = 'ipython_console'
REQUIRES = [Plugins.Application, Plugins.Console, Plugins.Preferences]
OPTIONAL = [
Plugins.Editor,
Plugins.History,
Plugins.MainInterpreter,
Plugins.MainMenu,
Plugins.Projects,
Plugins.PythonpathManager,
Plugins.RemoteClient,
Plugins.Run,
Plugins.StatusBar,
Plugins.WorkingDirectory,
]
TABIFY = [Plugins.History]
WIDGET_CLASS = IPythonConsoleWidget
CONF_SECTION = NAME
CONF_WIDGET_CLASS = IPythonConsoleConfigPage
CONF_FILE = False
DISABLE_ACTIONS_WHEN_HIDDEN = False
RAISE_AND_FOCUS = True
CAN_HANDLE_EDIT_ACTIONS = True
CAN_HANDLE_SEARCH_ACTIONS = True
# Signals
sig_append_to_history_requested = Signal(str, str)
"""
This signal is emitted when the plugin requires to add commands to a
history file.
Parameters
----------
filename: str
History file filename.
text: str
Text to append to the history file.
"""
sig_history_requested = Signal(str)
"""
This signal is emitted when the plugin wants a specific history file
to be shown.
Parameters
----------
path: str
Path to history file.
"""
sig_focus_changed = Signal()
"""
This signal is emitted when the plugin focus changes.
"""
sig_edit_goto_requested = Signal(str, int, str)
"""
This signal will request to open a file in a given row and column
using a code editor.
Parameters
----------
path: str
Path to file.
row: int
Cursor starting row position.
word: str
Word to select on given row.
"""
sig_edit_new = Signal(str)
"""
This signal will request to create a new file in a code editor.
Parameters
----------
path: str
Path to file.
"""
sig_shellwidget_created = Signal(object)
"""
This signal is emitted when a shellwidget is connected to
a kernel.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_shellwidget_deleted = Signal(object)
"""
This signal is emitted when a shellwidget is disconnected from
a kernel.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_shellwidget_changed = Signal(object)
"""
This signal is emitted when the current shellwidget changes.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_shellwidget_errored = Signal(object)
"""
This signal is emitted when the current shellwidget failed to start.
Parameters
----------
shellwidget: spyder.plugins.ipyconsole.widgets.shell.ShellWidget
The shellwigdet.
"""
sig_render_plain_text_requested = Signal(str)
"""
This signal is emitted to request a plain text help render.
Parameters
----------
plain_text: str
The plain text to render.
"""
sig_render_rich_text_requested = Signal(str, bool)
"""
This signal is emitted to request a rich text help render.
Parameters
----------
rich_text: str
The rich text.
collapse: bool
If the text contains collapsed sections, show them closed (True) or
open (False).
"""
sig_help_requested = Signal(dict)
"""
This signal is emitted to request help on a given object `name`.
Parameters
----------
help_data: dict
Example `{'name': str, 'ignore_unknown': bool}`.
"""
sig_current_directory_changed = Signal(str, str)
"""
This signal is emitted when the current directory of the active shell
widget has changed.
Parameters
----------
working_directory: str
The new working directory path.
server_id: str
The server identification from where the working directory is reachable.
"""
sig_interpreter_changed = Signal(str)
"""
This signal is emitted when the interpreter of the active shell widget has
changed.
Parameters
----------
path: str
Path to the new interpreter.
"""
# ---- SpyderDockablePlugin API
# -------------------------------------------------------------------------
@staticmethod
def get_name():
return _('IPython console')
@staticmethod
def get_description():
return _(
"Run Python files, cells, code and commands interactively."
)
@classmethod
def get_icon(cls):
return cls.create_icon('ipython_console')
def on_initialize(self):
widget = self.get_widget()
self._is_remote_consoles_menu_added = False
# Main widget signals
# Connect signal to open preferences
widget.sig_open_preferences_requested.connect(
self._open_interpreter_preferences
)
widget.sig_append_to_history_requested.connect(
self.sig_append_to_history_requested)
widget.sig_switch_to_plugin_requested.connect(self.switch_to_plugin)
widget.sig_history_requested.connect(self.sig_history_requested)
widget.sig_edit_goto_requested.connect(self.sig_edit_goto_requested)
widget.sig_edit_new.connect(self.sig_edit_new)
widget.sig_shellwidget_created.connect(self.sig_shellwidget_created)
widget.sig_shellwidget_deleted.connect(self.sig_shellwidget_deleted)
widget.sig_shellwidget_changed.connect(self.sig_shellwidget_changed)
widget.sig_shellwidget_errored.connect(self.sig_shellwidget_errored)
widget.sig_render_plain_text_requested.connect(
self.sig_render_plain_text_requested)
widget.sig_render_rich_text_requested.connect(
self.sig_render_rich_text_requested)
widget.sig_help_requested.connect(self.sig_help_requested)
widget.sig_current_directory_changed.connect(
self.sig_current_directory_changed)
widget.sig_interpreter_changed.connect(
self.sig_interpreter_changed
)
# Run configurations
self.cython_editor_run_configuration = {
'origin': self.NAME,
'extension': 'pyx',
'contexts': [
{'name': 'File'}
]
}
self.python_editor_run_configuration = {
'origin': self.NAME,
'extension': 'py',
'contexts': [
{'name': 'File'},
{'name': 'Cell'},
{'name': 'Selection'},
]
}
self.ipython_editor_run_configuration = {
'origin': self.NAME,
'extension': 'ipy',
'contexts': [
{'name': 'File'},
{'name': 'Cell'},
{'name': 'Selection'},
]
}
self.pyw_editor_run_configuration = {
'origin': self.NAME,
'extension': 'pyw',
'contexts': [
{'name': 'File'},
{'name': 'Cell'},
{'name': 'Selection'},
]
}
self.executor_configuration = [
{
'input_extension': 'py',
'context': {'name': 'File'},
'output_formats': [],
'configuration_widget': IPythonConfigOptions,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'ipy',
'context': {'name': 'File'},
'output_formats': [],
'configuration_widget': IPythonConfigOptions,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'pyw',
'context': {'name': 'File'},
'output_formats': [],
'configuration_widget': IPythonConfigOptions,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'py',
'context': {'name': 'Cell'},
'output_formats': [],
'configuration_widget': None,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'ipy',
'context': {'name': 'Cell'},
'output_formats': [],
'configuration_widget': None,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'pyw',
'context': {'name': 'Cell'},
'output_formats': [],
'configuration_widget': None,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'py',
'context': {'name': 'Selection'},
'output_formats': [],
'configuration_widget': None,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'ipy',
'context': {'name': 'Selection'},
'output_formats': [],
'configuration_widget': None,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'pyw',
'context': {'name': 'Selection'},
'output_formats': [],
'configuration_widget': None,
'requires_cwd': True,
'priority': 0
},
{
'input_extension': 'pyx',
'context': {'name': 'File'},
'output_formats': [],
'configuration_widget': IPythonConfigOptions,
'requires_cwd': True,
'priority': 0
},
]
@on_plugin_available(plugin=Plugins.Application)
def on_application_available(self) -> None:
widget = self.get_widget()
widget.sig_edit_action_enabled.connect(self._enable_edit_action)
# Enable Select All edit action
self._enable_edit_action(ApplicationActions.SelectAll, True)
# Setup Search actions
self._enable_search_action(ApplicationActions.FindText, True)
self._enable_search_action(ApplicationActions.FindNext, True)
self._enable_search_action(ApplicationActions.FindPrevious, True)
# Replace action is set disabled since the `FindReplace` widget created
# by the main widget has `enable_replace=False`
self._enable_search_action(ApplicationActions.ReplaceText, False)
@on_plugin_teardown(plugin=Plugins.Application)
def on_application_teardown(self) -> None:
widget = self.get_widget()
widget.sig_edit_action_enabled.disconnect(self._enable_edit_action)
@on_plugin_available(plugin=Plugins.StatusBar)
def on_statusbar_available(self):
# Add status widgets
statusbar = self.get_plugin(Plugins.StatusBar)
pythonenv_status = self.get_widget().pythonenv_status
statusbar.add_status_widget(pythonenv_status)
pythonenv_status.register_ipythonconsole(self)
matplotlib_status = self.get_widget().matplotlib_status
statusbar.add_status_widget(matplotlib_status)
matplotlib_status.register_ipythonconsole(self)
@on_plugin_teardown(plugin=Plugins.StatusBar)
def on_statusbar_teardown(self):
# Remove status widgets
statusbar = self.get_plugin(Plugins.StatusBar)
pythonenv_status = self.get_widget().pythonenv_status
pythonenv_status.unregister_ipythonconsole(self)
statusbar.remove_status_widget(pythonenv_status.ID)
matplotlib_status = self.get_widget().matplotlib_status
matplotlib_status.unregister_ipythonconsole(self)
statusbar.remove_status_widget(matplotlib_status.ID)
    @on_plugin_available(plugin=Plugins.Preferences)
    def on_preferences_available(self):
        """Register this plugin's config page with Preferences."""
        # Register conf page
        preferences = self.get_plugin(Plugins.Preferences)
        preferences.register_plugin_preferences(self)
    @on_plugin_available(plugin=Plugins.MainMenu)
    def on_main_menu_available(self):
        """Populate the Consoles/Edit/Help menus once MainMenu is loaded."""
        widget = self.get_widget()
        mainmenu = self.get_plugin(Plugins.MainMenu)
        # Connect state check/update logic for edit actions
        edit_menu = mainmenu.get_application_menu(ApplicationMenus.Edit)
        edit_menu.aboutToShow.connect(widget.update_edit_menu)
        # Add signal to update actions state before showing the menu
        console_menu = mainmenu.get_application_menu(
            ApplicationMenus.Consoles)
        console_menu.aboutToShow.connect(widget.update_actions)
        if sys.platform == "darwin":
            # Avoid changing the aspect of the tabs context menu when it's
            # visible and the user shows the console menu at the same time.
            console_menu.aboutToShow.connect(
                lambda: widget.tabwidget.menu.hide()
            )
        # Main menu actions for the IPython Console
        new_consoles_actions = [
            widget.create_client_action,
            widget.console_environment_menu,
            widget.special_console_menu,
            widget.connect_to_kernel_action
        ]
        restart_connect_consoles_actions = [
            widget.interrupt_action,
            widget.restart_action,
            widget.reset_action
        ]
        # Console menu
        for console_new_action in new_consoles_actions:
            mainmenu.add_item_to_application_menu(
                console_new_action,
                menu_id=ApplicationMenus.Consoles,
                section=ConsolesMenuSections.New,
            )
        for console_action in restart_connect_consoles_actions:
            mainmenu.add_item_to_application_menu(
                console_action,
                menu_id=ApplicationMenus.Consoles,
                section=ConsolesMenuSections.Restart,
            )
        # IPython documentation
        mainmenu.add_item_to_application_menu(
            self.get_widget().ipython_menu,
            menu_id=ApplicationMenus.Help,
            section=HelpMenuSections.ExternalDocumentation,
            before_section=HelpMenuSections.Support,
        )
        # Add remote console submenu, unless it was already added by the
        # RemoteClient availability hook (whichever plugin loads last adds it).
        if (
            self.is_plugin_available(Plugins.RemoteClient)
            and not self._is_remote_consoles_menu_added
        ):
            self._add_remote_consoles_menu()
    @on_plugin_available(plugin=Plugins.Editor)
    def on_editor_available(self):
        """Connect to the Editor and register supported run configurations."""
        editor = self.get_plugin(Plugins.Editor)
        self.sig_edit_goto_requested.connect(editor.load)
        self.sig_edit_new.connect(editor.new)
        # One run configuration per supported file extension.
        for run_config in [
            self.python_editor_run_configuration,
            self.ipython_editor_run_configuration,
            self.cython_editor_run_configuration,
            self.pyw_editor_run_configuration
        ]:
            editor.add_supported_run_configuration(run_config)
    @on_plugin_available(plugin=Plugins.Projects)
    def on_projects_available(self):
        """Track project open/close events to update the active project path."""
        projects = self.get_plugin(Plugins.Projects)
        projects.sig_project_loaded.connect(self._on_project_loaded)
        projects.sig_project_closed.connect(self._on_project_closed)
    @on_plugin_available(plugin=Plugins.Run)
    def on_run_available(self):
        """Register this plugin as an executor with the Run plugin."""
        run = self.get_plugin(Plugins.Run)
        run.register_executor_configuration(self, self.executor_configuration)
    @on_plugin_available(plugin=Plugins.WorkingDirectory)
    def on_working_directory_available(self):
        """Keep console cwd in sync with the WorkingDirectory plugin."""
        working_directory = self.get_plugin(Plugins.WorkingDirectory)
        # Remember the cwd for new clients...
        working_directory.sig_current_directory_changed.connect(
            self._save_working_directory
        )
        # ...and propagate it to the current client.
        working_directory.sig_current_directory_changed.connect(
            self.set_current_client_working_directory
        )
    @on_plugin_available(plugin=Plugins.PythonpathManager)
    def on_pythonpath_manager_available(self):
        """Propagate Python path changes to the consoles."""
        pythonpath_manager = self.get_plugin(Plugins.PythonpathManager)
        pythonpath_manager.sig_pythonpath_changed.connect(self.update_path)
    @on_plugin_available(plugin=Plugins.RemoteClient)
    def on_remote_client_available(self):
        """Connect remote-server signals and add the remote consoles menu."""
        self._remote_client.sig_server_stopped.connect(
            self._close_remote_clients
        )
        self._remote_client.sig_server_renamed.connect(
            self._rename_remote_clients
        )
        self._remote_client.sig_server_changed.connect(
            self._on_remote_server_changed
        )
        self._remote_client.sig_connection_established.connect(
            self._on_remote_server_connected
        )
        self._remote_client.sig_connection_lost.connect(
            self._on_remote_server_disconnected
        )
        # Add the submenu here only if MainMenu loaded first; otherwise the
        # MainMenu availability hook adds it.
        if (
            self.is_plugin_available(Plugins.MainMenu)
            and not self._is_remote_consoles_menu_added
        ):
            self._add_remote_consoles_menu()
    @on_plugin_available(plugin=Plugins.MainInterpreter)
    def on_main_interpreter_available(self):
        """Listen for environment updates from the MainInterpreter plugin."""
        main_interpreter = self.get_plugin(Plugins.MainInterpreter)
        main_interpreter.sig_environments_updated.connect(self._update_envs)
    @on_plugin_teardown(plugin=Plugins.Preferences)
    def on_preferences_teardown(self):
        """Remove this plugin's config page from Preferences."""
        # Deregister conf page
        preferences = self.get_plugin(Plugins.Preferences)
        preferences.deregister_plugin_preferences(self)
    @on_plugin_teardown(plugin=Plugins.MainMenu)
    def on_main_menu_teardown(self):
        """Remove this plugin's menu entries before MainMenu goes away."""
        widget = self.get_widget()
        mainmenu = self.get_plugin(Plugins.MainMenu)
        mainmenu.remove_application_menu(ApplicationMenus.Consoles)
        # Disconnect state check/update logic for edit actions
        edit_menu = mainmenu.get_application_menu(ApplicationMenus.Edit)
        edit_menu.aboutToShow.disconnect(widget.update_edit_menu)
        # IPython documentation menu
        mainmenu.remove_item_from_application_menu(
            IPythonConsoleWidgetMenus.Documentation,
            menu_id=ApplicationMenus.Help
        )
        # Remote consoles submenu, if it was ever added.
        if self._is_remote_consoles_menu_added:
            mainmenu.remove_item_from_application_menu(
                RemoteConsolesMenus.RemoteConsoles,
                menu_id=ApplicationMenus.Consoles,
            )
    @on_plugin_teardown(plugin=Plugins.Editor)
    def on_editor_teardown(self):
        """Disconnect from the Editor and drop our run configurations."""
        editor = self.get_plugin(Plugins.Editor)
        self.sig_edit_goto_requested.disconnect(editor.load)
        self.sig_edit_new.disconnect(editor.new)
        for run_config in [
            self.python_editor_run_configuration,
            self.ipython_editor_run_configuration,
            self.cython_editor_run_configuration,
            self.pyw_editor_run_configuration
        ]:
            editor.remove_supported_run_configuration(run_config)
    @on_plugin_teardown(plugin=Plugins.Projects)
    def on_projects_teardown(self):
        """Stop tracking project open/close events."""
        projects = self.get_plugin(Plugins.Projects)
        projects.sig_project_loaded.disconnect(self._on_project_loaded)
        projects.sig_project_closed.disconnect(self._on_project_closed)
    @on_plugin_teardown(plugin=Plugins.Run)
    def on_run_teardown(self):
        """Deregister this plugin as an executor from the Run plugin."""
        run = self.get_plugin(Plugins.Run)
        run.deregister_executor_configuration(
            self, self.executor_configuration)
    @on_plugin_teardown(plugin=Plugins.WorkingDirectory)
    def on_working_directory_teardown(self):
        """Stop syncing the cwd with the WorkingDirectory plugin."""
        working_directory = self.get_plugin(Plugins.WorkingDirectory)
        working_directory.sig_current_directory_changed.disconnect(
            self._save_working_directory
        )
        working_directory.sig_current_directory_changed.disconnect(
            self.set_current_client_working_directory
        )
    @on_plugin_teardown(plugin=Plugins.PythonpathManager)
    def on_pythonpath_manager_teardown(self):
        """Stop receiving Python path changes."""
        pythonpath_manager = self.get_plugin(Plugins.PythonpathManager)
        pythonpath_manager.sig_pythonpath_changed.disconnect(self.update_path)
@on_plugin_teardown(plugin=Plugins.RemoteClient)
def on_remote_client_teardown(self):
self._remote_client.sig_server_stopped.disconnect(
self._close_remote_clients
)
self._remote_client.sig_server_renamed.disconnect(
self._rename_remote_clients
)
self._remote_client.sig_server_changed.disconnect(
self._on_remote_server_changed
)
    @on_plugin_teardown(plugin=Plugins.MainInterpreter)
    def on_main_interpreter_teardown(self):
        """Stop receiving environment updates."""
        main_interpreter = self.get_plugin(Plugins.MainInterpreter)
        main_interpreter.sig_environments_updated.disconnect(self._update_envs)
def update_font(self):
"""Update font from Preferences"""
font = self.get_font(SpyderFontType.Monospace)
app_font = self.get_font(SpyderFontType.Interface)
self.get_widget().update_font(font, app_font)
    def on_close(self, cancelable=False):
        """Perform actions when plugin is closed"""
        # NOTE(review): mainwindow_close presumably tells clients the whole
        # app is shutting down — confirm against the widget implementation.
        self.get_widget().mainwindow_close = True
        return self.get_widget().close_all_clients()
def on_mainwindow_visible(self):
"""
Connect to an existing kernel if a `kernel-*.json` file is given via
command line options. Otherwise create a new client.
"""
cli_options = self.get_command_line_options()
connection_file = cli_options.connection_file
if connection_file is not None:
cf_path = self.get_widget().find_connection_file(connection_file)
if cf_path is None:
# Show an error if the connection file passed on the command
# line doesn't exist (find_connection_file returns None in that
# case).
self.create_new_client(give_focus=False)
client = self.get_current_client()
client.show_kernel_connection_error()
else:
self.create_client_for_kernel(cf_path, give_focus=False)
else:
self.create_new_client(give_focus=False)
# ---- Private methods
# -------------------------------------------------------------------------
    def _on_project_loaded(self, path):
        """Record the newly opened project's path in the main widget."""
        self.get_widget().update_active_project_path(path)
    def _on_project_closed(self):
        """Clear the active project path in the main widget."""
        self.get_widget().update_active_project_path(None)
    def _update_envs(self, envs):
        """Forward the updated environments list to the main widget."""
        self.get_widget().update_envs(envs)
    def _open_interpreter_preferences(self):
        """Open the Preferences dialog in the main interpreter section."""
        self._main.show_preferences()
        preferences = self._main.preferences
        container = preferences.get_container()
        dlg = container.dialog
        # Jump straight to the "main_interpreter" page.
        index = dlg.get_index_by_name("main_interpreter")
        dlg.set_current_index(index)
    def _save_working_directory(self, dirname):
        """
        Save current working directory on the main widget to start new clients.

        Parameters
        ----------
        dirname: str
            Path to the new current working directory.
        """
        self.get_widget().save_working_directory(dirname)
# ---- Public API
# -------------------------------------------------------------------------
# ---- Spyder Kernels handlers registry functionality
def register_spyder_kernel_call_handler(self, handler_id, handler):
"""
Register a callback for it to be available for the kernels of new
clients.
Parameters
----------
handler_id : str
Handler name to be registered and that will be used to
call the respective handler in the Spyder kernel.
handler : func
Callback function that will be called when the kernel calls
the handler.
Returns
-------
None.
"""
self.get_widget().register_spyder_kernel_call_handler(
handler_id, handler)
def unregister_spyder_kernel_call_handler(self, handler_id):
"""
Unregister/remove a handler for not be added to new clients kernels
Parameters
----------
handler_id : str
Handler name that was registered and that will be removed
from the Spyder kernel available handlers.
Returns
-------
None.
"""
self.get_widget().unregister_spyder_kernel_call_handler(handler_id)
# ---- For client widgets
    def get_clients(self):
        """Return the list of client widgets managed by the main widget."""
        return self.get_widget().clients
    def get_focus_client(self):
        """Return the client that currently has keyboard focus, if any."""
        return self.get_widget().get_focus_client()
    def get_current_client(self):
        """Return the currently selected client."""
        return self.get_widget().get_current_client()
    def get_current_shellwidget(self):
        """Return the shellwidget of the currently selected client."""
        return self.get_widget().get_current_shellwidget()
    def set_current_shellwidget(self, shellwidget):
        """Activate (select the tab of) the client owning `shellwidget`."""
        self.get_widget().select_tab(shellwidget)
def rename_client_tab(self, client, given_name):
"""
Rename a client's tab.
Parameters
----------
client: spyder.plugins.ipythonconsole.widgets.client.ClientWidget
Client to rename.
given_name: str
New name to be given to the client's tab.
Returns
-------
None.
"""
self.get_widget().rename_client_tab(client, given_name)
def create_new_client(self, give_focus=True, filename='', special=None,
given_name=None, path_to_custom_interpreter=None):
"""
Create a new client.
Parameters
----------
give_focus : bool, optional
True if the new client should gain the window
focus, False otherwise. The default is True.
filename : str, optional
Filename associated with the client. The default is ''.
special : str, optional
Type of special support to preload. It can be "pylab", "cython",
"sympy", or None.
given_name : str, optional
Initial name displayed in the tab of the client.
The default is None.
path_to_custom_interpreter : str, optional
Path to a custom interpreter the client should use regardless of
the interpreter selected in Spyder Preferences.
The default is None.
Returns
-------
None.
"""
self.get_widget().create_new_client(
give_focus=give_focus,
filename=filename,
special=special,
given_name=given_name,
path_to_custom_interpreter=path_to_custom_interpreter)
def create_client_for_file(self, filename, is_cython=False):
"""
Create a client widget to execute code related to a file.
Parameters
----------
filename : str
File to be executed.
is_cython : bool, optional
If the execution is for a Cython file. The default is False.
Returns
-------
None.
"""
self.get_widget().create_client_for_file(filename, is_cython=is_cython)
def create_client_for_kernel(
self,
connection_file,
hostname=None,
sshkey=None,
password=None,
server_id=None,
give_focus=False,
can_close=True,
):
"""
Create a client connected to an existing kernel.
Parameters
----------
connection_file: str
Json file that has the kernel's connection info.
hostname: str, optional
Name or IP address of the remote machine where the kernel was
started. When this is provided, it's also necessary to pass either
the ``sshkey`` or ``password`` arguments.
sshkey: str, optional
SSH key file to connect to the remote machine where the kernel is
running.
password: str, optional
Password to authenticate to the remote machine where the kernel is
running.
server_id: str, optional
The remote server id to which this client is connected to.
give_focus : bool, optional
True if the new client should gain the window
focus, False otherwise. The default is True.
can_close: bool, optional
Whether the client can be closed. This is useful to prevent closing
the client that will be connected to a remote kernel before the
connection is established.
Returns
-------
client: ClientWidget
The created client.
"""
return self.get_widget().create_client_for_kernel(
connection_file,
hostname,
sshkey,
password,
server_id,
give_focus,
can_close,
)
    def get_client_for_file(self, filename):
        """Get the client associated with a given file name, if any."""
        return self.get_widget().get_client_for_file(filename)
def create_client_from_path(self, path):
"""
Create a new console with `path` set as the current working directory.
Parameters
----------
path: str
Path to use as working directory in new console.
"""
self.get_widget().create_client_from_path(path)
def close_client(self, index=None, client=None, ask_recursive=True):
"""Close client tab from index or client (or close current tab)"""
self.get_widget().close_client(index=index, client=client,
ask_recursive=ask_recursive)
    def undo(self) -> None:
        """Undo the last edit in the current client."""
        return self.get_widget().current_client_undo()
    def redo(self) -> None:
        """Redo the last undone edit in the current client."""
        return self.get_widget().current_client_redo()
    def cut(self) -> None:
        """Cut the selection in the current client."""
        return self.get_widget().current_client_cut()
    def copy(self) -> None:
        """Copy the selection in the current client."""
        return self.get_widget().current_client_copy()
    def paste(self) -> None:
        """Paste the clipboard contents into the current client."""
        return self.get_widget().current_client_paste()
    def select_all(self) -> None:
        """Select all text in the current client."""
        return self.get_widget().current_client_select_all()
    def find(self) -> None:
        """Show the find widget and focus its search box."""
        find_widget = self.get_widget().find_widget
        find_widget.show()
        find_widget.search_text.setFocus()
    def find_next(self) -> None:
        """Jump to the next match of the current search."""
        self.get_widget().find_widget.find_next()
    def find_previous(self) -> None:
        """Jump to the previous match of the current search."""
        self.get_widget().find_widget.find_previous()
# ---- For execution
    @run_execute(context=RunContext.File)
    def exec_files(
        self,
        input: RunConfiguration,
        conf: ExtendedRunExecutionParameters
    ) -> List[RunResult]:
        """Run a whole file in the console (Run plugin executor hook)."""
        exec_params = conf['params']
        cwd_opts = exec_params['working_dir']
        params: IPythonConsolePyConfiguration = exec_params['executor_params']
        run_input: FileRun = input['run_input']
        filename = run_input['path']
        wdir = cwd_opts['path']
        args = params['python_args']
        post_mortem = params['post_mortem']
        current_client = params['current']
        clear_variables = params['clear_namespace']
        console_namespace = params['console_namespace']
        run_method = params.get('run_method', 'runfile')
        self.run_script(
            filename,
            wdir,
            args,
            post_mortem,
            current_client,
            clear_variables,
            console_namespace,
            method=run_method,
        )
        return []
@run_execute(context=RunContext.Selection)
def exec_selection(
self,
input: RunConfiguration,
conf: ExtendedRunExecutionParameters
) -> List[RunResult]:
run_input: SelectionRun = input['run_input']
text = run_input['selection']
self.run_selection(text)
@run_execute(context=RunContext.Cell)
def exec_cell(
self,
input: RunConfiguration,
conf: ExtendedRunExecutionParameters
) -> List[RunResult]:
run_input: CellRun = input['run_input']
cell_text = run_input['cell']
if run_input['copy']:
cell_text = re.sub(r'(^\s*\n)|(\n\s*$)', '', cell_text)
self.run_selection(cell_text)
return
cell_name = run_input['cell_name']
filename = run_input['path']
exec_params = conf['params']
params: IPythonConsolePyConfiguration = exec_params['executor_params']
run_method = params.get('run_method', 'runcell')
self.run_cell(cell_text, cell_name, filename,
method=run_method)
# ---- For execution and debugging
def run_script(self, filename, wdir, args='',
post_mortem=False, current_client=True,
clear_variables=False, console_namespace=False,
method=None):
"""
Run script in current or dedicated client.
Parameters
----------
filename : str
Path to file that will be run.
wdir : str
Working directory from where the file should be run.
args : str, optional
Arguments defined to run the file.
post_mortem : bool, optional
True if in case of error the execution should enter in
post-mortem mode, False otherwise.
current_client : bool, optional
True if the execution should be done in the current client,
False if the execution needs to be done in a dedicated client.
clear_variables : bool, optional
True if all the variables should be removed before execution,
False otherwise.
console_namespace : bool, optional
True if the console namespace should be used, False otherwise.
method : str or None
Method to run the file. It must accept the same arguments as
`runfile`.
Returns
-------
None.
"""
self.sig_unmaximize_plugin_requested.emit()
self.get_widget().run_script(
filename,
wdir,
args,
post_mortem,
current_client,
clear_variables,
console_namespace,
method
)
def run_cell(self, code, cell_name, filename, method='runcell'):
"""
Run cell in current or dedicated client.
Parameters
----------
code : str
Piece of code to run that corresponds to a cell.
cell_name : str or int
Cell name or index.
filename : str
Path of the file where the cell to execute is located.
method : str, optional
Name handler of the kernel function to be used to execute the cell.
The default is 'runcell'.
Returns
-------
None.
"""
self.sig_unmaximize_plugin_requested.emit()
self.get_widget().run_cell(code, cell_name, filename, method=method)
def execute_code(self, lines, current_client=True, clear_variables=False):
"""
Execute code instructions.
Parameters
----------
lines : str
Code lines to execute.
current_client : bool, optional
True if the execution should be done in the current client.
The default is True.
clear_variables : bool, optional
True if before the execution the variables should be cleared.
The default is False.
Returns
-------
None.
"""
self.get_widget().execute_code(
lines,
current_client=current_client,
clear_variables=clear_variables)
    def run_selection(self, lines):
        """Execute selected lines in the current console."""
        # Leave maximized-plugin mode so the user sees the console output.
        self.sig_unmaximize_plugin_requested.emit()
        self.get_widget().execute_code(lines)
# ---- For working directory and path management
    @qdebounced(timeout=100)
    def set_current_client_working_directory(
        self, directory: str,
        sender_plugin: Optional[str] = None,
        server_id: Optional[str] = None,
    ):
        """
        Set current client working directory.

        Debounced (100 ms) to coalesce rapid successive directory changes.

        Parameters
        ----------
        directory : str
            Path for the new current working directory.
        sender_plugin: str
            Name of the plugin that requested changing the working directory.
            Default is None, which means this plugin did it.
        server_id: str, optional
            Remote server the directory change applies to, if any.

        Returns
        -------
        None.
        """
        # Only update the cwd if this plugin didn't request changing it
        if sender_plugin != self.NAME:
            self.get_widget().set_current_client_working_directory(
                directory, server_id
            )
    def update_path(self, new_path, prioritize):
        """
        Update path on consoles.

        Parameters
        ----------
        new_path : list of str
            New state of the Python path handled by Spyder.
        prioritize : bool
            Whether to prioritize Python path in sys.path

        Returns
        -------
        None.
        """
        self.get_widget().update_path(new_path, prioritize)
# ---- For restarts
def restart(self):
"""
Restart the console.
This is needed when we switch projects to update PYTHONPATH
and the selected interpreter.
"""
self.get_widget().restart()
def restart_kernel(self):
"""
Restart the current client's kernel.
Returns
-------
None.
"""
self.get_widget().restart_kernel()
# ---- For documentation and help
    def show_intro(self):
        """Show the introduction to IPython help."""
        self.get_widget().show_intro()
    def show_guiref(self):
        """Show the qtconsole GUI reference help."""
        self.get_widget().show_guiref()
    def show_quickref(self):
        """Show the IPython Cheat Sheet."""
        self.get_widget().show_quickref()
# ---- For the Remote client plugin
# -------------------------------------------------------------------------
    @cached_property
    def _remote_client(self):
        """Cached handle to the RemoteClient plugin."""
        return self.get_plugin(Plugins.RemoteClient)
    def _add_remote_consoles_menu(self):
        """Add remote consoles submenu to the Consoles menu."""
        widget = self.get_widget()
        # NOTE(review): render=False presumably defers building the menu UI
        # until it's shown — confirm against the widget implementation.
        widget.setup_remote_consoles_submenu(render=False)
        menu = widget.get_menu(RemoteConsolesMenus.RemoteConsoles)
        mainmenu = self.get_plugin(Plugins.MainMenu)
        mainmenu.add_item_to_application_menu(
            menu,
            menu_id=ApplicationMenus.Consoles,
            section=ConsolesMenuSections.New,
            before=IPythonConsoleWidgetActions.ConnectToKernel,
        )
        # Guard so the submenu is only added once.
        self._is_remote_consoles_menu_added = True
    @Slot(str)
    def _close_remote_clients(self, server_id):
        """Close all clients belonging to the stopped remote server."""
        self.get_widget().close_remote_clients(server_id)
    @Slot(str)
    def _rename_remote_clients(self, server_id):
        """Rename all clients belonging to the renamed remote server."""
        self.get_widget().rename_remote_clients(server_id)
    @Slot()
    def _on_remote_server_changed(self):
        """Rebuild the remote consoles submenu after a server change."""
        self.get_widget().setup_remote_consoles_submenu()
    @Slot(str)
    def _on_remote_server_connected(self, server_id):
        """Populate the consoles submenu for a newly connected server."""
        self.get_widget().setup_server_consoles_submenu(server_id)
    @Slot(str)
    def _on_remote_server_disconnected(self, server_id):
        """Clear the consoles submenu of a disconnected server."""
        self.get_widget().clear_server_consoles_submenu(server_id)
# ---- Methods related to the Application plugin
# ------------------------------------------------------------------------
def _enable_edit_action(self, action_name: str, enabled: bool) -> None:
"""Enable or disable edit action for this plugin."""
application = self.get_plugin(Plugins.Application, error=False)
if application:
application.enable_edit_action(action_name, enabled, self.NAME)
def _enable_search_action(self, action_name: str, enabled: bool) -> None:
"""Enable or disable search action for this plugin."""
application = self.get_plugin(Plugins.Application, error=False)
if application:
application.enable_search_action(action_name, enabled, self.NAME)
| IPythonConsole |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/few_shot.py | {
"start": 836,
"end": 3666
} | class ____(BaseModel):
"""Prompt template that contains few shot examples."""
examples: list[dict] | None = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: BaseExampleSelector | None = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
    @model_validator(mode="before")
    @classmethod
    def check_examples_and_selector(cls, values: dict) -> Any:
        """Check that one and only one of examples/example_selector are provided.

        Runs in "before" mode, i.e. on the raw input dict prior to field
        validation.

        Args:
            values: The values to check.

        Returns:
            The values if they are valid.

        Raises:
            ValueError: If neither or both examples and example_selector are provided.
            ValueError: If both examples and example_selector are provided.
        """
        examples = values.get("examples")
        example_selector = values.get("example_selector")
        if examples and example_selector:
            msg = "Only one of 'examples' and 'example_selector' should be provided"
            raise ValueError(msg)
        if examples is None and example_selector is None:
            msg = "One of 'examples' and 'example_selector' should be provided"
            raise ValueError(msg)
        return values
    def _get_examples(self, **kwargs: Any) -> list[dict]:
        """Get the examples to use for formatting the prompt.

        Args:
            **kwargs: Keyword arguments to be passed to the example selector.

        Returns:
            List of examples.

        Raises:
            ValueError: If neither examples nor example_selector are provided.
        """
        # Static examples take precedence; otherwise defer to the selector.
        if self.examples is not None:
            return self.examples
        if self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        msg = "One of 'examples' and 'example_selector' should be provided"
        raise ValueError(msg)
    async def _aget_examples(self, **kwargs: Any) -> list[dict]:
        """Async get the examples to use for formatting the prompt.

        Args:
            **kwargs: Keyword arguments to be passed to the example selector.

        Returns:
            List of examples.

        Raises:
            ValueError: If neither examples nor example_selector are provided.
        """
        # Static examples take precedence; otherwise defer to the selector.
        if self.examples is not None:
            return self.examples
        if self.example_selector is not None:
            return await self.example_selector.aselect_examples(kwargs)
        msg = "One of 'examples' and 'example_selector' should be provided"
        raise ValueError(msg)
| _FewShotPromptTemplateMixin |
python | huggingface__transformers | tests/models/pix2struct/test_modeling_pix2struct.py | {
"start": 11116,
"end": 12824
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (Pix2StructTextModel,) if is_torch_available() else ()
    def setUp(self):
        """Build the model tester and config tester used by the tests."""
        self.model_tester = Pix2StructTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Pix2StructTextConfig, hidden_size=37)
    def test_config(self):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def test_model(self):
        """Build a model from prepared inputs and check its outputs."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`")
    def test_training(self):
        # Intentionally empty: covered by the text+image test class.
        pass
    @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`")
    def test_training_gradient_checkpointing(self):
        # Intentionally empty: covered by the text+image test class.
        pass
    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        # Intentionally empty: known gradient-checkpointing issue (see skip reason).
        pass
    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        # Intentionally empty: known gradient-checkpointing issue (see skip reason).
        pass
    @unittest.skip(reason="Pix2Struct does not use inputs_embeds")
    def test_inputs_embeds(self):
        # Intentionally empty: the model has no inputs_embeds path.
        pass
    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        model_name = "google/pix2struct-textcaps-base"
        model = Pix2StructTextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
| Pix2StructTextModelTest |
python | pytorch__pytorch | tools/linter/adapters/gb_registry_linter.py | {
"start": 532,
"end": 9372
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def _collect_all_calls(
    dynamo_dir: Path,
) -> dict[str, list[tuple[dict[str, Any], Path]]]:
    """Return mapping *gb_type → list[(call_info, file_path)]* for all occurrences.

    Walks every ``*.py`` file under *dynamo_dir* and groups the
    ``find_unimplemented_calls`` results by their ``gb_type``.
    """
    gb_type_calls: dict[str, list[tuple[dict[str, Any], Path]]] = {}
    for py_file in dynamo_dir.rglob("*.py"):
        for call in find_unimplemented_calls(py_file, dynamo_dir):
            # setdefault replaces the explicit membership check/branch.
            gb_type_calls.setdefault(call["gb_type"], []).append((call, py_file))
    return gb_type_calls
def _create_registry_entry(
gb_type: str, context: str, explanation: str, hints: list[str]
) -> dict[str, Any]:
"""Create a registry entry with consistent format."""
return {
"Gb_type": gb_type,
"Context": context,
"Explanation": explanation,
"Hints": hints or [],
}
def _update_registry_with_changes(
    registry: dict,
    calls: dict[str, tuple[dict[str, Any], Path]],
    renames: dict[str, str] | None = None,
) -> dict:
    """Calculate what the updated registry should look like.

    Returns a new dict; entry histories are preserved by prepending the
    newest version of an entry to its list.
    """
    renames = renames or {}
    updated_registry = dict(registry)
    # Index the most recent (first) entry of each registry key by gb_type.
    latest_entry: dict[str, Any] = {
        entries[0]["Gb_type"]: entries[0] for entries in registry.values()
    }
    gb_type_to_key: dict[str, str] = {
        entries[0]["Gb_type"]: key for key, entries in registry.items()
    }
    # Method for determining add vs. update:
    # - If gb_type exists in registry but content differs: UPDATE (append new entry to preserve history)
    # - If gb_type is new but content matches existing entry: RENAME (append new entry with new gb_type)
    # - If gb_type is completely new: ADD (create new registry entry with a new GBID)
    for old_gb_type, new_gb_type in renames.items():
        registry_key = gb_type_to_key[old_gb_type]
        old_entry = updated_registry[registry_key][0]
        new_entry = _create_registry_entry(
            new_gb_type,
            old_entry["Context"],
            old_entry["Explanation"],
            old_entry["Hints"],
        )
        updated_registry[registry_key] = [new_entry] + updated_registry[registry_key]
        # Keep the lookup tables in sync with the rename.
        latest_entry[new_gb_type] = new_entry
        gb_type_to_key[new_gb_type] = registry_key
        del latest_entry[old_gb_type]
        del gb_type_to_key[old_gb_type]
    for gb_type, (call, file_path) in calls.items():
        if gb_type in latest_entry:
            existing_entry = latest_entry[gb_type]
            # Hints are compared order-insensitively.
            if not (
                call["context"] == existing_entry["Context"]
                and call["explanation"] == existing_entry["Explanation"]
                and sorted(call["hints"]) == sorted(existing_entry["Hints"])
            ):
                registry_key = gb_type_to_key[gb_type]
                new_entry = _create_registry_entry(
                    gb_type, call["context"], call["explanation"], call["hints"]
                )
                updated_registry[registry_key] = [new_entry] + updated_registry[
                    registry_key
                ]
        else:
            new_key = next_gb_id(updated_registry)
            new_entry = _create_registry_entry(
                gb_type, call["context"], call["explanation"], call["hints"]
            )
            updated_registry[new_key] = [new_entry]
    return updated_registry
def check_registry_sync(dynamo_dir: Path, registry_path: Path) -> list[LintMessage]:
"""Check registry sync and return lint messages."""
lint_messages = []
all_calls = _collect_all_calls(dynamo_dir)
duplicates = []
for gb_type, call_list in all_calls.items():
if len(call_list) > 1:
first_call = call_list[0][0]
for call, file_path in call_list[1:]:
if (
call["context"] != first_call["context"]
or call["explanation"] != first_call["explanation"]
or sorted(call["hints"]) != sorted(first_call["hints"])
):
duplicates.append({"gb_type": gb_type, "calls": call_list})
break
for dup in duplicates:
gb_type = dup["gb_type"]
calls = dup["calls"]
description = f"The gb_type '{gb_type}' is used {len(calls)} times with different content. "
description += "Each gb_type must be unique across your entire codebase."
lint_messages.append(
LintMessage(
path=str(calls[0][1]),
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="Duplicate gb_type",
original=None,
replacement=None,
description=description,
)
)
if duplicates:
return lint_messages
calls = {gb_type: calls[0] for gb_type, calls in all_calls.items()}
registry = load_registry(registry_path)
latest_entry: dict[str, Any] = {
entries[0]["Gb_type"]: entries[0] for entries in registry.values()
}
renames: dict[str, str] = {}
remaining_calls = dict(calls)
for gb_type, (call, file_path) in calls.items():
if gb_type not in latest_entry:
for existing_gb_type, existing_entry in latest_entry.items():
if (
call["context"] == existing_entry["Context"]
and call["explanation"] == existing_entry["Explanation"]
and sorted(call["hints"]) == sorted(existing_entry["Hints"])
):
renames[existing_gb_type] = gb_type
del remaining_calls[gb_type]
break
needs_update = bool(renames)
for gb_type, (call, file_path) in remaining_calls.items():
if gb_type in latest_entry:
existing_entry = latest_entry[gb_type]
if not (
call["context"] == existing_entry["Context"]
and call["explanation"] == existing_entry["Explanation"]
and sorted(call["hints"] or []) == sorted(existing_entry["Hints"] or [])
):
needs_update = True
break
else:
needs_update = True
break
if needs_update:
updated_registry = _update_registry_with_changes(
registry, remaining_calls, renames
)
original_content = registry_path.read_text(encoding="utf-8")
replacement_content = (
json.dumps(updated_registry, indent=2, ensure_ascii=False) + "\n"
)
changes = []
if renames:
for old, new in renames.items():
changes.append(f"renamed '{old}' → '{new}'")
if remaining_calls:
new_count = sum(
1 for gb_type in remaining_calls if gb_type not in latest_entry
)
if new_count:
changes.append(f"added {new_count} new gb_types")
description = f"Registry sync needed ({', '.join(changes)}). Run `lintrunner -a` to apply changes."
lint_messages.append(
LintMessage(
path=str(registry_path),
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.WARNING,
name="Registry sync needed",
original=original_content,
replacement=replacement_content,
description=description,
)
)
return lint_messages
if __name__ == "__main__":
script_dir = Path(__file__).resolve()
repo_root = script_dir.parents[3]
default_registry_path = (
repo_root / "torch" / "_dynamo" / "graph_break_registry.json"
)
default_dynamo_dir = repo_root / "torch" / "_dynamo"
parser = argparse.ArgumentParser(
description="Auto-sync graph break registry with source code"
)
parser.add_argument(
"--dynamo-dir",
type=Path,
default=default_dynamo_dir,
help=f"Path to the dynamo directory (default: {default_dynamo_dir})",
)
parser.add_argument(
"--registry-path",
type=Path,
default=default_registry_path,
help=f"Path to the registry file (default: {default_registry_path})",
)
args = parser.parse_args()
lint_messages = check_registry_sync(
dynamo_dir=args.dynamo_dir, registry_path=args.registry_path
)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
| LintMessage |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 336076,
"end": 346686
} | class ____(LoopNode, StatNode):
# for name from expr rel name rel expr
#
# target NameNode
# bound1 ExprNode
# relation1 string
# relation2 string
# bound2 ExprNode
# step ExprNode or None
# body StatNode
# else_clause StatNode or None
#
# Used internally:
#
# from_range bool
# is_py_target bool
# loopvar_node ExprNode (usually a NameNode or temp node)
# py_loopvar_node PyTempNode or None
child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]
is_py_target = False
loopvar_node = None
py_loopvar_node = None
from_range = False
gil_message = "For-loop using object bounds or target"
def nogil_check(self, env):
for x in (self.target, self.bound1, self.bound2):
if x.type.is_pyobject:
self.gil_error()
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
from . import ExprNodes
self.target = self.target.analyse_target_types(env)
self.bound1 = self.bound1.analyse_types(env)
self.bound2 = self.bound2.analyse_types(env)
if self.step is not None:
if isinstance(self.step, ExprNodes.UnaryMinusNode):
warning(self.step.pos, "Probable infinite loop in for-from-by statement. "
"Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
self.set_up_loop(env)
target_type = self.target.type
if not (target_type.is_pyobject or target_type.is_numeric):
error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
self.body = self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause = self.else_clause.analyse_expressions(env)
return self
def set_up_loop(self, env):
from . import ExprNodes
target_type = self.target.type
if target_type.is_numeric:
loop_type = target_type
else:
if target_type.is_enum:
warning(self.target.pos,
"Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
if self.step is not None and not self.step.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
self.bound1 = self.bound1.coerce_to(loop_type, env)
self.bound2 = self.bound2.coerce_to(loop_type, env)
if not self.bound2.is_literal:
self.bound2 = self.bound2.coerce_to_temp(env)
if self.step is not None:
self.step = self.step.coerce_to(loop_type, env)
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
if target_type.is_numeric or target_type.is_enum:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
self.loopvar_node = self.target
self.py_loopvar_node = None
else:
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
old_loop_labels = code.new_loop_labels()
from_range = self.from_range
self.bound1.generate_evaluation_code(code)
self.bound2.generate_evaluation_code(code)
offset, incop = self.relation_table[self.relation1]
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
else:
step = '1'
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.allocate(code)
loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
if from_range and not self.is_py_target:
loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
else:
loopvar_name = self.loopvar_node.result()
if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
loopvar_name,
self.bound1.result(), offset, step,
loopvar_name, self.relation2, self.bound2.result(), step,
loopvar_name, incop))
else:
code.putln("for (%s = %s%s; %s %s %s; %s%s) {" % (
loopvar_name,
self.bound1.result(), offset,
loopvar_name, self.relation2, self.bound2.result(),
loopvar_name, incop))
coerced_loopvar_node = self.py_loopvar_node
if coerced_loopvar_node is None and from_range:
coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
if coerced_loopvar_node is not None:
coerced_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(coerced_loopvar_node, code)
code.write_trace_line(self.pos)
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
if not from_range and self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
# exactly like those with C targets with regards to re-assignment
# of the loop variable.
if self.target.entry.is_pyglobal:
# We know target is a NameNode, this is the only ugly case.
target_node = ExprNodes.PyTempNode(self.target.pos, None)
target_node.allocate(code)
interned_cname = code.intern_identifier(self.target.entry.name)
if self.target.entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
lookup_func = '__Pyx_GetModuleGlobalName(%s, %s); %s'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
lookup_func = '__Pyx_GetNameInClass(%s, {}, %s); %s'.format(
self.target.entry.scope.namespace_cname)
code.putln(lookup_func % (
target_node.result(),
interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
target_node.generate_gotref(code)
else:
target_node = self.target
from_py_node = ExprNodes.CoerceFromPyTypeNode(
self.loopvar_node.type, target_node, self.target.entry.scope)
from_py_node.temp_code = loopvar_name
from_py_node.generate_result_code(code)
if self.target.entry.is_pyglobal:
code.put_decref(target_node.result(), target_node.type)
target_node.release(code)
code.putln("}")
if not from_range and self.py_loopvar_node:
# This is potentially wasteful, but we don't want the semantics to
# depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
if from_range and not self.is_py_target:
code.funcstate.release_temp(loopvar_name)
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
self.bound1.generate_disposal_code(code)
self.bound1.free_temps(code)
self.bound2.generate_disposal_code(code)
self.bound2.free_temps(code)
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.release(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.release(code)
if self.step is not None:
self.step.generate_disposal_code(code)
self.step.free_temps(code)
relation_table = {
# {relop : (initial offset, increment op)}
'<=': ("", "++"),
'<' : ("+1", "++"),
'>=': ("", "--"),
'>' : ("-1", "--"),
}
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.bound1.generate_function_definitions(env, code)
self.bound2.generate_function_definitions(env, code)
if self.step is not None:
self.step.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.bound1.annotate(code)
self.bound2.annotate(code)
if self.step:
self.step.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
| ForFromStatNode |
python | rapidsai__cudf | python/cudf/cudf/core/resample.py | {
"start": 3409,
"end": 3470
} | class ____(_Resampler, SeriesGroupBy):
pass
| SeriesResampler |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper_fxir.py | {
"start": 3910,
"end": 8519
} | class ____(PythonWrapperCodegen):
"""
Backend to generate wrapper code as an FX IR graph.
"""
supports_caching = False
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.subgms: dict[str, torch.fx.GraphModule] = {}
def codegen_inputs(self) -> None:
"""
This would generate code for symbolic input shapes, strides, etc.
Since the FX converter handles this, do nothing here.
"""
def codegen_conditional(self, conditional: ir.Conditional) -> None:
"""
Conditional codegen normally emits a number of different wrapper lines.
Instead, FX conversion uses a dedicated line for the whole conditional.
"""
self.writeline(ConditionalLine(self, conditional))
for subgraph in (conditional.true_subgraph, conditional.false_subgraph):
self.codegen_subgraph_common(subgraph)
def define_subgraph_launcher_fn(
self, name: str, subgraph_code: Union[ValueWithLineMap, FileBackedGraphModule]
) -> None:
"""
Record subgms as they're generated.
"""
assert isinstance(subgraph_code, FileBackedGraphModule)
self.subgms[name] = subgraph_code.gm
@property
@cache_property_on_self
def is_subgraph(self) -> bool:
return isinstance(self, SubgraphPythonWrapperCodegen)
def get_fx_graph_inputs(
self,
) -> dict[str, Union[ir.TensorBox, ir.TorchBindObject, sympy.Expr, None]]:
"""
Get the input nodes corresponding to FX graph placeholders.
"""
# pyrefly: ignore [missing-argument]
if V.aot_compilation and not self.is_subgraph:
# AOT graphs must match the signature of the input module.
return {
node.name: V.graph.graph_inputs.get(node.name)
for node in V.graph.module.graph.find_nodes(op="placeholder") # type: ignore[operator, union-attr]
}
return self.get_graph_inputs()
def _generate(self, is_inference: bool) -> tuple[FileBackedGraphModule, None]:
self.run_wrapper_ir_passes(is_inference)
prologue = "\n".join(
[
self.imports.getvalue(),
self.header.getvalue(),
]
)
gm = FxConverter(
lines=self.lines,
prologue=prologue,
graph_inputs=self.get_fx_graph_inputs(),
graph_outputs=self.get_graph_outputs(),
subgms=self.subgms,
# pyrefly: ignore [missing-argument]
is_subgraph=self.is_subgraph,
).generate()
compiled_fn = self.compile_graph(gm)
return FileBackedGraphModule(gm, compiled_fn), None
def compile_graph(self, gm: GraphModule) -> Callable[..., Any]:
"""
Converts the graph module into a runnable function. The default implementation
is simply an interpreter calling kernels in eager mode. Derived backends can
override this to do further compilation.
"""
return gm.forward
def write_header(self) -> None:
"""
Python subgraphs normally lack headers.
Override this behavior to generate prologues for FX subgraphs.
"""
PythonWrapperCodegen.write_header(self)
@classmethod
def create(
cls: type["WrapperFxCodegen"],
is_subgraph: bool,
subgraph_name: Optional[str],
parent_wrapper: Optional[PythonWrapperCodegen],
partition_signatures: Optional[ir.GraphPartitionSignature] = None,
) -> "WrapperFxCodegen":
if is_subgraph:
assert subgraph_name is not None
assert parent_wrapper is not None
# Subgraphs override some methods of PythonWrapperCodegen.
# Apply these overrides to the user-provided class, with priority given to
# user-provided methods.
class SubgraphFxWrapperCodegen(cls, SubgraphPythonWrapperCodegen): # type: ignore[misc,valid-type]
def compile_graph(self, gm: GraphModule) -> Callable[..., Any]:
"""
Skip graph compilation for subgraphs.
"""
def crash_if_run(*args: Any) -> None:
raise NotImplementedError("Cannot run a subgraph in isolation!")
return crash_if_run
return SubgraphFxWrapperCodegen(
subgraph_name, parent_wrapper, partition_signatures
)
return cls()
@dataclasses.dataclass
| WrapperFxCodegen |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_beta_types.py | {
"start": 1772,
"end": 1956
} | class ____(BetaRawMessageStopEvent, GenericModel, Generic[ResponseFormatT]):
type: Literal["message_stop"]
message: ParsedBetaMessage[ResponseFormatT]
| ParsedBetaMessageStopEvent |
python | getsentry__sentry | src/sentry/sentry_apps/token_exchange/validator.py | {
"start": 510,
"end": 2684
} | class ____:
"""
Validates general authorization params for all types of token exchanges.
"""
install: SentryAppInstallation
client_id: str
user: User
def run(self) -> bool:
self._validate_is_sentry_app_making_request()
self._validate_app_is_owned_by_user()
self._validate_installation()
return True
def _validate_is_sentry_app_making_request(self) -> None:
if not self.user.is_sentry_app:
raise SentryAppIntegratorError(
"User is not a Sentry App(custom integration)",
webhook_context={
"user": self.user.name,
},
)
def _validate_app_is_owned_by_user(self) -> None:
if self.sentry_app.proxy_user != self.user:
raise SentryAppIntegratorError(
"Integration does not belong to given user",
webhook_context={
"user": self.user.name,
"integration": self.sentry_app.slug,
"installation_uuid": self.install.uuid,
},
)
def _validate_installation(self) -> None:
if self.install.sentry_app.id != self.sentry_app.id:
raise SentryAppIntegratorError(
f"Given installation is not for integration: {self.sentry_app.slug}",
webhook_context={"installation_uuid": self.install.uuid},
)
@cached_property
def sentry_app(self) -> SentryApp:
try:
return self.application.sentry_app
except SentryApp.DoesNotExist:
raise SentryAppSentryError(
"Integration does not exist",
webhook_context={"application_id": self.application.id},
)
@cached_property
def application(self) -> ApiApplication:
try:
return ApiApplication.objects.get(client_id=self.client_id)
except ApiApplication.DoesNotExist:
raise SentryAppSentryError(
"Application does not exist",
webhook_context={"client_id": self.client_id[:SENSITIVE_CHARACTER_LIMIT]},
)
| Validator |
python | rq__rq | tests/test_cli.py | {
"start": 722,
"end": 1418
} | class ____(RQTestCase):
def setUp(self):
super().setUp()
db_num = self.connection.connection_pool.connection_kwargs['db']
self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num
self.connection: Redis = Redis.from_url(self.redis_url)
def tearDown(self):
self.connection.close()
def assert_normal_execution(self, result):
if result.exit_code == 0:
return True
else:
print('Non normal execution')
print(f'Exit Code: {result.exit_code}')
print(f'Output: {result.output}')
print(f'Exception: {result.exception}')
self.assertEqual(result.exit_code, 0)
| CLITestCase |
python | tensorflow__tensorflow | tensorflow/compiler/tests/matrix_inverse_op_test.py | {
"start": 1044,
"end": 3050
} | class ____(xla_test.XLATestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.session() as sess:
# Verify that x^{-1} * x == Identity matrix.
p = array_ops.placeholder(dtypes.as_dtype(y.dtype), y.shape, name="x")
with self.test_scope():
inv = linalg_ops.matrix_inverse(p, adjoint=adjoint)
tf_ans = math_ops.matmul(inv, p, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
tiling[-2:] = [1, 1]
np_ans = np.tile(np_ans, tiling)
out = sess.run(tf_ans, feed_dict={p: y})
self.assertAllClose(np_ans, out, rtol=1e-3, atol=1e-3)
self.assertShapeEqual(y, tf_ans)
def _verifyInverseReal(self, x):
for np_type in self.float_types & {np.float64, np.float32}:
self._verifyInverse(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
def testEmpty(self):
self._verifyInverseReal(np.empty([0, 2, 2]))
self._verifyInverseReal(np.empty([2, 0, 0]))
if __name__ == "__main__":
googletest.main()
| InverseOpTest |
python | psf__requests | src/requests/models.py | {
"start": 6788,
"end": 9458
} | class ____(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(
self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None,
):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for k, v in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return f"<Request [{self.method}]>"
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
| Request |
python | gevent__gevent | src/gevent/tests/test__joinall.py | {
"start": 57,
"end": 296
} | class ____(greentest.TestCase):
def test(self):
def func():
pass
a = gevent.spawn(func)
b = gevent.spawn(func)
gevent.joinall([a, b, a])
if __name__ == '__main__':
greentest.main()
| Test |
python | ray-project__ray | python/ray/tests/test_client_reconnect.py | {
"start": 2586,
"end": 3630
} | class ____(ray_client_pb2_grpc.RayletLogStreamerServicer):
"""
Forwards all requests to the real log servicer. Useful for injecting
errors between a client and server pair.
"""
def __init__(self, on_response: Optional[Hook] = None):
"""
Args:
on_response: Optional hook to inject errors before sending back a
response
"""
self.stub = None
self.on_response = on_response
def set_channel(self, channel: grpc.Channel) -> None:
self.stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
def Logstream(self, request_iterator, context):
try:
for response in self.stub.Logstream(
request_iterator, metadata=context.invocation_metadata()
):
if self.on_response:
self.on_response(response)
yield response
except grpc.RpcError as e:
context.set_code(e.code())
context.set_details(e.details())
| MiddlemanLogServicer |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_resolver.py | {
"start": 26627,
"end": 26746
} | class ____(ResolverAltSetUp, ResolverTests):
pass
@override_settings(PUBLIC_DOMAIN="readthedocs.io")
| ResolverTestsAlt |
python | python-openxml__python-docx | src/docx/opc/oxml.py | {
"start": 3652,
"end": 4505
} | class ____(BaseOxmlElement):
"""``<Override>`` element, specifying the content type to be applied for a part with
the specified partname."""
@property
def content_type(self):
"""String held in the ``ContentType`` attribute of this ``<Override>``
element."""
return self.get("ContentType")
@staticmethod
def new(partname, content_type):
"""Return a new ``<Override>`` element with attributes set to parameter values."""
xml = '<Override xmlns="%s"/>' % nsmap["ct"]
override = parse_xml(xml)
override.set("PartName", partname)
override.set("ContentType", content_type)
return override
@property
def partname(self):
"""String held in the ``PartName`` attribute of this ``<Override>`` element."""
return self.get("PartName")
| CT_Override |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 10560,
"end": 10928
} | class ____(BiffRecord):
"""
This record is part of the worksheet/workbook protection. It
determines whether the scenarios of the current sheet are protected.
Scenario protection is not active, if this record is omitted.
"""
_REC_ID = 0x00DD
def __init__(self, scenprotect):
self._rec_data = pack('<H', scenprotect)
| ScenProtectRecord |
python | GoogleCloudPlatform__python-docs-samples | media-translation/snippets/translate_from_mic.py | {
"start": 994,
"end": 5408
} | class ____:
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk):
self._rate = rate
self._chunk = chunk
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
channels=1,
rate=self._rate,
input=True,
frames_per_buffer=self._chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type=None, value=None, traceback=None):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def exit(self):
self.__exit__()
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b"".join(data)
def listen_print_loop(responses):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
"""
translation = ""
for response in responses:
# Once the transcription settles, the response contains the
# END_OF_SINGLE_UTTERANCE event.
if response.speech_event_type == SpeechEventType.END_OF_SINGLE_UTTERANCE:
print(f"\nFinal translation: {translation}")
return 0
result = response.result
translation = result.text_translation_result.translation
print(f"\nPartial translation: {translation}")
def do_translation_loop():
print("Begin speaking...")
client = media.SpeechTranslationServiceClient()
speech_config = media.TranslateSpeechConfig(
audio_encoding="linear16",
source_language_code="en-US",
target_language_code="es-ES",
)
config = media.StreamingTranslateSpeechConfig(
audio_config=speech_config, single_utterance=True
)
# The first request contains the configuration.
# Note that audio_content is explicitly set to None.
first_request = media.StreamingTranslateSpeechRequest(streaming_config=config)
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
mic_requests = (
media.StreamingTranslateSpeechRequest(audio_content=content)
for content in audio_generator
)
requests = itertools.chain(iter([first_request]), mic_requests)
responses = client.streaming_translate_speech(requests)
# Print the translation responses as they arrive
result = listen_print_loop(responses)
if result == 0:
stream.exit()
def main():
while True:
print()
option = input("Press any key to translate or 'q' to quit: ")
if option.lower() == "q":
break
do_translation_loop()
if __name__ == "__main__":
main()
# [END mediatranslation_translate_from_mic]
| MicrophoneStream |
python | dateutil__dateutil | tests/test_parser.py | {
"start": 17234,
"end": 29977
} | class ____(unittest.TestCase):
@classmethod
def setup_class(cls):
cls.tzinfos = {"BRST": -10800}
cls.brsttz = tzoffset("BRST", -10800)
cls.default = datetime(2003, 9, 25)
# Parser should be able to handle bytestring and unicode
cls.uni_str = '2014-05-01 08:00:00'
cls.str_str = cls.uni_str.encode()
def testParserParseStr(self):
from dateutil.parser import parser
assert parser().parse(self.str_str) == parser().parse(self.uni_str)
def testParseUnicodeWords(self):
class rus_parserinfo(parserinfo):
MONTHS = [("янв", "Январь"),
("фев", "Февраль"),
("мар", "Март"),
("апр", "Апрель"),
("май", "Май"),
("июн", "Июнь"),
("июл", "Июль"),
("авг", "Август"),
("сен", "Сентябрь"),
("окт", "Октябрь"),
("ноя", "Ноябрь"),
("дек", "Декабрь")]
expected = datetime(2015, 9, 10, 10, 20)
res = parse('10 Сентябрь 2015 10:20', parserinfo=rus_parserinfo())
assert res == expected
def testParseWithNulls(self):
# This relies on the from __future__ import unicode_literals, because
# explicitly specifying a unicode literal is a syntax error in Py 3.2
# May want to switch to u'...' if we ever drop Python 3.2 support.
pstring = '\x00\x00August 29, 1924'
assert parse(pstring) == datetime(1924, 8, 29)
def testDateCommandFormat(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatReversed(self):
self.assertEqual(parse("2003 10:36:28 BRST 25 Sep Thu",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatWithLong(self):
if PY2:
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos={"BRST": long(-10800)}),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testISOFormatStrip2(self):
self.assertEqual(parse("2003-09-25T10:49:41+03:00"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=tzoffset(None, 10800)))
def testISOStrippedFormatStrip2(self):
self.assertEqual(parse("20030925T104941+0300"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=tzoffset(None, 10800)))
def testAMPMNoHour(self):
with pytest.raises(ParserError):
parse("AM")
with pytest.raises(ParserError):
parse("Jan 20, 2015 PM")
def testAMPMRange(self):
with pytest.raises(ParserError):
parse("13:44 AM")
with pytest.raises(ParserError):
parse("January 25, 1921 23:13 PM")
def testPertain(self):
self.assertEqual(parse("Sep 03", default=self.default),
datetime(2003, 9, 3))
self.assertEqual(parse("Sep of 03", default=self.default),
datetime(2003, 9, 25))
def testFuzzy(self):
s = "Today is 25 of September of 2003, exactly " \
"at 10:49:41 with timezone -03:00."
self.assertEqual(parse(s, fuzzy=True),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testFuzzyWithTokens(self):
s1 = "Today is 25 of September of 2003, exactly " \
"at 10:49:41 with timezone -03:00."
self.assertEqual(parse(s1, fuzzy_with_tokens=True),
(datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz),
('Today is ', 'of ', ', exactly at ',
' with timezone ', '.')))
s2 = "http://biz.yahoo.com/ipo/p/600221.html"
self.assertEqual(parse(s2, fuzzy_with_tokens=True),
(datetime(2060, 2, 21, 0, 0, 0),
('http://biz.yahoo.com/ipo/p/', '.html')))
def testFuzzyAMPMProblem(self):
# Sometimes fuzzy parsing results in AM/PM flag being set without
# hours - if it's fuzzy it should ignore that.
s1 = "I have a meeting on March 1, 1974."
s2 = "On June 8th, 2020, I am going to be the first man on Mars"
# Also don't want any erroneous AM or PMs changing the parsed time
s3 = "Meet me at the AM/PM on Sunset at 3:00 AM on December 3rd, 2003"
s4 = "Meet me at 3:00AM on December 3rd, 2003 at the AM/PM on Sunset"
self.assertEqual(parse(s1, fuzzy=True), datetime(1974, 3, 1))
self.assertEqual(parse(s2, fuzzy=True), datetime(2020, 6, 8))
self.assertEqual(parse(s3, fuzzy=True), datetime(2003, 12, 3, 3))
self.assertEqual(parse(s4, fuzzy=True), datetime(2003, 12, 3, 3))
def testFuzzyIgnoreAMPM(self):
s1 = "Jan 29, 1945 14:45 AM I going to see you there?"
with pytest.warns(UnknownTimezoneWarning):
res = parse(s1, fuzzy=True)
self.assertEqual(res, datetime(1945, 1, 29, 14, 45))
def testRandomFormat24(self):
self.assertEqual(parse("0:00 PM, PST", default=self.default,
ignoretz=True),
datetime(2003, 9, 25, 12, 0))
def testRandomFormat26(self):
with pytest.warns(UnknownTimezoneWarning):
res = parse("5:50 A.M. on June 13, 1990")
self.assertEqual(res, datetime(1990, 6, 13, 5, 50))
def testUnspecifiedDayFallback(self):
# Test that for an unspecified day, the fallback behavior is correct.
self.assertEqual(parse("April 2009", default=datetime(2010, 1, 31)),
datetime(2009, 4, 30))
def testUnspecifiedDayFallbackFebNoLeapYear(self):
self.assertEqual(parse("Feb 2007", default=datetime(2010, 1, 31)),
datetime(2007, 2, 28))
def testUnspecifiedDayFallbackFebLeapYear(self):
self.assertEqual(parse("Feb 2008", default=datetime(2010, 1, 31)),
datetime(2008, 2, 29))
def testErrorType01(self):
with pytest.raises(ParserError):
parse('shouldfail')
def testCorrectErrorOnFuzzyWithTokens(self):
assertRaisesRegex(self, ParserError, 'Unknown string format',
parse, '04/04/32/423', fuzzy_with_tokens=True)
assertRaisesRegex(self, ParserError, 'Unknown string format',
parse, '04/04/04 +32423', fuzzy_with_tokens=True)
assertRaisesRegex(self, ParserError, 'Unknown string format',
parse, '04/04/0d4', fuzzy_with_tokens=True)
def testIncreasingCTime(self):
# This test will check 200 different years, every month, every day,
# every hour, every minute, every second, and every weekday, using
# a delta of more or less 1 year, 1 month, 1 day, 1 minute and
# 1 second.
delta = timedelta(days=365+31+1, seconds=1+60+60*60)
dt = datetime(1900, 1, 1, 0, 0, 0, 0)
for i in range(200):
assert parse(dt.ctime()) == dt
dt += delta
def testIncreasingISOFormat(self):
delta = timedelta(days=365+31+1, seconds=1+60+60*60)
dt = datetime(1900, 1, 1, 0, 0, 0, 0)
for i in range(200):
assert parse(dt.isoformat()) == dt
dt += delta
def testMicrosecondsPrecisionError(self):
# Skip found out that sad precision problem. :-(
dt1 = parse("00:11:25.01")
dt2 = parse("00:12:10.01")
assert dt1.microsecond == 10000
assert dt2.microsecond == 10000
def testMicrosecondPrecisionErrorReturns(self):
# One more precision issue, discovered by Eric Brown. This should
# be the last one, as we're no longer using floating points.
for ms in [100001, 100000, 99999, 99998,
10001, 10000, 9999, 9998,
1001, 1000, 999, 998,
101, 100, 99, 98]:
dt = datetime(2008, 2, 27, 21, 26, 1, ms)
assert parse(dt.isoformat()) == dt
def testCustomParserInfo(self):
# Custom parser info wasn't working, as Michael Elsdörfer discovered.
from dateutil.parser import parserinfo, parser
class myparserinfo(parserinfo):
MONTHS = parserinfo.MONTHS[:]
MONTHS[0] = ("Foo", "Foo")
myparser = parser(myparserinfo())
dt = myparser.parse("01/Foo/2007")
assert dt == datetime(2007, 1, 1)
def testCustomParserShortDaynames(self):
# Horacio Hoyos discovered that day names shorter than 3 characters,
# for example two letter German day name abbreviations, don't work:
# https://github.com/dateutil/dateutil/issues/343
from dateutil.parser import parserinfo, parser
class GermanParserInfo(parserinfo):
WEEKDAYS = [("Mo", "Montag"),
("Di", "Dienstag"),
("Mi", "Mittwoch"),
("Do", "Donnerstag"),
("Fr", "Freitag"),
("Sa", "Samstag"),
("So", "Sonntag")]
myparser = parser(GermanParserInfo())
dt = myparser.parse("Sa 21. Jan 2017")
self.assertEqual(dt, datetime(2017, 1, 21))
def testNoYearFirstNoDayFirst(self):
dtstr = '090107'
# Should be MMDDYY
self.assertEqual(parse(dtstr),
datetime(2007, 9, 1))
self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=False),
datetime(2007, 9, 1))
def testYearFirst(self):
dtstr = '090107'
# Should be MMDDYY
self.assertEqual(parse(dtstr, yearfirst=True),
datetime(2009, 1, 7))
self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=False),
datetime(2009, 1, 7))
def testDayFirst(self):
dtstr = '090107'
# Should be DDMMYY
self.assertEqual(parse(dtstr, dayfirst=True),
datetime(2007, 1, 9))
self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=True),
datetime(2007, 1, 9))
def testDayFirstYearFirst(self):
dtstr = '090107'
# Should be YYDDMM
self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=True),
datetime(2009, 7, 1))
def testUnambiguousYearFirst(self):
dtstr = '2015 09 25'
self.assertEqual(parse(dtstr, yearfirst=True),
datetime(2015, 9, 25))
def testUnambiguousDayFirst(self):
dtstr = '2015 09 25'
self.assertEqual(parse(dtstr, dayfirst=True),
datetime(2015, 9, 25))
def testUnambiguousDayFirstYearFirst(self):
dtstr = '2015 09 25'
self.assertEqual(parse(dtstr, dayfirst=True, yearfirst=True),
datetime(2015, 9, 25))
def test_mstridx(self):
# See GH408
dtstr = '2015-15-May'
self.assertEqual(parse(dtstr),
datetime(2015, 5, 15))
def test_idx_check(self):
dtstr = '2017-07-17 06:15:'
# Pre-PR, the trailing colon will cause an IndexError at 824-825
# when checking `i < len_l` and then accessing `l[i+1]`
res = parse(dtstr, fuzzy=True)
assert res == datetime(2017, 7, 17, 6, 15)
def test_hmBY(self):
# See GH#483
dtstr = '02:17NOV2017'
res = parse(dtstr, default=self.default)
assert res == datetime(2017, 11, self.default.day, 2, 17)
def test_validate_hour(self):
# See GH353
invalid = "201A-01-01T23:58:39.239769+03:00"
with pytest.raises(ParserError):
parse(invalid)
def test_era_trailing_year(self):
dstr = 'AD2001'
res = parse(dstr)
assert res.year == 2001, res
def test_includes_timestr(self):
timestr = "2020-13-97T44:61:83"
try:
parse(timestr)
except ParserError as e:
assert e.args[1] == timestr
else:
pytest.fail("Failed to raise ParserError")
| ParserTest |
python | PyCQA__pylint | pylint/config/_breaking_changes/__init__.py | {
"start": 405,
"end": 630
} | class ____(enum.Enum):
KEEP = "Keep the same behavior"
USE_DEFAULT = "Use the new default behavior"
# This could/should always be automated
FIX_CONF = "Fix the configuration to become consistent again"
| Intention |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 5352,
"end": 6029
} | class ____(IntEnum):
"""
UndefiedState: Unknown
Executing: indicating this compaction has undone plans.
Completed: indicating all the plans of this compaction are done,
no matter successful or not.
"""
UndefiedState = 0
Executing = 1
Completed = 2
@staticmethod
def new(s: int):
if s == State.Executing:
return State.Executing
if s == State.Completed:
return State.Completed
return State.UndefiedState
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {self._name_}>"
def __str__(self) -> str:
return self._name_
| State |
python | pytorch__pytorch | torch/distributed/pipelining/schedules.py | {
"start": 19348,
"end": 24623
} | class ____(_PipelineSchedule):
"""
Base class for single-stage schedules.
Implements the `step` method.
Derived classes should implement `_step_microbatches`.
Gradients are scaled by num_microbatches depending on the `scale_grads` argument, defaulting to True. This setting
should match the configuration of your loss_fn, which may either average losses (scale_grads=True)
or sum losses (scale_grads=False).
"""
def __init__(
self,
stage: _PipelineStageBase,
n_microbatches: int,
loss_fn: Callable | None = None,
args_chunk_spec: tuple[TensorChunkSpec, ...] | None = None,
kwargs_chunk_spec: dict[str, TensorChunkSpec] | None = None,
output_merge_spec: dict[str, Any] | tuple[Any] | None = None,
scale_grads: bool = True,
):
# Init parent
super().__init__(
n_microbatches=n_microbatches,
loss_fn=loss_fn,
args_chunk_spec=args_chunk_spec,
kwargs_chunk_spec=kwargs_chunk_spec,
output_merge_spec=output_merge_spec,
scale_grads=scale_grads,
)
# Self attributes
self._stage = stage
self._num_stages = stage.num_stages
self._stage_forward_initialized = False
self._stage_backward_initialized = False
if n_microbatches < self._num_stages:
raise ValueError(
f"Number of microbatches ({n_microbatches}) must be greater than \
or equal to the number of stages ({self._num_stages})."
)
self.pipeline_order: dict[int, list[_Action | None]] | None = (
self._get_pipeline_order()
)
def _initialize_stage(self, args, kwargs):
if not self._stage_forward_initialized:
# Prepare the communication needed for the pipeline schedule execution
# This is needed because during execution we always perform a series of batch P2P ops
# The first call of the batched P2P needs to involve the global group
all_ops: list[dist.P2POp] = []
all_ops.extend(self._stage._get_init_p2p_neighbors_ops())
_wait_batch_p2p(_batch_p2p(all_ops))
self._stage._prepare_forward_infra(self._n_microbatches, args, kwargs)
self._stage_forward_initialized = True
if self._has_backward and not self._stage_backward_initialized:
self._stage._prepare_backward_infra(self._n_microbatches)
self._stage_backward_initialized = True
def step(
self,
*args,
target=None,
losses: list | None = None,
return_outputs: bool = True,
**kwargs,
):
"""
Run one iteration of the pipeline schedule with *whole-batch* input.
Will chunk the input into microbatches automatically, and go through the
microbatches according to the schedule implementation.
args: positional arguments to the model (as in non-pipeline case).
kwargs: keyword arguments to the model (as in non-pipeline case).
target: target for the loss function.
losses: a list to store the losses for each microbatch.
return_outputs: whether to return the outputs from the last stage.
"""
if self._has_backward and not torch.is_grad_enabled():
raise RuntimeError(
"step() requires gradients to be enabled for backward computation; "
"it should not be used under torch.no_grad() context. "
"Please call eval() instead."
)
# Set the same has_backward flag for stage object
self._stage.has_backward = self._has_backward
# Clean per iteration
self._stage.clear_runtime_states()
# Split inputs into microbatches
args_split, kwargs_split = self._split_inputs(args, kwargs)
# Split target into microbatches
if target is not None:
targets_split = list(torch.tensor_split(target, self._n_microbatches))
else:
targets_split = None
# Run microbatches
self._step_microbatches(
args_split, kwargs_split, targets_split, losses, return_outputs
)
# Return merged results per original format
if self._stage.is_last and return_outputs:
return self._merge_outputs(self._stage.output_chunks)
else:
return None
def _get_pipeline_order(self) -> dict[int, list[_Action | None]] | None:
"""
Returns the pipeline execution order as a schedule IR.
The returned IR is a dictionary mapping rank IDs to lists of actions.
Each action is either an _Action object representing computation to perform,
or None representing a deliberate idle step.
The None values are used to represent pipeline bubbles where a rank
must wait for dependencies from other ranks before proceeding. However
during execution, with the _PipelineScheduleRuntime, these Nones are
skipped since the relevant communication (send/recv) will be scheduled and waited on.
Returns:
A dictionary mapping rank -> list of actions
"""
return None
| PipelineScheduleSingle |
python | getsentry__sentry | tests/sentry/integrations/jira_server/test_ticket_action.py | {
"start": 707,
"end": 9682
} | class ____(RuleTestCase, BaseAPITestCase):
rule_cls = JiraServerCreateTicketAction
def setUp(self) -> None:
super().setUp()
self.integration, org_integration = self.create_provider_integration_for(
self.organization,
self.user,
provider="jira_server",
name="Jira Server",
metadata={"base_url": "https://jira.example.com", "verify_ssl": True},
)
identity = self.create_identity(
user=self.user,
external_id="jiraserver:123",
identity_provider=self.create_identity_provider(integration=self.integration),
data={
"consumer_key": "sentry-test",
"private_key": EXAMPLE_PRIVATE_KEY,
"access_token": "access-token",
"access_token_secret": "access-token-secret",
},
)
with assume_test_silo_mode(SiloMode.CONTROL):
org_integration.default_auth_id = identity.id
org_integration.save()
self.installation = self.integration.get_installation(self.organization.id)
self.login_as(user=self.user)
def trigger(self, event: GroupEvent, rule_object: Rule) -> object:
action = rule_object.data.get("actions", ())[0]
action_inst = self.get_rule(data=action, rule=rule_object)
results = list(action_inst.after(event=event))
assert len(results) == 1
rule_future = RuleFuture(rule=rule_object, kwargs=results[0].kwargs)
return results[0].callback(event, futures=[rule_future])
def get_key(self, event: GroupEvent) -> str:
return ExternalIssue.objects.get_linked_issues(event, self.integration).values_list(
"key", flat=True
)[0]
@responses.activate()
def test_ticket_rules(self) -> None:
project = "10000"
issueType = "1"
key = "external_issue_key"
sample_description = "sample bug report"
responses.add(
method=responses.GET,
url=f"https://jira.example.com/rest/api/2/issue/createmeta/{project}/issuetypes/{issueType}",
json={
"maxResults": 50,
"startAt": 0,
"total": 19,
"isLast": True,
"values": [
{
"required": True,
"schema": {"type": "issuetype", "system": "issuetype"},
"name": "Issue Type",
"fieldId": "issuetype",
"hasDefaultValue": False,
"operations": [],
"allowedValues": [
{
"self": "https://jira.example.com/rest/api/2/issuetype/10004",
"id": "10004",
"description": "A problem which impairs or prevents the functions of the product.",
"iconUrl": "https://jira.example.com/secure/viewavatar?size=xsmall&avatarId=10303&avatarType=issuetype",
"name": "Bug",
"subtask": False,
"avatarId": 10303,
}
],
},
{
"required": True,
"schema": {"type": "project", "system": "project"},
"name": "Project",
"fieldId": "project",
"hasDefaultValue": False,
"operations": ["set"],
"allowedValues": [
{
"self": "https://jira.example.com/rest/api/2/project/10000",
"id": "10000",
"key": "IS",
"name": "Initech Software",
"projectTypeKey": "software",
"avatarUrls": {
"48x48": "https://jira.example.com/secure/projectavatar?avatarId=10324",
},
}
],
},
{
"required": True,
"schema": {"type": "user", "system": "reporter"},
"name": "Reporter",
"fieldId": "reporter",
"autoCompleteUrl": "https://jira.example.com/rest/api/latest/user/search?username=",
"hasDefaultValue": False,
"operations": ["set"],
},
{
"required": True,
"schema": {"type": "string", "system": "summary"},
"name": "Summary",
"fieldId": "summary",
"hasDefaultValue": False,
"operations": ["set"],
},
],
},
status=200,
)
responses.add(
method=responses.POST,
url="https://jira.example.com/rest/api/2/issue",
json={"key": key},
status=200,
)
responses.add(
method=responses.GET,
url="https://jira.example.com/rest/api/2/issue/external_issue_key",
json={
"key": key,
"fields": {"summary": "example summary", "description": sample_description},
},
status=200,
)
# Create a new Rule
response = self.client.post(
reverse(
"sentry-api-0-project-rules",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
},
),
format="json",
data={
"name": "hello world",
"owner": self.user.id,
"environment": None,
"actionMatch": "any",
"frequency": 5,
"actions": [
{
"id": "sentry.integrations.jira_server.notify_action.JiraServerCreateTicketAction",
"integration": self.integration.id,
"dynamic_form_fields": [{"name": "project"}],
"issuetype": "1",
"name": "Create a Jira ticket in the Jira Cloud account",
"project": "10000",
}
],
"conditions": [],
},
)
assert response.status_code == 200
# Get the rule from DB
rule_object = Rule.objects.get(id=response.data["id"])
event = self.get_group_event()
# Trigger its `after`
self.trigger(event, rule_object)
# assert ticket created in DB
key = self.get_key(event)
external_issue_count = len(ExternalIssue.objects.filter(key=key))
assert external_issue_count == 1
# assert ticket created on jira server
assert isinstance(self.installation, JiraServerIntegration)
data = self.installation.get_issue(key)
assert sample_description in data["description"]
# Trigger its `after` _again_
self.trigger(event, rule_object)
# assert new ticket NOT created in DB
assert ExternalIssue.objects.count() == external_issue_count
def test_fails_validation(self) -> None:
"""
Test that the absence of dynamic_form_fields in the action fails validation
"""
# Create a new Rule
response = self.client.post(
reverse(
"sentry-api-0-project-rules",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
},
),
format="json",
data={
"name": "hello world",
"environment": None,
"actionMatch": "any",
"frequency": 5,
"actions": [
{
"id": "sentry.integrations.jira_server.notify_action.JiraServerCreateTicketAction",
"integration": self.integration.id,
"issuetype": "1",
"name": "Create a Jira ticket in the Jira Server account",
"project": "10000",
}
],
"conditions": [],
},
)
assert response.status_code == 400
assert response.data["actions"][0] == "Must configure issue link settings."
| JiraServerTicketRulesTestCase |
python | numba__numba | numba/core/compiler_machinery.py | {
"start": 731,
"end": 2447
} | class ____(metaclass=ABCMeta):
""" The base class for all compiler passes.
"""
@abstractmethod
def __init__(self, *args, **kwargs):
self._analysis = None
self._pass_id = None
@classmethod
def name(cls):
"""
Returns the name of the pass
"""
return cls._name
@property
def pass_id(self):
"""
The ID of the pass
"""
return self._pass_id
@pass_id.setter
def pass_id(self, val):
"""
Sets the ID of the pass
"""
self._pass_id = val
@property
def analysis(self):
"""
Analysis data for the pass
"""
return self._analysis
@analysis.setter
def analysis(self, val):
"""
Set the analysis data for the pass
"""
self._analysis = val
def run_initialization(self, *args, **kwargs):
"""
Runs the initialization sequence for the pass, will run before
`run_pass`.
"""
return False
@abstractmethod
def run_pass(self, *args, **kwargs):
"""
Runs the pass itself. Must return True/False depending on whether
statement level modification took place.
"""
pass
def run_finalizer(self, *args, **kwargs):
"""
Runs the initialization sequence for the pass, will run before
`run_pass`.
"""
return False
def get_analysis_usage(self, AU):
""" Override to set analysis usage
"""
pass
def get_analysis(self, pass_name):
"""
Gets the analysis from a given pass
"""
return self._analysis[pass_name]
| CompilerPass |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 481,
"end": 770
} | class ____(AbstractTemplate):
def generic(self, args, kws):
if len(args) == 2:
# Guard against binary + and -
return
op, = args
if not isinstance(op, types.NPTimedelta):
return
return signature(op, op)
| TimedeltaUnaryOp |
python | joke2k__faker | faker/providers/phone_number/bn_BD/__init__.py | {
"start": 119,
"end": 5733
} | class ____(PhoneNumberProvider):
"""
Implement phone number provider for ``bn_BD`` locale.
Sources:
- https://en.wikipedia.org/wiki/Telephone_numbers_in_Bangladesh
"""
country_calling_codes = (
"+৯৩",
"+৩৫৮ ১৮",
"+৩৫",
"+২১৩",
"+১ ৬৮৪",
"+৩৭৬",
"+২৪",
"+১ ২৬৪",
"+১ ২৬৮",
"+৫৪",
"+৩৭৪",
"+২৯৭",
"+২৪৭",
"+৬১",
"+৬৭২ ১",
"+৬৭২",
"+৪৩",
"+৯৪",
"+১ ২৪২",
"+৯৭৩",
"+৮০",
"+১ ২৪৬",
"+১ ২৬৮",
"+৩৭৫",
"+৩২",
"+৫০১",
"+২৯",
"+১ ৪১",
"+৯৭৫",
"+৫৯১",
"+৫৯ ৭",
"+৩৮৭",
"+২৬৭",
"+৫",
"+২৪৬",
"+১ ২৮৪",
"+৬৭৩",
"+৩৫৯",
"+২৬",
"+২৫৭",
"+৮৫",
"+২৩৭",
"+১",
"+২৩৮",
"+৫৯ ৩",
"+৫৯ ৪",
"+৫৯ ৭",
"+১ ৩৪৫",
"+২৩৬",
"+২৩৫",
"+৬৪",
"+৫৬",
"+৮৬",
"+৬১ ৮৯১৬৪",
"+৬১ ৮৯১৬২",
"+৫৭",
"+২৬৯",
"+২৪২",
"+২৪৩",
"+৬৮২",
"+৫০৬",
"+৩৮৫",
"+৫৩",
"+৫৯ ৯",
"+৩৫৭",
"+৪২০",
"+৪৫",
"+২৪৬",
"+২৫৩",
"+১ ৭৬৭",
"+১ ৮০৯",
"+১ ৮২৯",
"+১ ৮৪৯",
"+৬৭০",
"+৫৬",
"+৫৯৩",
"+২০",
"+৫০৩",
"+৮১ ২",
"+৮১ ৩",
"+৮২ ১৩",
"+২৪০",
"+২৯১",
"+৩৭২",
"+২৬৮",
"+২৫১",
"+৫০",
"+২৯৮",
"+৬৭৯",
"+৩৫৮",
"+৩",
"+৫৯৬",
"+৫৯৪",
"+৬৮৯",
"+২৪১",
"+২০",
"+৯৫",
"+৪৯",
"+২৩",
"+৩৫০",
"+৮১",
"+৮১ ৮",
"+৮১ ৯",
"+৩০",
"+২৯",
"+১ ৪৭৩",
"+৫৯০",
"+১ ৬৭১",
"+৫০২",
"+৪ ১৪৮১",
"+৪ ৭৮১",
"+৪ ৭৮৩৯",
"+৪ ৭৯১",
"+২৪",
"+২৪৫",
"+৫৯২",
"+৫০৯",
"+৫০৪",
"+৮৫২",
"+৩৬",
"+৩৫৪",
"+৮১ ০",
"+৮১ ১",
"+৯১",
"+৬২",
"+৮৭০",
"+৮০",
"+৮২",
"+৮৩",
"+৯৭৯",
"+৮০৮",
"+৯৮",
"+৯৬৪",
"+৩৫৩",
"+৮১ ৬",
"+৮১ ৭",
"+৪ ১৬২৪",
"+৪ ৭৫২৪",
"+৪ ৭৬২৪",
"+৪ ৭৯২৪",
"+৯৭২",
"+৩৯",
"+২৫",
"+১ ৮৭৬",
"+৪৭ ৭৯",
"+৮১",
"+৪ ১৫৩৪",
"+৯৬২",
"+৭ ৬",
"+৭ ৭",
"+২৫৪",
"+৬৮৬",
"+৮৫০",
"+৮২",
"+৩৮৩",
"+৯৬৫",
"+৯৬",
"+৮৫৬",
"+৩৭১",
"+৯৬১",
"+২৬",
"+২৩১",
"+২১৮",
"+৪২৩",
"+৩৭০",
"+৩৫২",
"+৮৫৩",
"+২৬১",
"+২৬৫",
"+৬০",
"+৯৬০",
"+২৩",
"+৩৫৬",
"+৬৯২",
"+৫৯৬",
"+২",
"+২৩০",
"+২৬২ ২৬৯",
"+২৬২ ৬৩৯",
"+৫২",
"+৬৯১",
"+১ ৮০৮",
"+৩৭৩",
"+৩৭",
"+৯৭৬",
"+৩৮২",
"+১ ৬৪",
"+২১২",
"+২৫৮",
"+৯৫",
"+৩৭৪ ৪৭",
"+৩৭৪ ৯৭",
"+২৬৪",
"+৬৭৪",
"+৯৭",
"+৩১",
"+১ ৮৬৯",
"+৬৮৭",
"+৬৪",
"+৫০৫",
"+২৭",
"+২৩৪",
"+৬৮৩",
"+৬৭২ ৩",
"+৩৮৯",
"+৯০ ৩৯২",
"+৪ ২৮",
"+১ ৬৭০",
"+৪৭",
"+৯৬৮",
"+৯২",
"+৬৮০",
"+৯৭০",
"+৫০৭",
"+৬৭৫",
"+৫৯৫",
"+৫১",
"+৬৩",
"+৬৪",
"+৪৮",
"+৩৫১",
"+১ ৭৮৭",
"+১ ৯৩৯",
"+৯৭৪",
"+২৬২",
"+৪০",
"+৭",
"+২৫০",
"+৫৯ ৪",
"+৫৯০",
"+২৯০",
"+১ ৮৬৯",
"+১ ৭৫৮",
"+৫৯০",
"+৫০৮",
"+১ ৭৮৪",
"+৬৮৫",
"+৩৭৮",
"+২৩৯",
"+৯৬",
"+২১",
"+৩৮১",
"+২৪৮",
"+২৩২",
"+৬৫",
"+৫৯ ৩",
"+১ ৭২১",
"+৪২১",
"+৩৮৬",
"+৬৭",
"+২৫২",
"+২৭",
"+৫০",
"+৯৫ ৩৪",
"+২১",
"+৩৪",
"+৯৪",
"+২৪৯",
"+৫৯৭",
"+৪৭ ৭৯",
"+৪৬",
"+৪১",
"+৯৬৩",
"+৮৬",
"+৯২",
"+২৫",
"+৮",
"+৬",
"+৮২ ১৬",
"+২৮",
"+৬৯০",
"+৬৭৬",
"+৩৭৩ ২",
"+৩৭৩ ৫",
"+১ ৮৬৮",
"+২৯০ ৮",
"+২১৬",
"+৯০",
"+৯৩",
"+১ ৬৪৯",
"+৬৮",
"+২৫৬",
"+৩৮০",
"+৯৭১",
"+৪",
"+১",
"+৮৭৮",
"+৫৯৮",
"+১ ৩৪০",
"+৯৮",
"+৬৭৮",
"+৩৯ ০৬ ৬৯৮",
"+৩৭৯",
"+৫৮",
"+৮৪",
"+১ ৮০৮",
"+৬৮১",
"+৯৬৭",
"+২৬০",
"+২৫ ২৪",
"+২৬৩",
)
formats = (
"01 ### ######",
"01###-######",
"01#########",
"+880 1### ######",
"+880-1###-######",
"+8801#########",
"+880-2-#-####-####",
"+880-###-###-###",
)
def phone_number(self) -> str:
res = super(self.__class__, self).phone_number()
return translate_to_bengali_digits(res)
def msisdn(self) -> str:
res = super(self.__class__, self).msisdn()
return translate_to_bengali_digits(res)
| Provider |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_task_instances.py | {
"start": 64807,
"end": 76844
} | class ____(TestTaskInstanceEndpoint):
@pytest.mark.parametrize(
("task_instances", "update_extras", "payload", "expected_ti_count"),
[
pytest.param(
[
{"queue": "test_queue_1"},
{"queue": "test_queue_2"},
{"queue": "test_queue_3"},
],
True,
{"queue": ["test_queue_1", "test_queue_2"]},
2,
id="test queue filter",
),
pytest.param(
[
{"executor": "test_exec_1"},
{"executor": "test_exec_2"},
{"executor": "test_exec_3"},
],
True,
{"executor": ["test_exec_1", "test_exec_2"]},
2,
id="test executor filter",
),
pytest.param(
[
{"duration": 100},
{"duration": 150},
{"duration": 200},
],
True,
{"duration_gte": 100, "duration_lte": 200},
3,
id="test duration filter",
),
pytest.param(
[
{"logical_date": DEFAULT_DATETIME_1},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=1)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=2)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=3)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=4)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=5)},
],
False,
{
"logical_date_gte": DEFAULT_DATETIME_1.isoformat(),
"logical_date_lte": (DEFAULT_DATETIME_1 + dt.timedelta(days=2)).isoformat(),
},
3,
id="with logical date filter",
),
pytest.param(
[
{"logical_date": DEFAULT_DATETIME_1},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=1)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=2)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=3)},
],
False,
{
"dag_run_ids": ["TEST_DAG_RUN_ID_0", "TEST_DAG_RUN_ID_1"],
},
2,
id="test dag run id filter",
),
pytest.param(
[
{"logical_date": DEFAULT_DATETIME_1},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=1)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=2)},
{"logical_date": DEFAULT_DATETIME_1 + dt.timedelta(days=3)},
],
False,
{
"task_ids": ["print_the_context", "log_sql_query"],
},
2,
id="test task id filter",
),
],
)
def test_should_respond_200(
self, test_client, task_instances, update_extras, payload, expected_ti_count, session
):
self.create_task_instances(
session,
update_extras=update_extras,
task_instances=task_instances,
)
with assert_queries_count(4):
response = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json=payload,
)
body = response.json()
assert response.status_code == 200, body
assert expected_ti_count == body["total_entries"]
assert expected_ti_count == len(body["task_instances"])
check_last_log(session, dag_id="~", event="get_task_instances_batch", logical_date=None)
def test_should_respond_200_for_order_by(self, test_client, session):
dag_id = "example_python_operator"
self.create_task_instances(
session,
task_instances=[
{"start_date": DEFAULT_DATETIME_1 + dt.timedelta(minutes=(i + 1))} for i in range(10)
],
dag_id=dag_id,
)
ti_count = session.query(TaskInstance).filter(TaskInstance.dag_id == dag_id).count()
# Ascending order
response_asc = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json={"order_by": "start_date", "dag_ids": [dag_id]},
)
assert response_asc.status_code == 200, response_asc.json()
assert response_asc.json()["total_entries"] == ti_count
assert len(response_asc.json()["task_instances"]) == ti_count
# Descending order
response_desc = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json={"order_by": "-start_date", "dag_ids": [dag_id]},
)
assert response_desc.status_code == 200, response_desc.json()
assert response_desc.json()["total_entries"] == ti_count
assert len(response_desc.json()["task_instances"]) == ti_count
# Compare
start_dates_asc = [ti["start_date"] for ti in response_asc.json()["task_instances"]]
assert len(start_dates_asc) == ti_count
start_dates_desc = [ti["start_date"] for ti in response_desc.json()["task_instances"]]
assert len(start_dates_desc) == ti_count
assert start_dates_asc == list(reversed(start_dates_desc))
@pytest.mark.parametrize(
("task_instances", "payload", "expected_ti_count"),
[
pytest.param(
[
{"task": "test_1"},
{"task": "test_2"},
],
{"dag_ids": ["latest_only"]},
2,
id="task_instance properties",
),
],
)
def test_should_respond_200_when_task_instance_properties_are_none(
self, test_client, task_instances, payload, expected_ti_count, session
):
self.ti_extras.update(
{
"start_date": None,
"end_date": None,
"state": None,
}
)
self.create_task_instances(
session,
dag_id="latest_only",
task_instances=task_instances,
)
response = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json=payload,
)
body = response.json()
assert response.status_code == 200, body
assert expected_ti_count == body["total_entries"]
assert expected_ti_count == len(body["task_instances"])
@pytest.mark.parametrize(
("payload", "expected_ti", "total_ti"),
[
pytest.param(
{"dag_ids": ["example_python_operator", "example_skip_dag"]},
17,
17,
id="with dag filter",
),
],
)
def test_should_respond_200_dag_ids_filter(self, test_client, payload, expected_ti, total_ti, session):
self.create_task_instances(session)
self.create_task_instances(session, dag_id="example_skip_dag")
response = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json=payload,
)
assert response.status_code == 200
assert len(response.json()["task_instances"]) == expected_ti
assert response.json()["total_entries"] == total_ti
def test_should_raise_400_for_no_json(self, test_client):
response = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
)
assert response.status_code == 422
assert response.json()["detail"] == [
{
"input": None,
"loc": ["body"],
"msg": "Field required",
"type": "missing",
},
]
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json={},
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json={},
)
assert response.status_code == 403
def test_should_respond_422_for_non_wildcard_path_parameters(self, test_client):
response = test_client.post(
"/dags/non_wildcard/dagRuns/~/taskInstances/list",
)
assert response.status_code == 422
assert "Input should be '~'" in str(response.json()["detail"])
response = test_client.post(
"/dags/~/dagRuns/non_wildcard/taskInstances/list",
)
assert response.status_code == 422
assert "Input should be '~'" in str(response.json()["detail"])
@pytest.mark.parametrize(
("payload", "expected"),
[
({"end_date_lte": "2020-11-10T12:42:39.442973"}, "Input should have timezone info"),
({"end_date_gte": "2020-11-10T12:42:39.442973"}, "Input should have timezone info"),
({"start_date_lte": "2020-11-10T12:42:39.442973"}, "Input should have timezone info"),
({"start_date_gte": "2020-11-10T12:42:39.442973"}, "Input should have timezone info"),
({"logical_date_gte": "2020-11-10T12:42:39.442973"}, "Input should have timezone info"),
({"logical_date_lte": "2020-11-10T12:42:39.442973"}, "Input should have timezone info"),
],
)
def test_should_raise_400_for_naive_and_bad_datetime(self, test_client, payload, expected, session):
self.create_task_instances(session)
response = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json=payload,
)
assert response.status_code == 422
assert expected in str(response.json()["detail"])
def test_should_respond_200_for_pagination(self, test_client, session):
dag_id = "example_python_operator"
self.create_task_instances(
session,
task_instances=[
{"start_date": DEFAULT_DATETIME_1 + dt.timedelta(minutes=(i + 1))} for i in range(10)
],
dag_id=dag_id,
)
# First 5 items
response_batch1 = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json={"page_limit": 5, "page_offset": 0},
)
assert response_batch1.status_code == 200, response_batch1.json()
num_entries_batch1 = len(response_batch1.json()["task_instances"])
assert num_entries_batch1 == 5
assert len(response_batch1.json()["task_instances"]) == 5
# 5 items after that
response_batch2 = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json={"page_limit": 5, "page_offset": 5},
)
assert response_batch2.status_code == 200, response_batch2.json()
num_entries_batch2 = len(response_batch2.json()["task_instances"])
assert num_entries_batch2 > 0
assert len(response_batch2.json()["task_instances"]) > 0
# Match
ti_count = 9
assert response_batch1.json()["total_entries"] == response_batch2.json()["total_entries"] == ti_count
assert (num_entries_batch1 + num_entries_batch2) == ti_count
assert response_batch1 != response_batch2
# default limit and offset
response_batch3 = test_client.post(
"/dags/~/dagRuns/~/taskInstances/list",
json={},
)
num_entries_batch3 = len(response_batch3.json()["task_instances"])
assert num_entries_batch3 == ti_count
assert len(response_batch3.json()["task_instances"]) == ti_count
| TestGetTaskInstancesBatch |
python | ray-project__ray | python/ray/serve/tests/test_cli_3.py | {
"start": 2778,
"end": 3026
} | class ____(BaseModel):
message: str = "DEFAULT"
def build_echo_app_typed(args: TypedArgs):
return Echo.bind(args.message)
k8sFNode = global_f.options(
num_replicas=2, ray_actor_options={"num_cpus": 2, "num_gpus": 1}
).bind()
| TypedArgs |
python | huggingface__transformers | examples/pytorch/image-pretraining/run_mim.py | {
"start": 7384,
"end": 18319
} | class ____:
"""
A class to generate boolean masks for the pretraining task.
A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
where 1 indicates "masked".
"""
def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
self.input_size = input_size
self.mask_patch_size = mask_patch_size
self.model_patch_size = model_patch_size
self.mask_ratio = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size")
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size")
self.rand_size = self.input_size // self.mask_patch_size
self.scale = self.mask_patch_size // self.model_patch_size
self.token_count = self.rand_size**2
self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
def __call__(self):
mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
mask = np.zeros(self.token_count, dtype=int)
mask[mask_idx] = 1
mask = mask.reshape((self.rand_size, self.rand_size))
mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
return torch.tensor(mask.flatten())
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
mask = torch.stack([example["mask"] for example in examples])
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Initialize our dataset.
ds = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
data_files=data_args.data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# If we don't have a validation split, split off a percentage of train as validation.
data_args.train_val_split = None if "validation" in ds else data_args.train_val_split
if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
split = ds["train"].train_test_split(data_args.train_val_split)
ds["train"] = split["train"]
ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.config_name_or_path:
config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(config, "decoder_type"):
config.decoder_type = "simmim"
# adapt config
model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
model_args.encoder_stride = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
}
)
# create image processor
if model_args.image_processor_name:
image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
elif model_args.model_name_or_path:
image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
IMAGE_PROCESSOR_TYPES = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type][-1]()
# create model
if model_args.model_name_or_path:
model = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedImageModeling.from_config(config, trust_remote_code=model_args.trust_remote_code)
if training_args.do_train:
column_names = ds["train"].column_names
else:
column_names = ds["validation"].column_names
if data_args.image_column_name is not None:
image_column_name = data_args.image_column_name
elif "image" in column_names:
image_column_name = "image"
elif "img" in column_names:
image_column_name = "img"
else:
image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
transforms = Compose(
[
Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
]
)
# create mask generator
mask_generator = MaskGenerator(
input_size=model_args.image_size,
mask_patch_size=data_args.mask_patch_size,
model_patch_size=model_args.patch_size,
mask_ratio=data_args.mask_ratio,
)
def preprocess_images(examples):
"""Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
which patches to mask."""
examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset")
if data_args.max_train_samples is not None:
ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset")
if data_args.max_eval_samples is not None:
ds["validation"] = (
ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(preprocess_images)
# Initialize our trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=ds["train"] if training_args.do_train else None,
eval_dataset=ds["validation"] if training_args.do_eval else None,
processing_class=image_processor,
data_collator=collate_fn,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| MaskGenerator |
python | numba__numba | numba/core/typeinfer.py | {
"start": 32134,
"end": 33027
} | class ____(object):
def __init__(self, target, index, loc):
self.target = target
self.index = index
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of delitem at {loc}", loc=self.loc):
typevars = typeinfer.typevars
if not all(typevars[var.name].defined
for var in (self.target, self.index)):
return
targetty = typevars[self.target.name].getone()
idxty = typevars[self.index.name].getone()
sig = typeinfer.context.resolve_delitem(targetty, idxty)
if sig is None:
raise TypingError("Cannot resolve delitem: %s[%s]" %
(targetty, idxty), loc=self.loc)
self.signature = sig
def get_call_signature(self):
return self.signature
| DelItemConstraint |
python | pydantic__pydantic | pydantic/v1/decorator.py | {
"start": 1773,
"end": 10339
} | class ____:
def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901
from inspect import Parameter, signature
parameters: Mapping[str, Parameter] = signature(function).parameters
if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:
raise ConfigError(
f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" '
f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator'
)
self.raw_function = function
self.arg_mapping: Dict[int, str] = {}
self.positional_only_args = set()
self.v_args_name = 'args'
self.v_kwargs_name = 'kwargs'
type_hints = get_all_type_hints(function)
takes_args = False
takes_kwargs = False
fields: Dict[str, Tuple[Any, Any]] = {}
for i, (name, p) in enumerate(parameters.items()):
if p.annotation is p.empty:
annotation = Any
else:
annotation = type_hints[name]
default = ... if p.default is p.empty else p.default
if p.kind == Parameter.POSITIONAL_ONLY:
self.arg_mapping[i] = name
fields[name] = annotation, default
fields[V_POSITIONAL_ONLY_NAME] = List[str], None
self.positional_only_args.add(name)
elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:
self.arg_mapping[i] = name
fields[name] = annotation, default
fields[V_DUPLICATE_KWARGS] = List[str], None
elif p.kind == Parameter.KEYWORD_ONLY:
fields[name] = annotation, default
elif p.kind == Parameter.VAR_POSITIONAL:
self.v_args_name = name
fields[name] = Tuple[annotation, ...], None
takes_args = True
else:
assert p.kind == Parameter.VAR_KEYWORD, p.kind
self.v_kwargs_name = name
fields[name] = Dict[str, annotation], None # type: ignore
takes_kwargs = True
# these checks avoid a clash between "args" and a field with that name
if not takes_args and self.v_args_name in fields:
self.v_args_name = ALT_V_ARGS
# same with "kwargs"
if not takes_kwargs and self.v_kwargs_name in fields:
self.v_kwargs_name = ALT_V_KWARGS
if not takes_args:
# we add the field so validation below can raise the correct exception
fields[self.v_args_name] = List[Any], None
if not takes_kwargs:
# same with kwargs
fields[self.v_kwargs_name] = Dict[Any, Any], None
self.create_model(fields, takes_args, takes_kwargs, config)
def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:
values = self.build_values(args, kwargs)
return self.model(**values)
def call(self, *args: Any, **kwargs: Any) -> Any:
m = self.init_model_instance(*args, **kwargs)
return self.execute(m)
def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:
values: Dict[str, Any] = {}
if args:
arg_iter = enumerate(args)
while True:
try:
i, a = next(arg_iter)
except StopIteration:
break
arg_name = self.arg_mapping.get(i)
if arg_name is not None:
values[arg_name] = a
else:
values[self.v_args_name] = [a] + [a for _, a in arg_iter]
break
var_kwargs: Dict[str, Any] = {}
wrong_positional_args = []
duplicate_kwargs = []
fields_alias = [
field.alias
for name, field in self.model.__fields__.items()
if name not in (self.v_args_name, self.v_kwargs_name)
]
non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}
for k, v in kwargs.items():
if k in non_var_fields or k in fields_alias:
if k in self.positional_only_args:
wrong_positional_args.append(k)
if k in values:
duplicate_kwargs.append(k)
values[k] = v
else:
var_kwargs[k] = v
if var_kwargs:
values[self.v_kwargs_name] = var_kwargs
if wrong_positional_args:
values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args
if duplicate_kwargs:
values[V_DUPLICATE_KWARGS] = duplicate_kwargs
return values
def execute(self, m: BaseModel) -> Any:
d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}
var_kwargs = d.pop(self.v_kwargs_name, {})
if self.v_args_name in d:
args_: List[Any] = []
in_kwargs = False
kwargs = {}
for name, value in d.items():
if in_kwargs:
kwargs[name] = value
elif name == self.v_args_name:
args_ += value
in_kwargs = True
else:
args_.append(value)
return self.raw_function(*args_, **kwargs, **var_kwargs)
elif self.positional_only_args:
args_ = []
kwargs = {}
for name, value in d.items():
if name in self.positional_only_args:
args_.append(value)
else:
kwargs[name] = value
return self.raw_function(*args_, **kwargs, **var_kwargs)
else:
return self.raw_function(**d, **var_kwargs)
def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None:
pos_args = len(self.arg_mapping)
class CustomConfig:
pass
if not TYPE_CHECKING: # pragma: no branch
if isinstance(config, dict):
CustomConfig = type('Config', (), config) # noqa: F811
elif config is not None:
CustomConfig = config # noqa: F811
if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):
raise ConfigError(
'Setting the "fields" and "alias_generator" property on custom Config for '
'@validate_arguments is not yet supported, please remove.'
)
class DecoratorBaseModel(BaseModel):
@validator(self.v_args_name, check_fields=False, allow_reuse=True)
def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:
if takes_args or v is None:
return v
raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')
@validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)
def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
if takes_kwargs or v is None:
return v
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v.keys()))
raise TypeError(f'unexpected keyword argument{plural}: {keys}')
@validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)
def check_positional_only(cls, v: Optional[List[str]]) -> None:
if v is None:
return
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')
@validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)
def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:
if v is None:
return
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'multiple values for argument{plural}: {keys}')
class Config(CustomConfig):
extra = getattr(CustomConfig, 'extra', Extra.forbid)
self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
| ValidatedFunction |
python | django__django | tests/test_client/views.py | {
"start": 9536,
"end": 12711
} | class ____:
@method_decorator(login_required)
def login_protected_view(self, request):
t = Template(
"This is a login protected test using a method. "
"Username is {{ user.username }}.",
name="Login Method Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
@method_decorator(permission_required("permission_not_granted"))
def permission_protected_view(self, request):
t = Template(
"This is a permission protected test using a method. "
"Username is {{ user.username }}. "
"Permissions are {{ user.get_all_permissions }}.",
name="Permissions Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
_view_manager = _ViewManager()
login_protected_method_view = _view_manager.login_protected_view
permission_protected_method_view = _view_manager.permission_protected_view
def session_view(request):
"A view that modifies the session"
request.session["tobacconist"] = "hovercraft"
t = Template(
"This is a view that modifies the session.",
name="Session Modifying View Template",
)
c = Context()
return HttpResponse(t.render(c))
def broken_view(request):
"""A view which just raises an exception, simulating a broken view."""
raise KeyError("Oops! Looks like you wrote some bad code.")
def mail_sending_view(request):
mail.EmailMessage(
"Test message",
"This is a test email",
"from@example.com",
["first@example.com", "second@example.com"],
).send()
return HttpResponse("Mail sent")
def mass_mail_sending_view(request):
m1 = mail.EmailMessage(
"First Test message",
"This is the first test email",
"from@example.com",
["first@example.com", "second@example.com"],
)
m2 = mail.EmailMessage(
"Second Test message",
"This is the second test email",
"from@example.com",
["second@example.com", "third@example.com"],
)
c = mail.get_connection()
c.send_messages([m1, m2])
return HttpResponse("Mail sent")
def nesting_exception_view(request):
"""
A view that uses a nested client to call another view and then raises an
exception.
"""
client = Client()
client.get("/get_view/")
raise Exception("exception message")
def django_project_redirect(request):
return HttpResponseRedirect("https://www.djangoproject.com/")
def no_trailing_slash_external_redirect(request):
"""
RFC 3986 Section 6.2.3: Empty path should be normalized to "/".
Use https://testserver, rather than an external domain, in order to allow
use of follow=True, triggering Client._handle_redirects().
"""
return HttpResponseRedirect("https://testserver")
def index_view(request):
"""Target for no_trailing_slash_external_redirect with follow=True."""
return HttpResponse("Hello world")
def upload_view(request):
"""Prints keys of request.FILES to the response."""
return HttpResponse(", ".join(request.FILES))
| _ViewManager |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 9986,
"end": 10264
} | class ____(AdaptiveTicker):
''' Generate ticks on a log scale.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
mantissas = Override(default=[1, 5])
| LogTicker |
python | Netflix__metaflow | metaflow/plugins/pypi/pypi_decorator.py | {
"start": 4835,
"end": 7312
} | class ____(FlowDecorator):
"""
Specifies the PyPI packages for all steps of the flow.
Use `@pypi_base` to set common packages required by all
steps and use `@pypi` to specify step-specific overrides.
Parameters
----------
packages : Dict[str, str], default: {}
Packages to use for this flow. The key is the name of the package
and the value is the version to use.
python : str, optional, default: None
Version of Python to use, e.g. '3.7.4'. A default value of None implies
that the version used will correspond to the version of the Python interpreter used to start the run.
"""
name = "pypi_base"
defaults = {"packages": {}, "python": None, "disabled": None}
def __init__(self, attributes=None, statically_defined=False, inserted_by=None):
self._attributes_with_user_values = (
set(attributes.keys()) if attributes is not None else set()
)
super().__init__(attributes, statically_defined, inserted_by)
def flow_init(
self, flow, graph, environment, flow_datastore, metadata, logger, echo, options
):
from metaflow import decorators
decorators._attach_decorators(flow, ["pypi"])
decorators._init(flow)
# @pypi uses a conda environment to create a virtual environment.
# The conda environment can be created through micromamba.
_supported_virtual_envs = ["conda"]
# To placate people who don't want to see a shred of conda in UX, we symlink
# --environment=pypi to --environment=conda
_supported_virtual_envs.extend(["pypi"])
# TODO: Hardcoded for now to support the fast bakery environment.
# We should introduce a more robust mechanism for appending supported environments, for example from within extensions.
_supported_virtual_envs.extend(["fast-bakery"])
# The --environment= requirement ensures that valid virtual environments are
# created for every step to execute it, greatly simplifying the @conda
# implementation.
if environment.TYPE not in _supported_virtual_envs:
raise InvalidEnvironmentException(
"@%s decorator requires %s"
% (
self.name,
" or ".join(
["--environment=%s" % env for env in _supported_virtual_envs]
),
)
)
| PyPIFlowDecorator |
python | python-pillow__Pillow | src/PIL/ImageCms.py | {
"start": 6969,
"end": 8663
} | class ____:
def __init__(self, profile: str | SupportsRead[bytes] | core.CmsProfile) -> None:
"""
:param profile: Either a string representing a filename,
a file like object containing a profile or a
low-level profile object
"""
self.filename: str | None = None
if isinstance(profile, str):
if sys.platform == "win32":
profile_bytes_path = profile.encode()
try:
profile_bytes_path.decode("ascii")
except UnicodeDecodeError:
with open(profile, "rb") as f:
self.profile = core.profile_frombytes(f.read())
return
self.filename = profile
self.profile = core.profile_open(profile)
elif hasattr(profile, "read"):
self.profile = core.profile_frombytes(profile.read())
elif isinstance(profile, core.CmsProfile):
self.profile = profile
else:
msg = "Invalid type for Profile" # type: ignore[unreachable]
raise TypeError(msg)
def __getattr__(self, name: str) -> Any:
if name in ("product_name", "product_info"):
deprecate(f"ImageCms.ImageCmsProfile.{name}", 13)
return None
msg = f"'{self.__class__.__name__}' object has no attribute '{name}'"
raise AttributeError(msg)
def tobytes(self) -> bytes:
"""
Returns the profile in a format suitable for embedding in
saved images.
:returns: a bytes object containing the ICC profile.
"""
return core.profile_tobytes(self.profile)
| ImageCmsProfile |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 32219,
"end": 32971
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the origin server requires the
request to be conditional.
Its typical use is to avoid the "lost update" problem, where a client
GETs a resource's state, modifies it, and PUTs it back to the server,
when meanwhile a third party has modified the state on the server,
leading to a conflict. By requiring requests to be conditional, the
server can assure that clients are working with the correct copies.
RFC 6585.3
code: 428, title: Precondition Required
"""
code = 428
title = 'Precondition Required'
explanation = 'The origin server requires the request to be conditional.'
| HTTPPreconditionRequired |
python | doocs__leetcode | solution/1700-1799/1708.Largest Subarray Length K/Solution.py | {
"start": 0,
"end": 171
} | class ____:
def largestSubarray(self, nums: List[int], k: int) -> List[int]:
i = nums.index(max(nums[: len(nums) - k + 1]))
return nums[i : i + k]
| Solution |
python | celery__celery | t/unit/backends/test_couchdb.py | {
"start": 502,
"end": 4059
} | class ____:
def setup_method(self):
self.Server = self.patching('pycouchdb.Server')
self.backend = CouchBackend(app=self.app)
def test_init_no_pycouchdb(self):
"""test init no pycouchdb raises"""
prev, module.pycouchdb = module.pycouchdb, None
try:
with pytest.raises(ImproperlyConfigured):
CouchBackend(app=self.app)
finally:
module.pycouchdb = prev
def test_get_container_exists(self):
self.backend._connection = sentinel._connection
connection = self.backend.connection
assert connection is sentinel._connection
self.Server.assert_not_called()
def test_get(self):
"""test_get
CouchBackend.get should return and take two params
db conn to couchdb is mocked.
"""
x = CouchBackend(app=self.app)
x._connection = Mock()
get = x._connection.get = MagicMock()
assert x.get('1f3fab') == get.return_value['value']
x._connection.get.assert_called_once_with('1f3fab')
def test_get_non_existent_key(self):
x = CouchBackend(app=self.app)
x._connection = Mock()
get = x._connection.get = MagicMock()
get.side_effect = pycouchdb.exceptions.NotFound
assert x.get('1f3fab') is None
x._connection.get.assert_called_once_with('1f3fab')
@pytest.mark.parametrize("key", ['1f3fab', b'1f3fab'])
def test_set(self, key):
x = CouchBackend(app=self.app)
x._connection = Mock()
x._set_with_state(key, 'value', states.SUCCESS)
x._connection.save.assert_called_once_with({'_id': '1f3fab',
'value': 'value'})
@pytest.mark.parametrize("key", ['1f3fab', b'1f3fab'])
def test_set_with_conflict(self, key):
x = CouchBackend(app=self.app)
x._connection = Mock()
x._connection.save.side_effect = (pycouchdb.exceptions.Conflict, None)
get = x._connection.get = MagicMock()
x._set_with_state(key, 'value', states.SUCCESS)
x._connection.get.assert_called_once_with('1f3fab')
x._connection.get('1f3fab').__setitem__.assert_called_once_with(
'value', 'value')
x._connection.save.assert_called_with(get('1f3fab'))
assert x._connection.save.call_count == 2
def test_delete(self):
"""test_delete
CouchBackend.delete should return and take two params
db conn to pycouchdb is mocked.
TODO Should test on key not exists
"""
x = CouchBackend(app=self.app)
x._connection = Mock()
mocked_delete = x._connection.delete = Mock()
mocked_delete.return_value = None
# should return None
assert x.delete('1f3fab') is None
x._connection.delete.assert_called_once_with('1f3fab')
def test_backend_by_url(self, url='couchdb://myhost/mycoolcontainer'):
from celery.backends.couchdb import CouchBackend
backend, url_ = backends.by_url(url, self.app.loader)
assert backend is CouchBackend
assert url_ == url
def test_backend_params_by_url(self):
url = 'couchdb://johndoe:mysecret@myhost:123/mycoolcontainer'
with self.Celery(backend=url) as app:
x = app.backend
assert x.container == 'mycoolcontainer'
assert x.host == 'myhost'
assert x.username == 'johndoe'
assert x.password == 'mysecret'
assert x.port == 123
| test_CouchBackend |
python | kamyu104__LeetCode-Solutions | Python/replace-non-coprime-numbers-in-array.py | {
"start": 69,
"end": 600
} | class ____(object):
def replaceNonCoprimes(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def gcd(a, b): # Time: O(log(min(a, b)))
while b:
a, b = b, a%b
return a
result = []
for x in nums:
while True:
g = gcd(result[-1] if result else 1, x)
if g == 1:
break
x *= result.pop()//g
result.append(x)
return result
| Solution |
python | PrefectHQ__prefect | tests/experimental/test_sla.py | {
"start": 4807,
"end": 8694
} | class ____:
async def test_runner_deployment_calls_internal_method_on_apply_with_sla(
self, monkeypatch
):
sla = TimeToCompletionSla(
name="test-sla",
duration=timedelta(minutes=10).total_seconds(),
)
deployment = RunnerDeployment.from_flow(
flow=tired_flow,
name=__file__,
_sla=sla,
)
monkeypatch.setattr(
deployment, "_create_slas", mock.AsyncMock(name="mock_create_slas")
)
await deployment.apply()
assert deployment._create_slas.called
@pytest.fixture
def deployment_id(self):
return UUID("89f0ac57-514a-4eb1-a068-dbbf44d2e199")
@pytest.fixture
def client(self, monkeypatch, prefect_client, deployment_id):
monkeypatch.setattr(prefect_client, "server_type", ServerType.CLOUD)
monkeypatch.setattr(
prefect_client,
"apply_slas_for_deployment",
mock.AsyncMock(name="mock_apply_slas_for_deployment"),
)
monkeypatch.setattr(
prefect_client,
"create_deployment",
mock.AsyncMock(name="mock_create_deployment", return_value=deployment_id),
)
return prefect_client
async def test_create_deployment_with_sla_config_against_cloud(
self, deployment, client, deployment_id
):
sla = TimeToCompletionSla(
name="test-sla",
duration=timedelta(minutes=10).total_seconds(),
)
deployment = RunnerDeployment.from_flow(
flow=tired_flow,
name=__file__,
_sla=sla,
)
await deployment._create_slas(deployment_id, client)
assert (
client.apply_slas_for_deployment.await_args_list[0].args[0] == deployment_id
)
assert (
client.apply_slas_for_deployment.await_args_list[0].args[1][0].name
== sla.name
)
async def test_create_deployment_with_multiple_slas_against_cloud(
self, client, deployment_id
):
sla1 = TimeToCompletionSla(
name="a little long",
severity="moderate",
duration=timedelta(minutes=10).total_seconds(),
)
sla2 = TimeToCompletionSla(
name="whoa this is bad",
severity="high",
duration=timedelta(minutes=30).total_seconds(),
)
deployment = RunnerDeployment.from_flow(
flow=tired_flow,
name=__file__,
_sla=[sla1, sla2],
)
await deployment._create_slas(deployment_id, client)
calls = client.apply_slas_for_deployment.await_args_list
assert len(calls) == 1
assert calls[0].args[0] == deployment_id
assert [sla.name for sla in calls[0].args[1]] == [sla1.name, sla2.name]
async def test_create_deployment_against_oss_server_produces_error_log(
self, prefect_client, deployment_id
):
sla = TimeToCompletionSla(
name="test-sla",
duration=timedelta(minutes=10).total_seconds(),
)
deployment = RunnerDeployment.from_flow(
flow=tired_flow,
name=__file__,
_sla=sla,
)
with pytest.raises(
ValueError,
match="SLA configuration is currently only supported on Prefect Cloud.",
):
await deployment._create_slas(deployment_id, prefect_client)
async def test_passing_empty_sla_list_calls_client_apply_slas_for_deployment(
self, client, deployment_id
):
deployment = RunnerDeployment.from_flow(
flow=tired_flow,
name=__file__,
_sla=[],
)
await deployment._create_slas(deployment_id, client)
assert client.apply_slas_for_deployment.called is True
| TestRunnerDeploymentApply |
python | gevent__gevent | src/gevent/tests/test__example_wsgiserver.py | {
"start": 358,
"end": 3202
} | class ____(util.TestServer):
example = 'wsgiserver.py'
URL = 'http://%s:8088' % (params.DEFAULT_LOCAL_HOST_ADDR,)
PORT = 8088
not_found_message = b'<h1>Not Found</h1>'
ssl_ctx = None
_use_ssl = False
def read(self, path='/'):
url = self.URL + path
try:
kwargs = {}
if self.ssl_ctx is not None:
kwargs = {'context': self.ssl_ctx}
response = urllib2.urlopen(url, None,
DEFAULT_XPC_SOCKET_TIMEOUT,
**kwargs)
except urllib2.HTTPError:
response = sys.exc_info()[1]
result = '%s %s' % (response.code, response.msg), response.read()
# XXX: It looks like under PyPy this isn't directly closing the socket
# when SSL is in use. It takes a GC cycle to make that true.
response.close()
return result
def _test_hello(self):
status, data = self.read('/')
self.assertEqual(status, '200 OK')
self.assertEqual(data, b"<b>hello world</b>")
def _test_not_found(self):
status, data = self.read('/xxx')
self.assertEqual(status, '404 Not Found')
self.assertEqual(data, self.not_found_message)
def _do_test_a_blocking_client(self):
# We spawn this in a separate server because if it's broken
# the whole server hangs
with self.running_server():
# First, make sure we can talk to it.
self._test_hello()
# Now create a connection and only partway finish
# the transaction
sock = socket.create_connection((params.DEFAULT_LOCAL_HOST_ADDR, self.PORT))
ssl_sock = None
if self._use_ssl:
context = ssl.SSLContext()
ssl_sock = context.wrap_socket(sock)
sock_file = ssl_sock.makefile(mode='rwb')
else:
sock_file = sock.makefile(mode='rwb')
# write an incomplete request
sock_file.write(b'GET /xxx HTTP/1.0\r\n')
sock_file.flush()
# Leave it open and not doing anything
# while the other request runs to completion.
# This demonstrates that a blocking client
# doesn't hang the whole server
self._test_hello()
# now finish the original request
sock_file.write(b'\r\n')
sock_file.flush()
line = sock_file.readline()
self.assertEqual(line, b'HTTP/1.1 404 Not Found\r\n')
sock_file.close()
if ssl_sock is not None:
ssl_sock.close()
sock.close()
def test_a_blocking_client(self):
self._do_test_a_blocking_client()
if __name__ == '__main__':
greentest.main()
| Test_wsgiserver |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.