language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/llava_next_video/modeling_llava_next_video.py
|
{
"start": 2095,
"end": 3445
}
|
class ____(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.
video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
video_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for LlavaNextVideo causal language model (or autoregressive) outputs.
"""
)
|
LlavaNextVideoModelOutputWithPast
|
python
|
scipy__scipy
|
scipy/spatial/tests/test_distance.py
|
{
"start": 63231,
"end": 64693
}
|
class ____:
def test_num_obs_y_multi_matrix(self):
for n in range(2, 10):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
assert_equal(num_obs_y(Y), n)
def test_num_obs_y_1(self):
# Tests num_obs_y(y) on a condensed distance matrix over 1
# observations. Expecting exception.
with pytest.raises(ValueError):
self.check_y(1)
def test_num_obs_y_2(self):
# Tests num_obs_y(y) on a condensed distance matrix over 2
# observations.
assert_(self.check_y(2))
def test_num_obs_y_3(self):
assert_(self.check_y(3))
def test_num_obs_y_4(self):
assert_(self.check_y(4))
def test_num_obs_y_5_10(self):
for i in range(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
# Tests num_obs_y(y) on 100 improper condensed distance matrices.
# Expecting exception.
a = set()
for n in range(2, 16):
a.add(n * (n - 1) / 2)
for i in range(5, 105):
if i not in a:
with pytest.raises(ValueError):
self.bad_y(i)
def minit(self, n):
assert_(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
|
TestNumObsY
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 4508,
"end": 4677
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneRunEvent)
name = "AlertFailureEvent"
|
GrapheneAlertFailureEvent
|
python
|
pypa__pipenv
|
pipenv/vendor/pipdeptree/_cli.py
|
{
"start": 5205,
"end": 7346
}
|
class ____(Action):
"""
Generic action that exists to convert a string into a Enum value that is then added into a `Namespace` object.
This custom action exists because argparse doesn't have support for enums.
References
----------
- https://github.com/python/cpython/issues/69247#issuecomment-1308082792
- https://docs.python.org/3/library/argparse.html#action-classes
"""
def __init__( # noqa: PLR0913, PLR0917
self,
option_strings: list[str],
dest: str,
nargs: str | None = None,
const: Any | None = None,
default: Any | None = None,
type: Any | None = None, # noqa: A002
choices: Any | None = None,
required: bool = False, # noqa: FBT001, FBT002
help: str | None = None, # noqa: A002
metavar: str | None = None,
) -> None:
if not type or not issubclass(type, enum.Enum):
msg = "type must be a subclass of Enum"
raise TypeError(msg)
if not isinstance(default, str):
msg = "default must be defined with a string value"
raise TypeError(msg)
choices = tuple(e.name.lower() for e in type)
if default not in choices:
msg = "default value should be among the enum choices"
raise ValueError(msg)
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=None, # We return None here so that we default to str.
choices=choices,
required=required,
help=help,
metavar=metavar,
)
self._enum = type
def __call__(
self,
parser: ArgumentParser, # noqa: ARG002
namespace: Namespace,
value: Any,
option_string: str | None = None, # noqa: ARG002
) -> None:
value = value or self.default
value = next(e for e in self._enum if e.name.lower() == value)
setattr(namespace, self.dest, value)
__all__ = [
"Options",
"get_options",
]
|
EnumAction
|
python
|
viewflow__viewflow
|
viewflow/workflow/admin.py
|
{
"start": 595,
"end": 1648
}
|
class ____(admin.ModelAdmin):
"""List all of viewflow process."""
icon = '<i class="material-icons">assignment</i>'
actions = None
date_hierarchy = "created"
list_display = ["pk", "created", "flow_class", "status", "participants", "brief"]
list_display_links = ["pk", "created", "flow_class"]
list_filter = ["status", "flow_class"]
readonly_fields = ["flow_class", "status", "finished", "parent_task"]
inlines = [TaskInline]
def has_add_permission(self, request, obj=None):
"""Disable manually process creation."""
return False
def participants(self, obj):
"""List of users performed tasks on the process."""
user_ids = obj.task_set.exclude(owner__isnull=True).values("owner")
USER_MODEL = auth.get_user_model()
username_field = USER_MODEL.USERNAME_FIELD
users = USER_MODEL._default_manager.filter(pk__in=user_ids).values_list(
username_field
)
return ", ".join(user[0] for user in users)
@admin.register(Task)
|
ProcessAdmin
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_config/config_type.py
|
{
"start": 4468,
"end": 4757
}
|
class ____(ConfigScalar):
def __init__(self, scalar_kind, description=None):
super().__init__(
key=type(self).__name__,
given_name=type(self).__name__,
scalar_kind=scalar_kind,
description=description,
)
|
BuiltinConfigScalar
|
python
|
PrefectHQ__prefect
|
tests/cli/test_transfer.py
|
{
"start": 6614,
"end": 8313
}
|
class ____:
"""Test resource collection from source profile."""
@patch("prefect.cli.transfer.load_profiles")
@patch("prefect.cli.transfer.use_profile")
@patch("prefect.cli.transfer.get_client")
async def test_transfer_no_resources_found(
self,
mock_get_client: MagicMock,
mock_use_profile: MagicMock,
mock_load_profiles: MagicMock,
mock_profiles: ProfilesCollection,
):
"""Test transfer when no resources are found."""
mock_load_profiles.return_value = mock_profiles
# Mock context managers
mock_use_profile.return_value.__enter__.return_value = None
mock_use_profile.return_value.__exit__.return_value = None
# Mock client with empty resource collections
mock_client = AsyncMock()
mock_client.read_work_pools = AsyncMock(return_value=[])
mock_client.read_work_queues = AsyncMock(return_value=[])
mock_client.read_deployments = AsyncMock(return_value=[])
mock_client.read_block_documents = AsyncMock(return_value=[])
mock_client.read_variables = AsyncMock(return_value=[])
mock_client.read_global_concurrency_limits = AsyncMock(return_value=[])
mock_client.read_automations = AsyncMock(return_value=[])
mock_get_client.return_value.__aenter__.return_value = mock_client
mock_get_client.return_value.__aexit__.return_value = None
await run_sync_in_worker_thread(
invoke_and_assert,
command=["transfer", "--from", "source", "--to", "target"],
expected_code=0,
expected_output_contains="No resources found to transfer",
)
|
TestResourceCollection
|
python
|
Textualize__textual
|
docs/examples/how-to/layout.py
|
{
"start": 418,
"end": 567
}
|
class ____(Placeholder):
DEFAULT_CSS = """
Tweet {
height: 5;
width: 1fr;
border: tall $background;
}
"""
|
Tweet
|
python
|
weaviate__weaviate-python-client
|
weaviate/exceptions.py
|
{
"start": 12638,
"end": 12952
}
|
class ____(WeaviateBaseError):
"""Is raised when a request to Weaviate fails and is retried multiple times."""
def __init__(self, message: str, count: int) -> None:
msg = f"""The request to Weaviate failed after {count} retries. Details: {message}"""
super().__init__(msg)
|
WeaviateRetryError
|
python
|
walkccc__LeetCode
|
solutions/2689. Extract Kth Character From The Rope Tree/2689.py
|
{
"start": 0,
"end": 394
}
|
class ____:
def getKthCharacter(self, root: object | None, k: int) -> str:
""":type root: RopeTreeNode | None"""
if root.len == 0:
return root.val[k - 1]
leftLen = (0 if not root.left
else max(root.left.len, len(root.left.val)))
if leftLen >= k:
return self.getKthCharacter(root.left, k)
return self.getKthCharacter(root.right, k - leftLen)
|
Solution
|
python
|
google__pytype
|
pytype/pyc/compiler.py
|
{
"start": 959,
"end": 7380
}
|
class ____(Exception):
"""A compilation error."""
def __init__(self, msg):
super().__init__(msg)
match = _COMPILE_ERROR_RE.match(msg)
if match:
self.error = match.group(1)
self.filename = match.group(2)
self.line = int(match.group(3))
else:
self.error = msg
self.filename = None
self.line = 1
def compile_src_string_to_pyc_string(
src, filename, python_version, python_exe: list[str], mode="exec"
):
"""Compile Python source code to pyc data.
This may use py_compile if the src is for the same version as we're running,
or else it spawns an external process to produce a .pyc file. The generated
bytecode (.pyc file) is read and both it and any temporary files are deleted.
Args:
src: Python sourcecode
filename: Name of the source file. For error messages.
python_version: Python version, (major, minor).
python_exe: A path to a Python interpreter.
mode: Same as builtins.compile: "exec" if source consists of a sequence of
statements, "eval" if it consists of a single expression, or "single" if
it consists of a single interactive statement.
Returns:
The compiled pyc file as a binary string.
Raises:
CompileError: If we find a syntax error in the file.
IOError: If our compile script failed.
"""
if can_compile_bytecode_natively(python_version):
output = io.BytesIO()
compile_bytecode.compile_src_to_pyc(src, filename or "<>", output, mode)
bytecode = output.getvalue()
else:
tempfile_options = {"mode": "w", "suffix": ".py", "delete": False}
tempfile_options.update({"encoding": "utf-8"})
fi = compatible_tempfile.NamedTemporaryFile(**tempfile_options) # pylint: disable=consider-using-with
try:
fi.write(src)
fi.close()
# In order to be able to compile pyc files for a different Python version
# from the one we're running under, we spawn an external process.
# We pass -E to ignore the environment so that PYTHONPATH and
# sitecustomize on some people's systems don't mess with the interpreter.
cmd = python_exe + ["-E", "-", fi.name, filename or fi.name, mode]
compile_script_src = pytype_source_utils.load_binary_file(_COMPILE_SCRIPT)
with subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE
) as p:
bytecode, _ = p.communicate(compile_script_src)
assert p.poll() == 0, "Child process failed"
finally:
os.unlink(fi.name)
first_byte = bytecode[0]
if first_byte == 0: # compile OK
return bytecode[1:]
elif first_byte == 1: # compile error
code = bytecode[1:] # type: bytes
raise CompileError(utils.native_str(code))
else:
raise OSError("_compile.py produced invalid result")
def get_python_executable(version: tuple[int, ...]) -> list[str] | None:
"""Get a python executable corresponding to version.
Args:
version: The required python version
Returns:
- None: The current host interpreter can compile `version`
- [path-to-exe, args]: A valid python-`version` interpreter
Raises:
PythonNotFoundError: if no suitable interpreter is found.
"""
if can_compile_bytecode_natively(version):
# pytype does not need an exe for bytecode compilation. Abort early to
# avoid extracting a large unused exe into /tmp.
return None
for exe in _get_python_exes(version):
exe_version = _get_python_exe_version(exe)
if exe_version == version:
return exe
raise PythonNotFoundError()
def can_compile_bytecode_natively(python_version):
# Optimization: calling compile_bytecode directly is faster than spawning a
# subprocess and lets us avoid extracting a large Python executable into tmp.
# We can do this only when the host and target versions match.
return python_version == sys.version_info[:2]
def _get_python_exes(python_version) -> Iterable[list[str]]:
"""Find possible python executables to use.
Arguments:
python_version: the version tuple (e.g. (3, 7))
Yields:
The path to the executable
"""
if python_version in _CUSTOM_PYTHON_EXES:
yield [_path_to_custom_exe(_CUSTOM_PYTHON_EXES[python_version])]
return
for version in (utils.format_version(python_version), "3"):
if sys.platform == "win32":
python_exe = ["py", f"-{version}"]
else:
python_exe = [f"python{version}"]
yield python_exe
def _get_python_exe_version(python_exe: list[str]):
"""Determine the major and minor version of given Python executable.
Arguments:
python_exe: absolute path to the Python executable
Returns:
Version as (major, minor) tuple, or None if it could not be determined.
"""
try:
python_exe_version = subprocess.check_output(
python_exe + ["-V"], stderr=subprocess.STDOUT
).decode()
except (subprocess.CalledProcessError, FileNotFoundError):
return None
return _parse_exe_version_string(python_exe_version)
def _parse_exe_version_string(version_str):
"""Parse the version string of a Python executable.
Arguments:
version_str: Version string as emitted by running `PYTHON_EXE -V`
Returns:
Version as (major, minor) tuple, or None if it could not be determined.
"""
# match the major.minor part of the version string, ignore the micro part
matcher = re.search(r"Python (\d+\.\d+)\.\d+", version_str)
if matcher:
return utils.version_from_string(matcher.group(1))
else:
return None
def _load_data_file(path):
"""Get the contents of a data file."""
loader = globals().get("__loader__", None)
if loader:
# For an explanation of the args to loader.get_data, see
# https://www.python.org/dev/peps/pep-0302/#optional-extensions-to-the-importer-protocol
# https://docs.python.org/3/library/importlib.html#importlib.abc.ResourceLoader.get_data
return loader.get_data(path)
with open(path, "rb") as fi:
return fi.read()
def _path_to_custom_exe(relative_path):
"""Get the full path to a custom python exe in the pytype/ src directory."""
path = pytype_source_utils.get_full_path(relative_path)
if os.path.exists(path):
return path
data = _load_data_file(path)
with tempfile.NamedTemporaryFile(delete=False, suffix="python") as fi:
fi.write(data)
fi.close()
exe_file = fi.name
os.chmod(exe_file, 0o750)
atexit.register(lambda: os.unlink(exe_file))
return exe_file
|
CompileError
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/collections.py
|
{
"start": 47913,
"end": 48002
}
|
class ____(Set[_T]):
"""An instrumented version of the built-in set."""
|
InstrumentedSet
|
python
|
doocs__leetcode
|
lcof/面试题56 - II. 数组中数字出现的次数 II/Solution.py
|
{
"start": 0,
"end": 262
}
|
class ____:
def singleNumber(self, nums: List[int]) -> int:
cnt = [0] * 32
for x in nums:
for i in range(32):
cnt[i] += x & 1
x >>= 1
return sum(1 << i for i in range(32) if cnt[i] % 3)
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/training.py
|
{
"start": 18555,
"end": 25641
}
|
class ____(typing.NamedTuple):
context: Dict[str, Feature]
feature_lists: FeatureLists
```
To parse a `SequenceExample` in TensorFlow refer to the
`tf.io.parse_sequence_example` function.
The `context` contains features which apply to the entire
example. The `feature_lists` contain a key, value map where each key is
associated with a repeated set of `tf.train.Features` (a `tf.train.FeatureList`).
A `FeatureList` represents the values of a feature identified by its key
over time / frames.
Below is a `SequenceExample` for a movie recommendation application recording a
sequence of ratings by a user. The time-independent features ("locale",
"age", "favorites") describing the user are part of the context. The sequence
of movies the user rated are part of the feature_lists. For each movie in the
sequence we have information on its name and actors and the user's rating.
This information is recorded in three separate `feature_list`s.
In the example below there are only two movies. All three `feature_list`s,
namely "movie_ratings", "movie_names", and "actors" have a feature value for
both movies. Note, that "actors" is itself a `bytes_list` with multiple
strings per movie.
```
context: {
feature: {
key : "locale"
value: {
bytes_list: {
value: [ "pt_BR" ]
}
}
}
feature: {
key : "age"
value: {
float_list: {
value: [ 19.0 ]
}
}
}
feature: {
key : "favorites"
value: {
bytes_list: {
value: [ "Majesty Rose", "Savannah Outen", "One Direction" ]
}
}
}
}
feature_lists: {
feature_list: {
key : "movie_ratings"
value: {
feature: {
float_list: {
value: [ 4.5 ]
}
}
feature: {
float_list: {
value: [ 5.0 ]
}
}
}
}
feature_list: {
key : "movie_names"
value: {
feature: {
bytes_list: {
value: [ "The Shawshank Redemption" ]
}
}
feature: {
bytes_list: {
value: [ "Fight Club" ]
}
}
}
}
feature_list: {
key : "actors"
value: {
feature: {
bytes_list: {
value: [ "Tim Robbins", "Morgan Freeman" ]
}
}
feature: {
bytes_list: {
value: [ "Brad Pitt", "Edward Norton", "Helena Bonham Carter" ]
}
}
}
}
}
```
A conformant `SequenceExample` data set obeys the following conventions:
`context`:
- All conformant context features `K` must obey the same conventions as
a conformant Example's features (see above).
`feature_lists`:
- A `FeatureList L` may be missing in an example; it is up to the
parser configuration to determine if this is allowed or considered
an empty list (zero length).
- If a `FeatureList L` exists, it may be empty (zero length).
- If a `FeatureList L` is non-empty, all features within the `FeatureList`
must have the same data type `T`. Even across `SequenceExample`s, the type `T`
of the `FeatureList` identified by the same key must be the same. An entry
without any values may serve as an empty feature.
- If a `FeatureList L` is non-empty, it is up to the parser configuration
to determine if all features within the `FeatureList` must
have the same size. The same holds for this `FeatureList` across multiple
examples.
- For sequence modeling ([example](https://github.com/tensorflow/nmt)), the
feature lists represent a sequence of frames. In this scenario, all
`FeatureList`s in a `SequenceExample` have the same number of `Feature`
messages, so that the i-th element in each `FeatureList` is part of the
i-th frame (or time step).
**Examples of conformant and non-conformant examples' `FeatureLists`:**
Conformant `FeatureLists`:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
```
Non-conformant `FeatureLists` (mismatched types):
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { int64_list: { value: [ 5 ] } } }
} }
```
Conditionally conformant `FeatureLists`, the parser configuration determines
if the feature sizes must match:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0, 6.0 ] } } }
} }
```
**Examples of conformant and non-conformant `SequenceExample`s:**
Conformant pair of SequenceExample:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } }
feature: { float_list: { value: [ 2.0 ] } } }
} }
```
Conformant pair of `SequenceExample`s:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { }
} }
```
Conditionally conformant pair of `SequenceExample`s, the parser configuration
determines if the second `feature_lists` is consistent (zero-length) or
invalid (missing "movie_ratings"):
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { }
```
Non-conformant pair of `SequenceExample`s (mismatched types):
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { int64_list: { value: [ 4 ] } }
feature: { int64_list: { value: [ 5 ] } }
feature: { int64_list: { value: [ 2 ] } } }
} }
```
Conditionally conformant pair of `SequenceExample`s; the parser configuration
determines if the feature sizes must match:
```
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.5 ] } }
feature: { float_list: { value: [ 5.0 ] } } }
} }
feature_lists: { feature_list: {
key: "movie_ratings"
value: { feature: { float_list: { value: [ 4.0 ] } }
feature: { float_list: { value: [ 5.0, 3.0 ] } }
} }
```
"""
|
SequenceExample
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_errorbars07.py
|
{
"start": 315,
"end": 2240
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_errorbars07.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with error bars."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [45470848, 45472768]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"y_error_bars": {"type": "standard_error"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
"y_error_bars": {"type": "standard_error"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
"y_error_bars": {"type": "standard_error"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
keras-team__keras
|
keras/src/optimizers/lion_test.py
|
{
"start": 178,
"end": 3782
}
|
class ____(testing.TestCase):
def test_invalid_beta_1(self):
with self.assertRaisesRegex(
ValueError,
"Argument `beta_1` must be in the \\[0, 1\\] range. Otherwise, the "
"optimizer degenerates to SignSGD. Received: beta_1=-0.1.",
):
Lion(beta_1=-0.1)
with self.assertRaisesRegex(
ValueError,
"Argument `beta_1` must be in the \\[0, 1\\] range. Otherwise, the "
"optimizer degenerates to SignSGD. Received: beta_1=0.0.",
):
Lion(beta_1=0.0)
with self.assertRaisesRegex(
ValueError,
"Argument `beta_1` must be in the \\[0, 1\\] range. Otherwise, the "
"optimizer degenerates to SignSGD. Received: beta_1=1.1.",
):
Lion(beta_1=1.1)
def test_config(self):
optimizer = Lion(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Lion(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Lion(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Lion()
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.998], [0.997], [0.996], [0.995]],
(1, 10),
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Lion(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Lion(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
@pytest.mark.requires_trainable_backend
def test_ema(self):
# TODO: test correctness
model = keras.Sequential([keras.layers.Dense(10)])
model.compile(optimizer=Lion(use_ema=True), loss="mse")
x = keras.ops.zeros((1, 5))
y = keras.ops.zeros((1, 10))
model.fit(x, y)
|
LionTest
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0059_migrate_null_rank.py
|
{
"start": 321,
"end": 543
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0058_update_timestamp_fields"),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migration
|
python
|
fastai__fastai
|
fastai/interpret.py
|
{
"start": 4482,
"end": 7727
}
|
class ____(Interpretation):
"Interpretation methods for classification models."
def __init__(self,
learn:Learner,
dl:DataLoader, # `DataLoader` to run inference over
losses:TensorBase, # Losses calculated from `dl`
act=None # Activation function for prediction
):
super().__init__(learn, dl, losses, act)
self.vocab = self.dl.vocab
if is_listy(self.vocab): self.vocab = self.vocab[-1]
def confusion_matrix(self):
"Confusion matrix as an `np.ndarray`."
x = torch.arange(0, len(self.vocab))
_,targs,decoded = self.learn.get_preds(dl=self.dl, with_decoded=True, with_preds=True,
with_targs=True, act=self.act)
d,t = flatten_check(decoded, targs)
cm = ((d==x[:,None]) & (t==x[:,None,None])).long().sum(2)
return to_np(cm)
def plot_confusion_matrix(self,
normalize:bool=False, # Whether to normalize occurrences
title:str='Confusion matrix', # Title of plot
cmap:str="Blues", # Colormap from matplotlib
norm_dec:int=2, # Decimal places for normalized occurrences
plot_txt:bool=True, # Display occurrence in matrix
**kwargs
):
"Plot the confusion matrix, with `title` and using `cmap`."
# This function is mainly copied from the sklearn docs
cm = self.confusion_matrix()
if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(**kwargs)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(self.vocab))
plt.xticks(tick_marks, self.vocab, rotation=90)
plt.yticks(tick_marks, self.vocab, rotation=0)
if plot_txt:
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white"
if cm[i, j] > thresh else "black")
ax = fig.gca()
ax.set_ylim(len(self.vocab)-.5,-.5)
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.grid(False)
def most_confused(self, min_val=1):
"Sorted descending largest non-diagonal entries of confusion matrix (actual, predicted, # occurrences"
cm = self.confusion_matrix()
np.fill_diagonal(cm, 0)
res = [(self.vocab[i],self.vocab[j],cm[i,j]) for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True)
def print_classification_report(self):
"Print scikit-learn classification report"
_,targs,decoded = self.learn.get_preds(dl=self.dl, with_decoded=True, with_preds=True,
with_targs=True, act=self.act)
d,t = flatten_check(decoded, targs)
names = [str(v) for v in self.vocab]
print(skm.classification_report(t, d, labels=list(self.vocab.o2i.values()), target_names=names))
# %% ../nbs/20_interpret.ipynb 27
|
ClassificationInterpretation
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_metrics_2.py
|
{
"start": 1325,
"end": 1545
}
|
class ____:
def __init__(self, deployment_name: str, app_name: str):
self.handle = DeploymentHandle(deployment_name, app_name)
async def call(self, *args):
await self.handle.remote(*args)
|
CallActor
|
python
|
django__django
|
tests/nested_foreign_keys/models.py
|
{
"start": 353,
"end": 450
}
|
class ____(Event):
movie = models.ForeignKey(Movie, models.SET_NULL, null=True)
|
ScreeningNullFK
|
python
|
huggingface__transformers
|
src/transformers/models/hiera/modeling_hiera.py
|
{
"start": 44352,
"end": 47273
}
|
class ____(nn.Module):
def __init__(self, config: HieraConfig):
super().__init__()
self.mask_unit_spatial_shape_final = [
i // s ** (config.num_query_pool) for i, s in zip(config.masked_unit_size, config.query_stride)
]
self.stage_dimensions = [
int(config.embed_dim * config.embed_dim_multiplier**i) for i in range(len(config.depths))
]
current_masked_unit_size = config.masked_unit_size
self.multi_scale_fusion_heads = nn.ModuleList()
for idx in range(config.num_query_pool):
kernel = [i // s for i, s in zip(current_masked_unit_size, self.mask_unit_spatial_shape_final)]
current_masked_unit_size = [i // s for i, s in zip(current_masked_unit_size, config.query_stride)]
self.multi_scale_fusion_heads.append(
nn.Conv2d(
self.stage_dimensions[idx],
self.stage_dimensions[-1],
kernel_size=kernel,
stride=kernel,
)
)
self.multi_scale_fusion_heads.append(nn.Identity())
def apply_fusion_head(self, head: nn.Module, hidden_states: torch.Tensor) -> torch.Tensor:
if isinstance(head, nn.Identity):
return hidden_states
batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size = hidden_states.shape
# From: [batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size]
# To: head([batch_size * num_mask_units, hidden_size, mask_unit_height, mask_unit_width])
hidden_states = hidden_states.reshape(
batch_size * num_mask_units, mask_unit_height, mask_unit_width, hidden_size
)
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = head(hidden_states)
# Restore original layout
hidden_states = hidden_states.permute(0, 2, 3, 1)
mask_unit_height_final, mask_unit_width_final, hidden_size = hidden_states.shape[1:]
hidden_states = hidden_states.reshape(
batch_size, num_mask_units, mask_unit_height_final, mask_unit_width_final, hidden_size
)
return hidden_states
def forward(self, feature_maps: list[torch.Tensor]) -> torch.Tensor:
# Multi-scale fusion
hidden_states = 0.0
for head, feature_map in zip(self.multi_scale_fusion_heads, feature_maps):
hidden_states = hidden_states + self.apply_fusion_head(head, feature_map)
return hidden_states
@auto_docstring(
custom_intro="""
The Hiera Model transformer with the decoder on top for self-supervised pre-training.
<Tip>
Note that we provide a script to pre-train this model on custom data in our [examples
directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
</Tip>
"""
)
|
HieraMultiScaleHead
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/models.py
|
{
"start": 111263,
"end": 112710
}
|
class ____(Request):
"""
Get the list of frameworks used in the company models
:param projects: The list of projects which models will be analyzed. If not
passed or empty then all the company and public models will be analyzed
:type projects: Sequence[str]
"""
_service = "models"
_action = "get_frameworks"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"projects": {
"description": "The list of projects which models will be analyzed. If not passed or empty then all the company and public models will be analyzed",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, projects: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetFrameworksRequest, self).__init__(**kwargs)
self.projects = projects
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
|
GetFrameworksRequest
|
python
|
django__django
|
tests/m2m_signals/models.py
|
{
"start": 148,
"end": 393
}
|
class ____(models.Model):
name = models.CharField(max_length=20)
default_parts = models.ManyToManyField(Part)
optional_parts = models.ManyToManyField(Part, related_name="cars_optional")
class Meta:
ordering = ("name",)
|
Car
|
python
|
getsentry__sentry
|
src/sentry/auth/services/auth/impl.py
|
{
"start": 8185,
"end": 9217
}
|
class ____:
d: Mapping[str, str | bytes | None]
_accessed: set[str]
def __init__(self, **d: Any):
self.d = d
self._accessed = set()
@property
def accessed(self) -> bool:
return bool(self._accessed)
def __getitem__(self, item: str) -> str | bytes:
self._accessed.add(item)
result = self.d[item]
if result is None:
raise KeyError(f"Key '{item!r}' does not exist")
return result
def __contains__(self, item: str) -> bool:
return self.d.get(item, None) is not None
def get(self, key: str, default: str | bytes | None = None) -> str | bytes | None:
try:
return self[key]
except KeyError:
return default
def promote_request_rpc_user(request: Any) -> User:
if not hasattr(request, "_promoted_user"):
setattr(request, "_promoted_user", User.objects.get(id=request.user.id))
return request._promoted_user
promote_request_api_user = promote_request_rpc_user
|
FakeRequestDict
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 60180,
"end": 60364
}
|
class ____(_ConfigBase):
factor: int
async_enabled: bool
deletion_strategy: ReplicationDeletionStrategy
ReplicationConfig = _ReplicationConfig
@dataclass
|
_ReplicationConfig
|
python
|
cython__cython
|
Cython/Debugger/libcython.py
|
{
"start": 48248,
"end": 48916
}
|
class ____(CyCName):
"""
Get the value of a Cython variable.
"""
@libpython.dont_suppress_errors
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
globals_dict = self.get_cython_globals_dict()
cython_function = self.get_cython_function(frame)
if self.is_initialized(cython_function, cyname):
cname = super().invoke(cyname, frame=frame)
return gdb.parse_and_eval(cname)
elif cyname in globals_dict:
return globals_dict[cyname]._gdbval
else:
raise gdb.GdbError("Variable %s is not initialized." % cyname)
|
CyCValue
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_handle_streaming.py
|
{
"start": 9263,
"end": 10020
}
|
class ____:
def test_app_handle(self, deployment: Deployment):
h = serve.run(deployment.bind()).options(stream=True)
gen = h.remote(5)
assert list(gen) == list(range(5))
def test_deployment_handle(self, deployment: Deployment):
@serve.deployment
class Delegate:
def __init__(self, f: DeploymentHandle):
self._f = f.options(stream=True)
async def __call__(self):
gen = self._f.remote(5)
assert [result async for result in gen] == list(range(5))
h = serve.run(Delegate.bind(deployment.bind()))
h.remote().result()
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
|
TestGeneratorFunctionDeployment
|
python
|
nedbat__coveragepy
|
tests/test_concurrency.py
|
{
"start": 23871,
"end": 28210
}
|
class ____(CoverageTest):
"""Tests of our handling of SIGTERM."""
@pytest.mark.parametrize("sigterm", [False, True])
def test_sigterm_multiprocessing_saves_data(self, sigterm: bool) -> None:
# A terminated process should save its coverage data.
self.make_file(
"clobbered.py",
"""\
import multiprocessing
import time
def subproc(x):
if x.value == 3:
print("THREE", flush=True) # line 6, missed
else:
print("NOT THREE", flush=True)
x.value = 0
time.sleep(60)
if __name__ == "__main__":
print("START", flush=True)
x = multiprocessing.Value("L", 1)
proc = multiprocessing.Process(target=subproc, args=(x,))
proc.start()
while x.value != 0:
time.sleep(.05)
proc.terminate()
print("END", flush=True)
""",
)
self.make_file(
".coveragerc",
"""\
[run]
parallel = True
concurrency = multiprocessing
"""
+ ("sigterm = true" if sigterm else ""),
)
out = self.run_command("coverage run clobbered.py")
# Under Linux, things go wrong. Does that matter?
if env.LINUX and "assert self._collectors" in out:
lines = out.splitlines(True)
out = "".join(lines[:3])
assert out == "START\nNOT THREE\nEND\n"
self.run_command("coverage combine")
out = self.run_command("coverage report -m")
if sigterm:
expected = "clobbered.py 17 1 94% 6"
else:
expected = "clobbered.py 17 5 71% 5-10"
assert self.squeezed_lines(out)[2] == expected
def test_sigterm_threading_saves_data(self) -> None:
# A terminated process should save its coverage data.
self.make_file(
"handler.py",
"""\
import os, signal
print("START", flush=True)
print("SIGTERM", flush=True)
os.kill(os.getpid(), signal.SIGTERM)
print("NOT HERE", flush=True)
""",
)
self.make_file(
".coveragerc",
"""\
[run]
# The default concurrency option.
concurrency = thread
sigterm = true
""",
)
status, out = self.run_command_status("coverage run handler.py")
assert status != 0
out_lines = out.splitlines()
assert len(out_lines) in [2, 3]
assert out_lines[:2] == ["START", "SIGTERM"]
if len(out_lines) == 3:
assert out_lines[2] == "Terminated"
out = self.run_command("coverage report -m")
expected = "handler.py 5 1 80% 6"
assert self.squeezed_lines(out)[2] == expected
def test_sigterm_still_runs(self) -> None:
# A terminated process still runs its own SIGTERM handler.
self.make_file(
"handler.py",
"""\
import multiprocessing
import signal
import time
def subproc(x):
print("START", flush=True)
def on_sigterm(signum, frame):
print("SIGTERM", flush=True)
signal.signal(signal.SIGTERM, on_sigterm)
x.value = 0
try:
time.sleep(.1)
except OSError: # This happens on PyPy3.11 on Mac
pass
print("END", flush=True)
if __name__ == "__main__":
x = multiprocessing.Value("L", 1)
proc = multiprocessing.Process(target=subproc, args=(x,))
proc.start()
while x.value != 0:
time.sleep(.02)
proc.terminate()
""",
)
self.make_file(
".coveragerc",
"""\
[run]
parallel = True
concurrency = multiprocessing
sigterm = True
""",
)
out = self.run_command("coverage run handler.py")
assert out == "START\nSIGTERM\nEND\n"
|
SigtermTest
|
python
|
pytorch__pytorch
|
torch/_dynamo/device_interface.py
|
{
"start": 1359,
"end": 5606
}
|
class ____:
"""
This is a simple device runtime interface for Inductor. It enables custom
backends to be integrated with Inductor in a device-agnostic semantic.
"""
class device:
def __new__(cls, device: torch.types.Device) -> Any:
raise NotImplementedError
class Event:
def __new__(cls, *args: Any, **kwargs: Any) -> Any:
raise NotImplementedError(
"Event should be inherited from torch.Event, otherwise, it couldn't be captured by dynamo."
)
class Stream:
def __new__(cls, *args: Any, **kwargs: Any) -> Any:
raise NotImplementedError(
"Stream should be inherited from torch.Stream, otherwise, it couldn't be captured by dynamo."
)
class Worker:
"""
Worker API to query device properties that will work in multi processing
workers that cannot use the GPU APIs (due to processing fork() and
initialization time issues). Properties are recorded in the main process
before we fork the workers.
"""
@staticmethod
def set_device(device: int) -> None:
raise NotImplementedError
@staticmethod
def current_device() -> int:
raise NotImplementedError
@staticmethod
def get_device_properties(device: torch.types.Device = None) -> Any:
raise NotImplementedError
@staticmethod
def current_device() -> int:
raise NotImplementedError
@staticmethod
def set_device(device: torch.types.Device) -> None:
raise NotImplementedError
@staticmethod
def maybe_exchange_device(device: int) -> int:
raise NotImplementedError
@staticmethod
def exchange_device(device: int) -> int:
raise NotImplementedError
@staticmethod
def device_count() -> int:
raise NotImplementedError
@staticmethod
def is_available() -> bool:
raise NotImplementedError
@staticmethod
def stream(stream: torch.Stream) -> Any:
raise NotImplementedError
@staticmethod
def current_stream() -> torch.Stream:
raise NotImplementedError
@staticmethod
def set_stream(stream: torch.Stream) -> None:
raise NotImplementedError
@staticmethod
def _set_stream_by_id(stream_id: int, device_index: int, device_type: int) -> None:
raise NotImplementedError
@staticmethod
def get_raw_stream(device_idx: int) -> int:
raise NotImplementedError
@staticmethod
def synchronize(device: torch.types.Device = None) -> None:
raise NotImplementedError
@classmethod
def get_device_properties(cls, device: torch.types.Device = None) -> Any:
return cls.Worker.get_device_properties(device)
@staticmethod
def get_compute_capability(device: torch.types.Device = None) -> Any:
raise NotImplementedError
@staticmethod
def is_bf16_supported(including_emulation: bool = False) -> bool:
raise NotImplementedError
@classmethod
def is_dtype_supported(
cls, dtype: torch.dtype, including_emulation: bool = False
) -> bool:
return dtype != torch.bfloat16 or cls.is_bf16_supported(including_emulation)
@staticmethod
def memory_allocated(device: torch.types.Device = None) -> int:
raise NotImplementedError
@staticmethod
def is_triton_capable(device: torch.types.Device = None) -> bool:
"""
Returns True if the device has Triton support, False otherwise, even if
the appropriate Triton backend is not available.
"""
return False
@classmethod
def raise_if_triton_unavailable(cls, device: torch.types.Device = None) -> None:
"""
Raises a `RuntimeError` with the appropriate human-readable instructions
to resolve the issue if Triton is not available for the given device, or
the default device if `device` is `None`.
The caller should ensure the presence of the 'triton' package before
calling this method.
"""
if not cls.is_triton_capable():
raise RuntimeError("This device is not capable of supporting Triton")
|
DeviceInterface
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/__main__.py
|
{
"start": 1355,
"end": 2501
}
|
class ____(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
# The underlying implementation of ``RawTextHelpFormatter._split_lines`` invokes this; mimic it.
return text[2:].splitlines()
else:
# Invoke the usual formatter.
return super()._split_lines(text, width)
def run_makespec(filenames, **opts):
# Split pathex by using the path separator
temppaths = opts['pathex'][:]
pathex = opts['pathex'] = []
for p in temppaths:
pathex.extend(p.split(os.pathsep))
import PyInstaller.building.makespec
spec_file = PyInstaller.building.makespec.main(filenames, **opts)
logger.info('wrote %s' % spec_file)
return spec_file
def run_build(pyi_config, spec_file, **kwargs):
import PyInstaller.building.build_main
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
def __add_options(parser):
parser.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help='Show program version info and exit.',
)
|
_SmartFormatter
|
python
|
django__django
|
tests/migration_test_data_persistence/tests.py
|
{
"start": 514,
"end": 1163
}
|
class ____(TransactionTestCase):
"""
Data loaded in migrations is available during class setup if
TransactionTestCase.serialized_rollback = True.
"""
available_apps = ["migration_test_data_persistence"]
serialized_rollback = True
@classmethod
def setUpClass(cls):
# Simulate another TransactionTestCase having just torn down.
call_command("flush", verbosity=0, interactive=False, allow_cascade=True)
super().setUpClass()
cls.book = Book.objects.first()
def test_data_available_in_class_setup(self):
self.assertIsInstance(self.book, Book)
|
MigrationDataPersistenceClassSetup
|
python
|
GoogleCloudPlatform__python-docs-samples
|
firestore/cloud-client/distributed_counters.py
|
{
"start": 1010,
"end": 2612
}
|
class ____:
"""
A counter stores a collection of shards which are
summed to return a total count. This allows for more
frequent incrementing than a single document.
"""
def __init__(self, num_shards):
self._num_shards = num_shards
# [END firestore_solution_sharded_counter_custom_type]
# [START firestore_solution_sharded_counter_create]
def init_counter(self, doc_ref):
"""
Create a given number of shards as
subcollection of specified document.
"""
col_ref = doc_ref.collection("shards")
# Initialize each shard with count=0
for num in range(self._num_shards):
shard = Shard()
col_ref.document(str(num)).set(shard.to_dict())
# [END firestore_solution_sharded_counter_create]
# [START firestore_solution_sharded_counter_increment]
def increment_counter(self, doc_ref):
"""Increment a randomly picked shard."""
doc_id = random.randint(0, self._num_shards - 1)
shard_ref = doc_ref.collection("shards").document(str(doc_id))
return shard_ref.update({"count": firestore.Increment(1)})
# [END firestore_solution_sharded_counter_increment]
# [START firestore_solution_sharded_counter_get]
def get_count(self, doc_ref):
"""Return a total count across all shards."""
total = 0
shards = doc_ref.collection("shards").list_documents()
for shard in shards:
total += shard.get().to_dict().get("count", 0)
return total
# [END firestore_solution_sharded_counter_get]
|
Counter
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_regular.py
|
{
"start": 2437,
"end": 2951
}
|
class ____:
def test_is_k_regular1(self):
g = gen.cycle_graph(4)
assert reg.is_k_regular(g, 2)
assert not reg.is_k_regular(g, 3)
def test_is_k_regular2(self):
g = gen.complete_graph(5)
assert reg.is_k_regular(g, 4)
assert not reg.is_k_regular(g, 3)
assert not reg.is_k_regular(g, 6)
def test_is_k_regular3(self):
g = gen.lollipop_graph(5, 5)
assert not reg.is_k_regular(g, 5)
assert not reg.is_k_regular(g, 6)
|
TestIsKRegular
|
python
|
django__django
|
django/db/backends/postgresql/features.py
|
{
"start": 251,
"end": 6578
}
|
class ____(BaseDatabaseFeatures):
minimum_database_version = (15,)
allows_group_by_selected_pks = True
can_return_columns_from_insert = True
can_return_rows_from_bulk_insert = True
can_return_rows_from_update = True
has_real_datatype = True
has_native_uuid_field = True
has_native_duration_field = True
has_native_json_field = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_select_for_update_of = True
has_select_for_update_skip_locked = True
has_select_for_no_key_update = True
can_release_savepoints = True
supports_comments = True
supports_tablespaces = True
supports_transactions = True
can_introspect_materialized_views = True
can_distinct_on_fields = True
can_rollback_ddl = True
schema_editor_uses_clientside_param_binding = True
supports_combined_alters = True
nulls_order_largest = True
closed_cursor_error_class = InterfaceError
greatest_least_ignores_nulls = True
can_clone_databases = True
supports_temporal_subtraction = True
supports_slicing_ordering_in_compound = True
create_test_procedure_without_params_sql = """
CREATE FUNCTION test_procedure () RETURNS void AS $$
DECLARE
V_I INTEGER;
BEGIN
V_I := 1;
END;
$$ LANGUAGE plpgsql;"""
create_test_procedure_with_int_param_sql = """
CREATE FUNCTION test_procedure (P_I INTEGER) RETURNS void AS $$
DECLARE
V_I INTEGER;
BEGIN
V_I := P_I;
END;
$$ LANGUAGE plpgsql;"""
requires_casted_case_in_updates = True
supports_over_clause = True
supports_frame_exclusion = True
only_supports_unbounded_with_preceding_and_following = True
supports_aggregate_filter_clause = True
supports_aggregate_order_by_clause = True
supported_explain_formats = {"JSON", "TEXT", "XML", "YAML"}
supports_deferrable_unique_constraints = True
has_json_operators = True
json_key_contains_list_matching_requires_list = True
supports_update_conflicts = True
supports_update_conflicts_with_target = True
supports_covering_indexes = True
supports_stored_generated_columns = True
supports_nulls_distinct_unique_constraints = True
supports_no_precision_decimalfield = True
can_rename_index = True
test_collations = {
"deterministic": "C",
"non_default": "sv-x-icu",
"swedish_ci": "sv-x-icu",
"virtual": "sv-x-icu",
}
test_now_utc_template = "STATEMENT_TIMESTAMP() AT TIME ZONE 'UTC'"
insert_test_table_with_defaults = "INSERT INTO {} DEFAULT VALUES"
@cached_property
def django_test_skips(self):
skips = {
"opclasses are PostgreSQL only.": {
"indexes.tests.SchemaIndexesNotPostgreSQLTests."
"test_create_index_ignores_opclasses",
},
"PostgreSQL requires casting to text.": {
"lookup.tests.LookupTests.test_textfield_exact_null",
},
}
if self.connection.settings_dict["OPTIONS"].get("pool"):
skips.update(
{
"Pool does implicit health checks": {
"backends.base.test_base.ConnectionHealthChecksTests."
"test_health_checks_enabled",
"backends.base.test_base.ConnectionHealthChecksTests."
"test_set_autocommit_health_checks_enabled",
},
}
)
if self.uses_server_side_binding:
skips.update(
{
"The actual query cannot be determined for server side bindings": {
"backends.base.test_base.ExecuteWrapperTests."
"test_wrapper_debug",
}
},
)
return skips
@cached_property
def django_test_expected_failures(self):
expected_failures = set()
if self.uses_server_side_binding:
expected_failures.update(
{
# Parameters passed to expressions in SELECT and GROUP BY
# clauses are not recognized as the same values when using
# server-side binding cursors (#34255).
"aggregation.tests.AggregateTestCase."
"test_group_by_nested_expression_with_params",
}
)
if not is_psycopg3:
expected_failures.update(
{
# operator does not exist: bigint[] = integer[]
"postgres_tests.test_array.TestQuerying.test_gt",
"postgres_tests.test_array.TestQuerying.test_in",
"postgres_tests.test_array.TestQuerying.test_lt",
}
)
return expected_failures
@cached_property
def uses_server_side_binding(self):
options = self.connection.settings_dict["OPTIONS"]
return is_psycopg3 and options.get("server_side_binding") is True
@cached_property
def prohibits_null_characters_in_text_exception(self):
if is_psycopg3:
return DataError, "PostgreSQL text fields cannot contain NUL (0x00) bytes"
else:
return ValueError, "A string literal cannot contain NUL (0x00) characters."
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
"PositiveBigIntegerField": "BigIntegerField",
"PositiveIntegerField": "IntegerField",
"PositiveSmallIntegerField": "SmallIntegerField",
}
@cached_property
def is_postgresql_16(self):
return self.connection.pg_version >= 160000
@cached_property
def is_postgresql_17(self):
return self.connection.pg_version >= 170000
@cached_property
def is_postgresql_18(self):
return self.connection.pg_version >= 180000
supports_unlimited_charfield = True
supports_any_value = property(operator.attrgetter("is_postgresql_16"))
supports_virtual_generated_columns = property(
operator.attrgetter("is_postgresql_18")
)
|
DatabaseFeatures
|
python
|
pypa__virtualenv
|
src/virtualenv/run/plugin/creators.py
|
{
"start": 555,
"end": 3626
}
|
class ____(ComponentBuilder):
def __init__(self, interpreter, parser) -> None:
creators, self.key_to_meta, self.describe, self.builtin_key = self.for_interpreter(interpreter)
super().__init__(interpreter, parser, "creator", creators)
@classmethod
def for_interpreter(cls, interpreter):
key_to_class, key_to_meta, builtin_key, describe = OrderedDict(), {}, None, None
errors = defaultdict(list)
for key, creator_class in cls.options("virtualenv.create").items():
if key == "builtin":
msg = "builtin creator is a reserved name"
raise RuntimeError(msg)
meta = creator_class.can_create(interpreter)
if meta:
if meta.error:
errors[meta.error].append(creator_class)
else:
if "builtin" not in key_to_class and issubclass(creator_class, VirtualenvBuiltin):
builtin_key = key
key_to_class["builtin"] = creator_class
key_to_meta["builtin"] = meta
key_to_class[key] = creator_class
key_to_meta[key] = meta
if describe is None and issubclass(creator_class, Describe) and creator_class.can_describe(interpreter):
describe = creator_class
if not key_to_meta:
if errors:
rows = [f"{k} for creators {', '.join(i.__name__ for i in v)}" for k, v in errors.items()]
raise RuntimeError("\n".join(rows))
msg = f"No virtualenv implementation for {interpreter}"
raise RuntimeError(msg)
return CreatorInfo(
key_to_class=key_to_class,
key_to_meta=key_to_meta,
describe=describe,
builtin_key=builtin_key,
)
def add_selector_arg_parse(self, name, choices):
# prefer the built-in venv if present, otherwise fallback to first defined type
choices = sorted(choices, key=lambda a: 0 if a == "builtin" else 1)
default_value = self._get_default(choices)
self.parser.add_argument(
f"--{name}",
choices=choices,
default=default_value,
required=False,
help=f"create environment via{'' if self.builtin_key is None else f' (builtin = {self.builtin_key})'}",
)
@staticmethod
def _get_default(choices):
return next(iter(choices))
def populate_selected_argparse(self, selected, app_data):
self.parser.description = f"options for {self.name} {selected}"
self._impl_class.add_parser_arguments(self.parser, self.interpreter, self.key_to_meta[selected], app_data)
def create(self, options):
options.meta = self.key_to_meta[getattr(options, self.name)]
if not issubclass(self._impl_class, Describe):
options.describe = self.describe(options, self.interpreter)
return super().create(options)
__all__ = [
"CreatorInfo",
"CreatorSelector",
]
|
CreatorSelector
|
python
|
scikit-learn__scikit-learn
|
examples/developing_estimators/sklearn_is_fitted.py
|
{
"start": 1536,
"end": 2607
}
|
class ____(BaseEstimator, ClassifierMixin):
def __init__(self, parameter=1):
self.parameter = parameter
def fit(self, X, y):
"""
Fit the estimator to the training data.
"""
self.classes_ = sorted(set(y))
# Custom attribute to track if the estimator is fitted
self._is_fitted = True
return self
def predict(self, X):
"""
Perform Predictions
If the estimator is not fitted, then raise NotFittedError
"""
check_is_fitted(self)
# Perform prediction logic
predictions = [self.classes_[0]] * len(X)
return predictions
def score(self, X, y):
"""
Calculate Score
If the estimator is not fitted, then raise NotFittedError
"""
check_is_fitted(self)
# Perform scoring logic
return 0.5
def __sklearn_is_fitted__(self):
"""
Check fitted status and return a Boolean value.
"""
return hasattr(self, "_is_fitted") and self._is_fitted
|
CustomEstimator
|
python
|
ray-project__ray
|
python/ray/_common/tests/test_wait_for_condition.py
|
{
"start": 8771,
"end": 10671
}
|
class ____:
"""Tests for edge cases and boundary conditions."""
def test_zero_timeout(self):
"""Test behavior with zero timeout."""
def slow_condition():
time.sleep(0.1)
return True
with pytest.raises(RuntimeError):
wait_for_condition(slow_condition, timeout=0, retry_interval_ms=50)
@pytest.mark.asyncio
async def test_async_zero_timeout(self):
"""Test async behavior with zero timeout."""
async def slow_condition():
await asyncio.sleep(0.1)
return True
with pytest.raises(RuntimeError):
await async_wait_for_condition(
slow_condition, timeout=0, retry_interval_ms=50
)
def test_very_small_retry_interval(self):
"""Test with very small retry interval."""
counter = {"value": 0}
def condition():
counter["value"] += 1
return counter["value"] >= 5
start_time = time.time()
wait_for_condition(condition, timeout=1, retry_interval_ms=1)
elapsed = time.time() - start_time
# Should complete quickly due to small retry interval
assert elapsed < 0.5
assert counter["value"] >= 5
@pytest.mark.asyncio
async def test_async_very_small_retry_interval(self):
"""Test async version with very small retry interval."""
counter = {"value": 0}
def condition():
counter["value"] += 1
return counter["value"] >= 5
start_time = time.time()
await async_wait_for_condition(condition, timeout=1, retry_interval_ms=1)
elapsed = time.time() - start_time
# Should complete quickly due to small retry interval
assert elapsed < 0.5
assert counter["value"] >= 5
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
TestEdgeCases
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_table02.py
|
{
"start": 315,
"end": 1549
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_table02.xlsx")
def test_create_file(self):
"""Test XlsxWriter chart axis table properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [61354368, 61355904]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_table(
{
"vertical": False,
"horizontal": False,
"outline": False,
"show_keys": True,
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py
|
{
"start": 7186,
"end": 7471
}
|
class ____(BaseModel):
class Config:
extra = Extra.allow
usage: Optional[Union[str, Literal["low", "medium", "high"]]] = None
sync_success_rate: Optional[Union[str, Literal["low", "medium", "high"]]] = None
connector_version: Optional[str] = None
|
ConnectorMetric
|
python
|
pexpect__pexpect
|
pexpect/pxssh.py
|
{
"start": 1266,
"end": 1831
}
|
class ____(ExceptionPexpect):
'''Raised for pxssh exceptions.
'''
if sys.version_info > (3, 0):
from shlex import quote
else:
_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
def quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
|
ExceptionPxssh
|
python
|
allegroai__clearml
|
clearml/utilities/pigar/unpack.py
|
{
"start": 234,
"end": 3870
}
|
class ____(object):
"""Archive provides a consistent interface for unpacking
compressed file.
"""
def __init__(self, filename: str, fileobj: Any) -> None:
self._filename = filename
self._fileobj = fileobj
self._file = None
self._names = None
self._read = None
@property
def filename(self) -> str:
return self._filename
@property
def names(self) -> List[str]:
"""If name list is not required, do not get it."""
if self._file is None:
self._prepare()
if not hasattr(self, "_namelist"):
self._namelist = self._names()
return self._namelist
def close(self) -> None:
"""Close file object."""
if self._file is not None:
self._file.close()
if hasattr(self, "_namelist"):
del self._namelist
self._filename = self._fileobj = None
self._file = self._names = self._read = None
def read(self, filename: str) -> bytes:
"""Read one file from archive."""
if self._file is None:
self._prepare()
return self._read(filename)
def unpack(self, to_path: str) -> None:
"""Unpack compressed files to path."""
if self._file is None:
self._prepare()
self._safe_extractall(to_path)
def _prepare(self) -> None:
if self._filename.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
self._prepare_tarball()
# An .egg file is actually just a .zip file
# with a different extension, .whl too.
elif self._filename.endswith((".zip", ".egg", ".whl")):
self._prepare_zip()
else:
raise ValueError("unreadable: {0}".format(self._filename))
def _safe_extractall(self, to_path: str = ".") -> None:
unsafe = []
for name in self.names:
if not self.is_safe(name):
unsafe.append(name)
if unsafe:
raise ValueError("unsafe to unpack: {}".format(unsafe))
self._file.extractall(to_path)
def _prepare_zip(self) -> None:
self._file = zipfile.ZipFile(self._fileobj)
self._names = self._file.namelist
self._read = self._file.read
def _prepare_tarball(self) -> None:
# tarfile has no read method
def _read(filename: str) -> bytes:
f = self._file.extractfile(filename)
return f.read()
self._file = tarfile.open(mode="r:*", fileobj=self._fileobj)
self._names = self._file.getnames
self._read = _read
def is_safe(self, filename: str) -> bool:
return not (
filename.startswith(("/", "\\"))
or (len(filename) > 1 and filename[1] == ":" and filename[0] in string.ascii_letter)
or re.search(r"[.][.][/\\]", filename)
)
def __enter__(self) -> "Archive":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.close()
def top_level(url: str, data: bytes) -> List[str]:
"""Read top level names from compressed file."""
sb = io.BytesIO(data)
txt = None
with Archive(url, sb) as archive:
file = None
for name in archive.names:
if name.lower().endswith("top_level.txt"):
file = name
break
if file:
txt = archive.read(file).decode("utf-8")
sb.close()
return [name.replace("/", ".") for name in txt.splitlines()] if txt else []
|
Archive
|
python
|
huggingface__transformers
|
src/transformers/trainer_pt_utils.py
|
{
"start": 23424,
"end": 25327
}
|
class ____(Sampler):
"""
Sampler that shards batches between several processes. Dispatches indices batch by batch: on 2 processes with batch
size 4, the first two batches are `[0, 1, 2, 3, 4, 5, 6, 7]` and `[8, 9, 10, 11, 12, 13, 14, 15]`, which shard into
`[0, 1, 2, 3]` and `[8, 9, 10, 11]` for GPU-0 and `[4, 5, 6, 7]` and `[12, 13, 14, 15]` for GPU-1.
The sampler thus yields `[0, 1, 2, 3, 8, 9, 10, 11]` on GPU-0 and `[4, 5, 6, 7, 12, 13, 14, 15]` on GPU-1.
"""
def __init__(
self,
dataset: Dataset,
batch_size: int = 1,
drop_last: bool = False,
num_processes: int = 1,
process_index: int = 0,
):
self.dataset = dataset
self.batch_size = batch_size
self.drop_last = drop_last
self.num_processes = num_processes
self.process_index = process_index
self.total_batch_size = total_batch_size = batch_size * num_processes
num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size)
self.total_num_samples = num_batches * total_batch_size
def __iter__(self):
indices = list(range(len(self.dataset)))
# Add extra samples to make it evenly divisible. While loop is there in the edge case we have a tiny dataset
# and it needs to be done several times.
while len(indices) < self.total_num_samples:
indices += indices[: (self.total_num_samples - len(indices))]
result = []
for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size):
result += indices[batch_start : batch_start + self.batch_size]
return iter(result)
def __len__(self):
# Each shard only sees a fraction of total_num_samples.
return self.total_num_samples // self.num_processes
|
ShardSampler
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/projects.py
|
{
"start": 45767,
"end": 51150
}
|
class ____(Response):
"""
Response of projects.delete endpoint.
:param deleted: Number of projects deleted (0 or 1)
:type deleted: int
:param disassociated_tasks: Number of tasks disassociated from the deleted
project
:type disassociated_tasks: int
:param urls: The urls of the files that were uploaded by the project tasks and
models. Returned if the 'delete_contents' was set to 'true'
:type urls: Urls
:param deleted_models: Number of models deleted
:type deleted_models: int
:param deleted_tasks: Number of tasks deleted
:type deleted_tasks: int
"""
_service = "projects"
_action = "delete"
_version = "2.23"
_schema = {
"definitions": {
"urls": {
"properties": {
"artifact_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"event_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"model_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"properties": {
"deleted": {
"description": "Number of projects deleted (0 or 1)",
"type": ["integer", "null"],
},
"deleted_models": {
"description": "Number of models deleted",
"type": ["integer", "null"],
},
"deleted_tasks": {
"description": "Number of tasks deleted",
"type": ["integer", "null"],
},
"disassociated_tasks": {
"description": "Number of tasks disassociated from the deleted project",
"type": ["integer", "null"],
},
"urls": {
"description": "The urls of the files that were uploaded by the project tasks and models. Returned if the 'delete_contents' was set to 'true'",
"oneOf": [{"$ref": "#/definitions/urls"}, {"type": "null"}],
},
},
"type": "object",
}
def __init__(
self,
deleted: Optional[int] = None,
disassociated_tasks: Optional[int] = None,
urls: Any = None,
deleted_models: Optional[int] = None,
deleted_tasks: Optional[int] = None,
**kwargs: Any
) -> None:
super(DeleteResponse, self).__init__(**kwargs)
self.deleted = deleted
self.disassociated_tasks = disassociated_tasks
self.urls = urls
self.deleted_models = deleted_models
self.deleted_tasks = deleted_tasks
@schema_property("deleted")
def deleted(self) -> Optional[int]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
@schema_property("disassociated_tasks")
def disassociated_tasks(self) -> Optional[int]:
return self._property_disassociated_tasks
@disassociated_tasks.setter
def disassociated_tasks(self, value: Optional[int]) -> None:
if value is None:
self._property_disassociated_tasks = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "disassociated_tasks", six.integer_types)
self._property_disassociated_tasks = value
@schema_property("urls")
def urls(self) -> Any:
return self._property_urls
@urls.setter
def urls(self, value: Any) -> None:
if value is None:
self._property_urls = None
return
if isinstance(value, dict):
value = Urls.from_dict(value)
else:
self.assert_isinstance(value, "urls", Urls)
self._property_urls = value
@schema_property("deleted_models")
def deleted_models(self) -> Optional[int]:
return self._property_deleted_models
@deleted_models.setter
def deleted_models(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted_models", six.integer_types)
self._property_deleted_models = value
@schema_property("deleted_tasks")
def deleted_tasks(self) -> Optional[int]:
return self._property_deleted_tasks
@deleted_tasks.setter
def deleted_tasks(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted_tasks = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted_tasks", six.integer_types)
self._property_deleted_tasks = value
|
DeleteResponse
|
python
|
conda__conda
|
conda/common/configuration.py
|
{
"start": 38976,
"end": 41018
}
|
class ____(Parameter):
"""Parameter type for a Configuration class that holds a sequence (i.e. list) of Parameters."""
_type = tuple
def __init__(self, element_type, default=(), validation=None, string_delimiter=","):
"""
Args:
element_type (Parameter): The Parameter type that is held in the sequence.
default (Sequence): default value, empty tuple if not given.
string_delimiter (str): separation string used to parse string into sequence.
"""
self._element_type = element_type
self.string_delimiter = string_delimiter
super().__init__(default, validation)
def get_all_matches(self, name, names, instance):
# this is necessary to handle argparse `action="append"`, which can't be set to a
# default value of NULL
# it also config settings like `channels: ~`
matches, exceptions = super().get_all_matches(name, names, instance)
matches = tuple(m for m in matches if m._raw_value is not None)
return matches, exceptions
def load(self, name, match):
value = match.value(self)
if value is None:
return SequenceLoadedParameter(
name,
(),
self._element_type,
match.keyflag(),
(),
validation=self._validation,
)
if not isiterable(value):
raise InvalidTypeError(
name, value, match.source, value.__class__.__name__, self._type.__name__
)
loaded_sequence = []
for child_value in value:
loaded_child_value = self._element_type.load(name, child_value)
loaded_sequence.append(loaded_child_value)
return SequenceLoadedParameter(
name,
tuple(loaded_sequence),
self._element_type,
match.keyflag(),
match.valueflags(self._element_type),
validation=self._validation,
)
|
SequenceParameter
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/hooks/test_datafusion.py
|
{
"start": 23395,
"end": 28212
}
|
class ____:
@pytest.mark.asyncio
@mock.patch(HOOK_STR.format("DataFusionAsyncHook._get_link"))
async def test_async_get_pipeline_should_execute_successfully(self, mocked_link, hook_async):
await hook_async.get_pipeline(
instance_url=INSTANCE_URL,
namespace=NAMESPACE,
pipeline_name=PIPELINE_NAME,
pipeline_id=PIPELINE_ID,
session=session,
)
mocked_link.assert_awaited_once_with(url=CONSTRUCTED_PIPELINE_URL, session=session)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("pipeline_type", "constructed_url"),
[
(DataFusionPipelineType.BATCH, CONSTRUCTED_PIPELINE_URL_GET),
(DataFusionPipelineType.STREAM, CONSTRUCTED_PIPELINE_STREAM_URL_GET),
],
)
@mock.patch(HOOK_STR.format("DataFusionAsyncHook.get_pipeline"))
async def test_async_get_pipeline_status_completed_should_execute_successfully(
self, mocked_get, hook_async, pipeline_type, constructed_url
):
response = aiohttp.ClientResponse(
"get",
URL(constructed_url),
request_info=mock.Mock(),
writer=mock.Mock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=None,
)
response.status = 200
mocked_get.return_value = response
mocked_get.return_value._headers = {"Authorization": "some-token"}
mocked_get.return_value._body = b'{"status": "COMPLETED"}'
pipeline_status = await hook_async.get_pipeline_status(
pipeline_name=PIPELINE_NAME,
instance_url=INSTANCE_URL,
pipeline_id=PIPELINE_ID,
namespace=NAMESPACE,
)
mocked_get.assert_awaited_once()
assert pipeline_status == "success"
@pytest.mark.asyncio
@pytest.mark.parametrize(
("pipeline_type", "constructed_url"),
[
(DataFusionPipelineType.BATCH, CONSTRUCTED_PIPELINE_URL_GET),
(DataFusionPipelineType.STREAM, CONSTRUCTED_PIPELINE_STREAM_URL_GET),
],
)
@mock.patch(HOOK_STR.format("DataFusionAsyncHook.get_pipeline"))
async def test_async_get_pipeline_status_running_should_execute_successfully(
self, mocked_get, hook_async, pipeline_type, constructed_url
):
"""Assets that the DataFusionAsyncHook returns pending response when job is still in running state"""
response = aiohttp.ClientResponse(
"get",
URL(constructed_url),
request_info=mock.Mock(),
writer=mock.Mock(),
continue100=None,
timer=TimerNoop(),
traces=[],
loop=mock.Mock(),
session=None,
)
response.status = 200
mocked_get.return_value = response
mocked_get.return_value._headers = {"Authorization": "some-token"}
mocked_get.return_value._body = b'{"status": "RUNNING"}'
pipeline_status = await hook_async.get_pipeline_status(
pipeline_name=PIPELINE_NAME,
instance_url=INSTANCE_URL,
pipeline_id=PIPELINE_ID,
pipeline_type=pipeline_type,
namespace=NAMESPACE,
)
mocked_get.assert_awaited_once()
assert pipeline_status == "pending"
@pytest.mark.asyncio
@mock.patch(HOOK_STR.format("DataFusionAsyncHook.get_pipeline"))
async def test_async_get_pipeline_status_os_error_should_execute_successfully(
self, mocked_get, hook_async
):
"""Assets that the DataFusionAsyncHook returns a pending response when OSError is raised"""
mocked_get.side_effect = OSError()
pipeline_status = await hook_async.get_pipeline_status(
pipeline_name=PIPELINE_NAME,
instance_url=INSTANCE_URL,
pipeline_id=PIPELINE_ID,
namespace=NAMESPACE,
)
mocked_get.assert_awaited_once()
assert pipeline_status == "pending"
@pytest.mark.asyncio
@mock.patch(HOOK_STR.format("DataFusionAsyncHook.get_pipeline"))
async def test_async_get_pipeline_status_exception_should_execute_successfully(
self, mocked_get, hook_async, caplog
):
"""Assets that the logging is done correctly when DataFusionAsyncHook raises Exception"""
caplog.set_level(logging.INFO)
mocked_get.side_effect = Exception()
await hook_async.get_pipeline_status(
pipeline_name=PIPELINE_NAME,
instance_url=INSTANCE_URL,
pipeline_id=PIPELINE_ID,
namespace=NAMESPACE,
)
mocked_get.assert_awaited_once()
assert "Retrieving pipeline status finished with errors..." in caplog.text
|
TestDataFusionHookAsynch
|
python
|
sympy__sympy
|
sympy/physics/quantum/boson.py
|
{
"start": 3145,
"end": 3915
}
|
class ____(Ket):
"""Fock state ket for a bosonic mode.
Parameters
==========
n : Number
The Fock state number.
"""
def __new__(cls, n):
return Ket.__new__(cls, n)
@property
def n(self):
return self.label[0]
@classmethod
def dual_class(self):
return BosonFockBra
@classmethod
def _eval_hilbert_space(cls, label):
return FockSpace()
def _eval_innerproduct_BosonFockBra(self, bra, **hints):
return KroneckerDelta(self.n, bra.n)
def _apply_from_right_to_BosonOp(self, op, **options):
if op.is_annihilation:
return sqrt(self.n) * BosonFockKet(self.n - 1)
else:
return sqrt(self.n + 1) * BosonFockKet(self.n + 1)
|
BosonFockKet
|
python
|
numpy__numpy
|
numpy/f2py/tests/test_return_logical.py
|
{
"start": 1385,
"end": 2048
}
|
class ____(TestReturnLogical):
sources = [
util.getpath("tests", "src", "return_logical", "foo77.f"),
util.getpath("tests", "src", "return_logical", "foo90.f90"),
]
@pytest.mark.slow
@pytest.mark.parametrize("name", ["t0", "t1", "t2", "t4", "s0", "s1", "s2", "s4"])
def test_all_f77(self, name):
self.check_function(getattr(self.module, name))
@pytest.mark.slow
@pytest.mark.parametrize("name",
["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"])
def test_all_f90(self, name):
self.check_function(getattr(self.module.f90_return_logical, name))
|
TestFReturnLogical
|
python
|
python-excel__xlwt
|
xlwt/BIFFRecords.py
|
{
"start": 89116,
"end": 92846
}
|
class ____(BiffRecord):
"""
This record is part of a Link Table. It contains the name and the token
array of an internal defined name. Token arrays of defined names
contain tokens with aberrant token classes.
Record NAME, BIFF5/BIFF7:
Offset Size Contents
0 2 Option flags, see below
2 1 Keyboard shortcut (only for command macro names, see below)
3 1 Length of the name (character count, ln)
4 2 Size of the formula data (sz)
6 2 0 = Global name, otherwise index to EXTERNSHEET record (one-based)
8 2 0 = Global name, otherwise index to sheet (one-based)
10 1 Length of menu text (character count, lm)
11 1 Length of description text (character count, ld)
12 1 Length of help topic text (character count, lh)
13 1 Length of status bar text (character count, ls)
14 ln Character array of the name
14+ln sz Formula data (RPN token array without size field, 4)
14+ln+sz lm Character array of menu text
var. ld Character array of description text
var. lh Character array of help topic text
var. ls Character array of status bar text
Record NAME, BIFF8:
Offset Size Contents
0 2 Option flags, see below
2 1 Keyboard shortcut (only for command macro names, see below)
3 1 Length of the name (character count, ln)
4 2 Size of the formula data (sz)
6 2 Not used
8 2 0 = Global name, otherwise index to sheet (one-based)
10 1 Length of menu text (character count, lm)
11 1 Length of description text (character count, ld)
12 1 Length of help topic text (character count, lh)
13 1 Length of status bar text (character count, ls)
14 var. Name (Unicode string without length field, 3.4)
var. sz Formula data (RPN token array without size field, 4)
[var.] var. (optional, only if lm > 0) Menu text (Unicode string without length field, 3.4)
[var.] var. (optional, only if ld > 0) Description text (Unicode string without length field, 3.4)
[var.] var. (optional, only if lh > 0) Help topic text (Unicode string without length field, 3.4)
[var.] var. (optional, only if ls > 0) Status bar text (Unicode string without length field, 3.4)
"""
_REC_ID = 0x0018
def __init__(self, options, keyboard_shortcut, name, sheet_index, rpn, menu_text='', desc_text='', help_text='', status_text=''):
if type(name) == int:
uname = chr(name)
else:
uname = upack1(name)[1:]
uname_len = len(uname)
#~ self._rec_data = pack('<HBBHHHBBBB%ds%ds' % (uname_len, len(rpn)), options, keyboard_shortcut, uname_len, len(rpn), 0x0000, sheet_index, len(menu_text), len(desc_text), len(help_text), len(status_text), uname, rpn) + menu_text + desc_text + help_text + status_text
self._rec_data = pack('<HBBHHHBBBBB%ds%ds' % (uname_len, len(rpn)), options, keyboard_shortcut, uname_len, len(rpn), 0x0000, sheet_index, 0x00, len(menu_text), len(desc_text), len(help_text), len(status_text), uname, rpn) + menu_text + desc_text + help_text + status_text
# Excel (both 2003 and 2007) don't like refs
# split over a record boundary, which is what the
# standard BiffRecord.get method does.
# 8224 max data bytes in a BIFF record
# 6 bytes per ref
# 1370 = floor((8224 - 2) / 6.0) max refs in a record
_maxRefPerRecord = 1370
|
NameRecord
|
python
|
pypa__pipenv
|
pipenv/vendor/tomlkit/items.py
|
{
"start": 8629,
"end": 8888
}
|
class ____(Enum):
"""
The type of a Key.
Keys can be bare (unquoted), or quoted using basic ("), or literal (')
quotes following the same escaping rules as single-line StringType.
"""
Bare = ""
Basic = '"'
Literal = "'"
|
KeyType
|
python
|
fabric__fabric
|
fabric/testing/base.py
|
{
"start": 17914,
"end": 19833
}
|
class ____:
"""
Class managing mocked SFTP remote state.
Used in start/stop fashion in eg doctests; wrapped in the SFTP fixtures in
conftest.py for main use.
.. versionadded:: 2.1
"""
def __init__(self, autostart=True):
if autostart:
self.start()
def start(self):
# Set up mocks
self.os_patcher = patch("fabric.transfer.os")
self.client_patcher = patch("fabric.connection.SSHClient")
self.path_patcher = patch("fabric.transfer.Path")
mock_os = self.os_patcher.start()
Client = self.client_patcher.start()
self.path_patcher.start()
sftp = Client.return_value.open_sftp.return_value
# Handle common filepath massage actions; tests will assume these.
def fake_abspath(path):
# Run normpath to avoid tests not seeing abspath wrinkles (like
# trailing slash chomping)
return "/local/{}".format(os.path.normpath(path))
mock_os.path.abspath.side_effect = fake_abspath
sftp.getcwd.return_value = "/remote"
# Ensure stat st_mode is a real number; Python 3's stat.S_IMODE doesn't
# like just being handed a MagicMock?
fake_mode = 0o644 # arbitrary real-ish mode
sftp.stat.return_value.st_mode = fake_mode
mock_os.stat.return_value.st_mode = fake_mode
# Not super clear to me why the 'wraps' functionality in mock isn't
# working for this :( reinstate a bunch of os(.path) so it still works
mock_os.sep = os.sep
for name in ("basename", "split", "join", "normpath"):
getattr(mock_os.path, name).side_effect = getattr(os.path, name)
# Return the sftp and OS mocks for use by decorator use case.
return sftp, mock_os
def stop(self):
self.os_patcher.stop()
self.client_patcher.stop()
self.path_patcher.stop()
|
MockSFTP
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_tracing_config_param.py
|
{
"start": 276,
"end": 840
}
|
class ____(TypedDict, total=False):
group_id: str
"""
The group id to attach to this trace to enable filtering and grouping in the
Traces Dashboard.
"""
metadata: object
"""
The arbitrary metadata to attach to this trace to enable filtering in the Traces
Dashboard.
"""
workflow_name: str
"""The name of the workflow to attach to this trace.
This is used to name the trace in the Traces Dashboard.
"""
RealtimeTracingConfigParam: TypeAlias = Union[Literal["auto"], TracingConfiguration]
|
TracingConfiguration
|
python
|
mlflow__mlflow
|
mlflow/entities/span_event.py
|
{
"start": 2934,
"end": 3382
}
|
class ____(json.JSONEncoder):
"""
Custom encoder to handle json serialization.
"""
def default(self, o):
try:
return super().default(o)
except TypeError:
# convert datetime to string format by default
if isinstance(o, datetime):
return o.isoformat()
# convert object direct to string to avoid error in serialization
return str(o)
|
CustomEncoder
|
python
|
marshmallow-code__marshmallow
|
tests/test_schema.py
|
{
"start": 35776,
"end": 38519
}
|
class ____:
@pytest.fixture
def schema(self):
class GrandChildSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
class ChildSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
grand_child = fields.Nested(GrandChildSchema, unknown=EXCLUDE)
class ParentSchema(Schema):
str_dump_only = fields.String()
str_load_only = fields.String()
str_regular = fields.String()
child = fields.Nested(ChildSchema, unknown=EXCLUDE)
return ParentSchema(
dump_only=(
"str_dump_only",
"child.str_dump_only",
"child.grand_child.str_dump_only",
),
load_only=(
"str_load_only",
"child.str_load_only",
"child.grand_child.str_load_only",
),
)
@pytest.fixture
def data(self):
return dict(
str_dump_only="Dump Only",
str_load_only="Load Only",
str_regular="Regular String",
child=dict(
str_dump_only="Dump Only",
str_load_only="Load Only",
str_regular="Regular String",
grand_child=dict(
str_dump_only="Dump Only",
str_load_only="Load Only",
str_regular="Regular String",
),
),
)
def test_load_only(self, schema, data):
result = schema.dump(data)
assert "str_load_only" not in result
assert "str_dump_only" in result
assert "str_regular" in result
child = result["child"]
assert "str_load_only" not in child
assert "str_dump_only" in child
assert "str_regular" in child
grand_child = child["grand_child"]
assert "str_load_only" not in grand_child
assert "str_dump_only" in grand_child
assert "str_regular" in grand_child
def test_dump_only(self, schema, data):
result = schema.load(data, unknown=EXCLUDE)
assert "str_dump_only" not in result
assert "str_load_only" in result
assert "str_regular" in result
child = result["child"]
assert "str_dump_only" not in child
assert "str_load_only" in child
assert "str_regular" in child
grand_child = child["grand_child"]
assert "str_dump_only" not in grand_child
assert "str_load_only" in grand_child
assert "str_regular" in grand_child
|
TestDeeplyNestedLoadOnly
|
python
|
falconry__falcon
|
tests/test_compiled_router.py
|
{
"start": 2697,
"end": 3548
}
|
class ____:
def on_get(self, req, res):
pass
def on_get_other(self, req, res):
pass
def test_cannot_replace_compiled():
opt = CompiledRouterOptions()
with pytest.raises(AttributeError, match='Cannot set'):
opt.converters = {}
with pytest.raises(AttributeError, match='object has no attribute'):
opt.other = 123
def test_converter_not_subclass():
class X:
def convert(self, v):
return v
router = CompiledRouter()
router.options.converters['x'] = X
router.add_route('/foo/{bar:x}', MockResource())
res = router.find('/foo/bar')
assert res is not None
assert res[2] == {'bar': 'bar'}
assert router.find('/foo/bar/bar') is None
def test_base_classes():
with pytest.raises(NotImplementedError):
compiled._CxChild().src(42)
|
MockResource
|
python
|
encode__django-rest-framework
|
tests/test_renderers.py
|
{
"start": 25176,
"end": 29646
}
|
class ____(URLPatternsTestCase):
class ExampleViewSet(ViewSet):
def list(self, request):
return Response()
@action(detail=False, name="Extra list action")
def list_action(self, request):
raise NotImplementedError
class AuthExampleViewSet(ExampleViewSet):
permission_classes = [permissions.IsAuthenticated]
class SimpleSerializer(serializers.Serializer):
name = serializers.CharField()
router = SimpleRouter()
router.register('examples', ExampleViewSet, basename='example')
router.register('auth-examples', AuthExampleViewSet, basename='auth-example')
urlpatterns = [path('api/', include(router.urls))]
def setUp(self):
self.renderer = BrowsableAPIRenderer()
self.renderer.accepted_media_type = ''
self.renderer.renderer_context = {}
def test_render_form_for_serializer(self):
with self.subTest('Serializer'):
serializer = BrowsableAPIRendererTests.SimpleSerializer(data={'name': 'Name'})
form = self.renderer.render_form_for_serializer(serializer)
assert isinstance(form, str), 'Must return form for serializer'
with self.subTest('ListSerializer'):
list_serializer = BrowsableAPIRendererTests.SimpleSerializer(data=[{'name': 'Name'}], many=True)
form = self.renderer.render_form_for_serializer(list_serializer)
assert form is None, 'Must not return form for list serializer'
def test_get_raw_data_form(self):
with self.subTest('Serializer'):
class DummyGenericViewsetLike(APIView):
def get_serializer(self, **kwargs):
return BrowsableAPIRendererTests.SimpleSerializer(**kwargs)
def get(self, request):
response = Response()
response.view = self
return response
post = get
view = DummyGenericViewsetLike.as_view()
_request = APIRequestFactory().get('/')
request = Request(_request)
response = view(_request)
view = response.view
raw_data_form = self.renderer.get_raw_data_form({'name': 'Name'}, view, 'POST', request)
assert raw_data_form['_content'].initial == '{\n "name": ""\n}'
with self.subTest('ListSerializer'):
class DummyGenericViewsetLike(APIView):
def get_serializer(self, **kwargs):
return BrowsableAPIRendererTests.SimpleSerializer(many=True, **kwargs) # returns ListSerializer
def get(self, request):
response = Response()
response.view = self
return response
post = get
view = DummyGenericViewsetLike.as_view()
_request = APIRequestFactory().get('/')
request = Request(_request)
response = view(_request)
view = response.view
raw_data_form = self.renderer.get_raw_data_form([{'name': 'Name'}], view, 'POST', request)
assert raw_data_form['_content'].initial == '[\n {\n "name": ""\n }\n]'
def test_get_description_returns_empty_string_for_401_and_403_statuses(self):
assert self.renderer.get_description({}, status_code=401) == ''
assert self.renderer.get_description({}, status_code=403) == ''
def test_get_filter_form_returns_none_if_data_is_not_list_instance(self):
class DummyView:
get_queryset = None
filter_backends = None
result = self.renderer.get_filter_form(data='not list',
view=DummyView(), request={})
assert result is None
def test_extra_actions_dropdown(self):
resp = self.client.get('/api/examples/', HTTP_ACCEPT='text/html')
assert 'id="extra-actions-menu"' in resp.content.decode()
assert '/api/examples/list_action/' in resp.content.decode()
assert '>Extra list action<' in resp.content.decode()
def test_extra_actions_dropdown_not_authed(self):
resp = self.client.get('/api/unauth-examples/', HTTP_ACCEPT='text/html')
assert 'id="extra-actions-menu"' not in resp.content.decode()
assert '/api/examples/list_action/' not in resp.content.decode()
assert '>Extra list action<' not in resp.content.decode()
|
BrowsableAPIRendererTests
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/completion/base.py
|
{
"start": 639,
"end": 4183
}
|
class ____:
"""
:param text: The new string that will be inserted into the document.
:param start_position: Position relative to the cursor_position where the
new text will start. The text will be inserted between the
start_position and the original cursor position.
:param display: (optional string or formatted text) If the completion has
to be displayed differently in the completion menu.
:param display_meta: (Optional string or formatted text) Meta information
about the completion, e.g. the path or source where it's coming from.
This can also be a callable that returns a string.
:param style: Style string.
:param selected_style: Style string, used for a selected completion.
This can override the `style` parameter.
"""
def __init__(
self,
text: str,
start_position: int = 0,
display: AnyFormattedText | None = None,
display_meta: AnyFormattedText | None = None,
style: str = "",
selected_style: str = "",
) -> None:
from prompt_toolkit.formatted_text import to_formatted_text
self.text = text
self.start_position = start_position
self._display_meta = display_meta
if display is None:
display = text
self.display = to_formatted_text(display)
self.style = style
self.selected_style = selected_style
assert self.start_position <= 0
def __repr__(self) -> str:
if isinstance(self.display, str) and self.display == self.text:
return f"{self.__class__.__name__}(text={self.text!r}, start_position={self.start_position!r})"
else:
return f"{self.__class__.__name__}(text={self.text!r}, start_position={self.start_position!r}, display={self.display!r})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, Completion):
return False
return (
self.text == other.text
and self.start_position == other.start_position
and self.display == other.display
and self._display_meta == other._display_meta
)
def __hash__(self) -> int:
return hash((self.text, self.start_position, self.display, self._display_meta))
@property
def display_text(self) -> str:
"The 'display' field as plain text."
from prompt_toolkit.formatted_text import fragment_list_to_text
return fragment_list_to_text(self.display)
@property
def display_meta(self) -> StyleAndTextTuples:
"Return meta-text. (This is lazy when using a callable)."
from prompt_toolkit.formatted_text import to_formatted_text
return to_formatted_text(self._display_meta or "")
@property
def display_meta_text(self) -> str:
"The 'meta' field as plain text."
from prompt_toolkit.formatted_text import fragment_list_to_text
return fragment_list_to_text(self.display_meta)
def new_completion_from_position(self, position: int) -> Completion:
"""
(Only for internal use!)
Get a new completion by splitting this one. Used by `Application` when
it needs to have a list of new completions after inserting the common
prefix.
"""
assert position - self.start_position >= 0
return Completion(
text=self.text[position - self.start_position :],
display=self.display,
display_meta=self._display_meta,
)
|
Completion
|
python
|
django-extensions__django-extensions
|
tests/testapp/models.py
|
{
"start": 14447,
"end": 14700
}
|
class ____(models.Model):
field_to_update = models.BooleanField(default=True)
modified = ModificationDateTimeField()
update_modified = False
class Meta:
app_label = "django_extensions"
|
DisabledUpdateModelModificationDateTimeField
|
python
|
bokeh__bokeh
|
src/bokeh/models/selections.py
|
{
"start": 4084,
"end": 5042
}
|
class ____(SelectionPolicy):
'''
When a data source is shared between multiple renderers, selecting a point on
from any renderer will cause that row in the data source to be selected. The
selection is made from the union of hit test results from all renderers.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
UnionRenderers
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/blocks/caption.py
|
{
"start": 7709,
"end": 12139
}
|
class ____(Block):
"""Figure captions."""
NAME = ''
PREFIX = ''
CLASSES = ''
ARGUMENT = None
OPTIONS = {
'type': ('', type_html_identifier)
}
def on_init(self):
"""Initialize."""
self.auto = self.config['auto']
self.prepend = self.config['prepend']
self.caption = None
self.fig_num = ''
self.level = ''
self.classes = self.CLASSES.split()
def on_validate(self, parent):
"""Handle on validate event."""
argument = self.argument
if argument:
if argument.startswith('>'):
self.prepend = False
argument = argument[1:].lstrip()
elif argument.startswith('<'):
self.prepend = True
argument = argument[1:].lstrip()
m = RE_FIG_NUM.match(argument)
if m:
if m.group(1):
self.level = m.group(2)
else:
self.fig_num = m.group(2)
argument = argument[m.end():].lstrip()
if argument:
tokens = argument.split()
if len(tokens) > 1:
return False
id_token = tokens[0]
if not id_token.startswith('#'):
return False
identifier = id_token[1:]
try:
identifier = type_html_identifier(identifier)
except ValueError:
return False
attrs = dict(self.options['attrs'])
if 'id' not in attrs:
attrs['id'] = identifier
self.options['attrs'] = attrs
return True
return True
def on_create(self, parent):
"""Create the element."""
# Find sibling to add caption to.
fig = None
child = None
children = list(parent)
if children:
child = children[-1]
# Do we have a figure with no caption?
if child.tag == 'figure':
fig = child
for c in list(child):
if c.tag == 'figcaption':
fig = None
break
# Create a new figure if sibling is not a figure or already has a caption.
# Add sibling to the new figure.
if fig is None:
attrib = {} if not self.classes else {'class': ' '.join(self.classes)}
fig = etree.SubElement(parent, 'figure', attrib)
if child is not None:
fig.append(child)
parent.remove(child)
# Add classes to existing figure
elif self.CLASSES:
classes = fig.attrib.get('class', '').strip()
if classes:
class_list = classes.split()
for c in self.classes:
if c not in class_list:
classes += " " + c
else:
classes = ' '.join(self.classes)
fig.attrib['class'] = classes
if self.auto:
fig.attrib['__figure_type'] = self.NAME
if self.level:
fig.attrib['__figure_level'] = self.level
if self.fig_num:
fig.attrib['__figure_num'] = self.fig_num
# Add caption to the target figure.
if self.prepend:
if self.auto:
fig.attrib['__figure_prepend'] = "1"
self.caption = etree.Element('figcaption')
fig.insert(0, self.caption)
else:
self.caption = etree.SubElement(fig, 'figcaption')
return fig
def on_add(self, block):
"""Return caption as the target container for content."""
return self.caption
def on_end(self, block):
"""Handle explicit, manual prefixes on block end."""
prefix = self.PREFIX
if prefix and not self.auto:
# Levels should not be used in manual mode, but if they are, give a generic result.
if self.level:
self.fig_num = '.'.join(['1'] * (int(self.level) + 1))
if self.fig_num:
update_tag(
block,
self.NAME,
self.fig_num,
prefix,
self.prepend,
self.md
)
|
Caption
|
python
|
vyperlang__vyper
|
vyper/venom/passes/algebraic_optimization.py
|
{
"start": 623,
"end": 15012
}
|
class ____(IRPass):
"""
This pass reduces algebraic evaluatable expressions.
It currently optimizes:
- iszero chains
- binops
- offset adds
"""
dfg: DFGAnalysis
updater: InstUpdater
def run_pass(self):
self.dfg = self.analyses_cache.request_analysis(DFGAnalysis)
self.updater = InstUpdater(self.dfg)
self._handle_offset()
self._algebraic_opt()
self._optimize_iszero_chains()
self._algebraic_opt()
self.analyses_cache.invalidate_analysis(LivenessAnalysis)
def _optimize_iszero_chains(self) -> None:
fn = self.function
for bb in fn.get_basic_blocks():
for inst in bb.instructions:
if inst.opcode != "iszero":
continue
iszero_chain = self._get_iszero_chain(inst.operands[0])
iszero_count = len(iszero_chain)
if iszero_count == 0:
continue
inst_out = inst.output
for use_inst in self.dfg.get_uses(inst_out).copy():
opcode = use_inst.opcode
if opcode == "iszero":
# We keep iszero instuctions as is
continue
if opcode in ("jnz", "assert"):
# instructions that accept a truthy value as input:
# we can remove up to all the iszero instructions
keep_count = 1 - iszero_count % 2
else:
# all other instructions:
# we need to keep at least one or two iszero instructions
keep_count = 1 + iszero_count % 2
if keep_count >= iszero_count:
continue
out_var = iszero_chain[keep_count].operands[0]
self.updater.update_operands(use_inst, {inst_out: out_var})
def _get_iszero_chain(self, op: IROperand) -> list[IRInstruction]:
chain: list[IRInstruction] = []
while True:
if not isinstance(op, IRVariable):
break
inst = self.dfg.get_producing_instruction(op)
if inst is None or inst.opcode != "iszero":
break
op = inst.operands[0]
chain.append(inst)
chain.reverse()
return chain
def _handle_offset(self):
for bb in self.function.get_basic_blocks():
for inst in bb.instructions:
if (
inst.opcode == "add"
and self._is_lit(inst.operands[0])
and isinstance(inst.operands[1], IRLabel)
):
inst.opcode = "offset"
def _is_lit(self, operand: IROperand) -> bool:
return isinstance(operand, IRLiteral)
def _algebraic_opt(self):
self._algebraic_opt_pass()
def _algebraic_opt_pass(self):
for bb in self.function.get_basic_blocks():
for inst in bb.instructions:
self._handle_inst_peephole(inst)
self._flip_inst(inst)
def _flip_inst(self, inst: IRInstruction):
ops = inst.operands
# improve code. this seems like it should be properly handled by
# better heuristics in DFT pass.
if inst.flippable and self._is_lit(ops[0]) and not self._is_lit(ops[1]):
inst.flip()
# "peephole", weakening algebraic optimizations
def _handle_inst_peephole(self, inst: IRInstruction):
if inst.num_outputs != 1:
return
inst_out = inst.output
if inst.is_volatile:
return
if inst.opcode == "assign":
return
if inst.is_pseudo:
return
# TODO nice to have rules:
# -1 * x => 0 - x
# x // -1 => 0 - x (?)
# x + (-1) => x - 1 # save codesize, maybe for all negative numbers)
# 1 // x => x == 1(?)
# 1 % x => x > 1(?)
# !!x => x > 0 # saves 1 gas as of shanghai
operands = inst.operands
# make logic easier for commutative instructions.
if inst.flippable and self._is_lit(operands[1]) and not self._is_lit(operands[0]):
inst.flip()
operands = inst.operands
if inst.opcode in {"shl", "shr", "sar"}:
# (x >> 0) == (x << 0) == x
if lit_eq(operands[1], 0):
self.updater.mk_assign(inst, operands[0])
return
# no more cases for these instructions
return
if inst.opcode == "exp":
# x ** 0 -> 1
if lit_eq(operands[0], 0):
self.updater.mk_assign(inst, IRLiteral(1))
return
# 1 ** x -> 1
if lit_eq(operands[1], 1):
self.updater.mk_assign(inst, IRLiteral(1))
return
# 0 ** x -> iszero x
if lit_eq(operands[1], 0):
self.updater.update(inst, "iszero", [operands[0]])
return
# x ** 1 -> x
if lit_eq(operands[0], 1):
self.updater.mk_assign(inst, operands[1])
return
# no more cases for this instruction
return
if inst.opcode == "gep":
if lit_eq(inst.operands[1], 0):
self.updater.mk_assign(inst, inst.operands[0])
return
if inst.opcode in {"add", "sub", "xor"}:
# (x - x) == (x ^ x) == 0
if inst.opcode in ("xor", "sub") and operands[0] == operands[1]:
self.updater.mk_assign(inst, IRLiteral(0))
return
# (x + 0) == (0 + x) -> x
# x - 0 -> x
# (x ^ 0) == (0 ^ x) -> x
if lit_eq(operands[0], 0):
self.updater.mk_assign(inst, operands[1])
return
# (-1) - x -> ~x
# from two's complement
if inst.opcode == "sub" and lit_eq(operands[1], -1):
self.updater.update(inst, "not", [operands[0]])
return
# x ^ 0xFFFF..FF -> ~x
if inst.opcode == "xor" and lit_eq(operands[0], -1):
self.updater.update(inst, "not", [operands[1]])
return
return
# x & 0xFF..FF -> x
if inst.opcode == "and" and lit_eq(operands[0], -1):
self.updater.mk_assign(inst, operands[1])
return
if inst.opcode in ("mul", "and", "div", "sdiv", "mod", "smod"):
# (x * 0) == (x & 0) == (x // 0) == (x % 0) -> 0
if any(lit_eq(op, 0) for op in operands):
self.updater.mk_assign(inst, IRLiteral(0))
return
if inst.opcode in {"mul", "div", "sdiv", "mod", "smod"}:
if inst.opcode in ("mod", "smod") and lit_eq(operands[0], 1):
# x % 1 -> 0
self.updater.mk_assign(inst, IRLiteral(0))
return
# (x * 1) == (1 * x) == (x // 1) -> x
if inst.opcode in ("mul", "div", "sdiv") and lit_eq(operands[0], 1):
self.updater.mk_assign(inst, operands[1])
return
if self._is_lit(operands[0]) and is_power_of_two(operands[0].value):
val = operands[0].value
# x % (2^n) -> x & (2^n - 1)
if inst.opcode == "mod":
self.updater.update(inst, "and", [IRLiteral(val - 1), operands[1]])
return
# x / (2^n) -> x >> n
if inst.opcode == "div":
self.updater.update(inst, "shr", [operands[1], IRLiteral(int_log2(val))])
return
# x * (2^n) -> x << n
if inst.opcode == "mul":
self.updater.update(inst, "shl", [operands[1], IRLiteral(int_log2(val))])
return
return
uses = self.dfg.get_uses(inst_out)
is_truthy = all(i.opcode in TRUTHY_INSTRUCTIONS for i in uses)
prefer_iszero = all(i.opcode in ("assert", "iszero") for i in uses)
# TODO rules like:
# not x | not y => not (x & y)
# x | not y => not (not x & y)
if inst.opcode == "or":
# x | 0xff..ff == 0xff..ff
if any(lit_eq(op, SizeLimits.MAX_UINT256) for op in operands):
self.updater.mk_assign(inst, IRLiteral(SizeLimits.MAX_UINT256))
return
# x | n -> 1 in truthy positions (if n is non zero)
if is_truthy and self._is_lit(operands[0]) and operands[0].value != 0:
self.updater.mk_assign(inst, IRLiteral(1))
return
# x | 0 -> x
if lit_eq(operands[0], 0):
self.updater.mk_assign(inst, operands[1])
return
if inst.opcode == "eq":
# x == x -> 1
if operands[0] == operands[1]:
self.updater.mk_assign(inst, IRLiteral(1))
return
# x == 0 -> iszero x
if lit_eq(operands[0], 0):
self.updater.update(inst, "iszero", [operands[1]])
return
# eq x -1 -> iszero(~x)
# (saves codesize, not gas)
if lit_eq(operands[0], -1):
var = self.updater.add_before(inst, "not", [operands[1]])
assert var is not None # help mypy
self.updater.update(inst, "iszero", [var])
return
if prefer_iszero:
# (eq x y) has the same truthyness as (iszero (xor x y))
tmp = self.updater.add_before(inst, "xor", [operands[0], operands[1]])
assert tmp is not None # help mypy
self.updater.update(inst, "iszero", [tmp])
return
if inst.opcode in COMPARATOR_INSTRUCTIONS:
self._optimize_comparator_instruction(inst, prefer_iszero)
def _optimize_comparator_instruction(self, inst, prefer_iszero):
opcode, operands = inst.opcode, inst.operands
assert opcode in COMPARATOR_INSTRUCTIONS # sanity
inst_out = inst.output
# (x > x) == (x < x) -> 0
if operands[0] == operands[1]:
self.updater.mk_assign(inst, IRLiteral(0))
return
is_gt = "g" in opcode
signed = "s" in opcode
lo, hi = int_bounds(bits=256, signed=signed)
if not isinstance(operands[0], IRLiteral):
return
# for comparison operators, we have three special boundary cases:
# almost always, never and almost never.
# almost_always is always true for the non-strict ("ge" and co)
# comparators. for strict comparators ("gt" and co), almost_always
# is true except for one case. never is never true for the strict
# comparators. never is almost always false for the non-strict
# comparators, except for one case. and almost_never is almost
# never true (except one case) for the strict comparators.
if is_gt:
almost_always, never = lo, hi
almost_never = hi - 1
else:
almost_always, never = hi, lo
almost_never = lo + 1
if lit_eq(operands[0], never):
self.updater.mk_assign(inst, IRLiteral(0))
return
if lit_eq(operands[0], almost_never):
# (lt x 1), (gt x (MAX_UINT256 - 1)), (slt x (MIN_INT256 + 1))
self.updater.update(inst, "eq", [operands[1], IRLiteral(never)])
return
# rewrites. in positions where iszero is preferred, (gt x 5) => (ge x 6)
if prefer_iszero and lit_eq(operands[0], almost_always):
# e.g. gt x 0, slt x MAX_INT256
tmp = self.updater.add_before(inst, "eq", operands)
self.updater.update(inst, "iszero", [tmp])
return
# since push0 was introduced in shanghai, it's potentially
# better to actually reverse this optimization -- i.e.
# replace iszero(iszero(x)) with (gt x 0)
if opcode == "gt" and lit_eq(operands[0], 0):
tmp = self.updater.add_before(inst, "iszero", [operands[1]])
self.updater.update(inst, "iszero", [tmp])
return
# rewrite comparisons by either inserting or removing an `iszero`,
# e.g. `x > N` -> `x >= (N + 1)`
uses = self.dfg.get_uses(inst_out)
if len(uses) != 1:
return
after = uses.first()
if after.opcode not in ("iszero", "assert"):
return
if after.opcode == "iszero":
# peer down the iszero chain to see if it actually makes sense
# to remove the iszero.
n_uses = self.dfg.get_uses(after.output)
if len(n_uses) != 1: # block the optimization
return
# "assert" inserts an iszero in assembly, so we will have
# two iszeros in the asm. this is already optimal, so we don't
# apply the iszero insertion
if n_uses.first().opcode == "assert":
return
val = wrap256(operands[0].value, signed=signed)
assert val != never, "unreachable" # sanity
if is_gt:
val += 1
else:
# TODO: if resulting val is -1 (0xFF..FF), disable this
# when optimization level == codesize
val -= 1
# sanity -- implied by precondition that `val != never`
assert wrap256(val, signed=signed) == val
new_opcode = flip_comparison_opcode(opcode)
self.updater.update(inst, new_opcode, [IRLiteral(val), operands[1]])
insert_iszero = after.opcode == "assert"
if insert_iszero:
# next instruction is an assert, so we insert an iszero so
# that there will be two iszeros in the assembly.
assert len(after.operands) == 1, after
var = self.updater.add_before(after, "iszero", [inst_out])
self.updater.update_operands(after, {after.operands[0]: var})
else:
# remove the iszero!
assert len(after.operands) == 1, after
self.updater.update(after, "assign", after.operands)
|
AlgebraicOptimizationPass
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_groupby.py
|
{
"start": 24918,
"end": 25038
}
|
class ____(Head):
groupby_chunk = staticmethod(_tail_chunk)
groupby_aggregate = staticmethod(_tail_aggregate)
|
Tail
|
python
|
ray-project__ray
|
python/ray/dashboard/modules/node/datacenter.py
|
{
"start": 1012,
"end": 9137
}
|
class ____:
@staticmethod
@async_loop_forever(dashboard_consts.RAY_DASHBOARD_STATS_PURGING_INTERVAL)
async def purge():
# Purge data that is out of date.
# These data sources are maintained by DashboardHead,
# we do not needs to purge them:
# * agents
# * nodes
alive_nodes = {
node_id
for node_id, node_info in DataSource.nodes.items()
if node_info["state"] == "ALIVE"
}
for key in DataSource.node_stats.keys() - alive_nodes:
DataSource.node_stats.pop(key)
for key in DataSource.node_physical_stats.keys() - alive_nodes:
DataSource.node_physical_stats.pop(key)
@classmethod
@async_loop_forever(dashboard_consts.RAY_DASHBOARD_STATS_UPDATING_INTERVAL)
async def organize(cls, thread_pool_executor):
"""
Organizes data: read from (node_physical_stats, node_stats) and updates
(node_workers, node_worker_stats).
This methods is not really async, but DataSource is not thread safe so we need
to make sure it's on the main event loop thread. To avoid blocking the main
event loop, we yield after each node processed.
"""
loop = get_or_create_event_loop()
node_workers = {}
core_worker_stats = {}
# NOTE: We copy keys of the `DataSource.nodes` to make sure
# it doesn't change during the iteration (since its being updated
# from another async task)
for node_id in list(DataSource.nodes.keys()):
node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
node_stats = DataSource.node_stats.get(node_id, {})
# Offloads the blocking operation to a thread pool executor. This also
# yields to the event loop.
workers = await loop.run_in_executor(
thread_pool_executor,
cls._extract_workers_for_node,
node_physical_stats,
node_stats,
)
for worker in workers:
for stats in worker.get("coreWorkerStats", []):
worker_id = stats["workerId"]
core_worker_stats[worker_id] = stats
node_workers[node_id] = workers
DataSource.node_workers = node_workers
DataSource.core_worker_stats = core_worker_stats
@classmethod
def _extract_workers_for_node(cls, node_physical_stats, node_stats):
workers = []
# Merge coreWorkerStats (node stats) to workers (node physical stats)
pid_to_worker_stats = {}
pid_to_language = {}
pid_to_job_id = {}
for core_worker_stats in node_stats.get("coreWorkersStats", []):
pid = core_worker_stats["pid"]
pid_to_worker_stats[pid] = core_worker_stats
pid_to_language[pid] = core_worker_stats["language"]
pid_to_job_id[pid] = core_worker_stats["jobId"]
for worker in node_physical_stats.get("workers", []):
worker = dict(worker)
pid = worker["pid"]
core_worker_stats = pid_to_worker_stats.get(pid)
# Empty list means core worker stats is not available.
worker["coreWorkerStats"] = [core_worker_stats] if core_worker_stats else []
worker["language"] = pid_to_language.get(
pid, dashboard_consts.DEFAULT_LANGUAGE
)
worker["jobId"] = pid_to_job_id.get(pid, dashboard_consts.DEFAULT_JOB_ID)
workers.append(worker)
return workers
@classmethod
async def get_node_info(cls, node_id, get_summary=False):
node_physical_stats = dict(DataSource.node_physical_stats.get(node_id, {}))
node_stats = dict(DataSource.node_stats.get(node_id, {}))
node = DataSource.nodes.get(node_id, {})
if get_summary:
node_physical_stats.pop("workers", None)
node_stats.pop("workersStats", None)
else:
node_stats.pop("coreWorkersStats", None)
store_stats = node_stats.get("storeStats", {})
used = int(store_stats.get("objectStoreBytesUsed", 0))
# objectStoreBytesAvail == total in the object_manager.cc definition.
total = int(store_stats.get("objectStoreBytesAvail", 0))
ray_stats = {
"object_store_used_memory": used,
"object_store_available_memory": total - used,
}
node_info = node_physical_stats
# Merge node stats to node physical stats under raylet
node_info["raylet"] = node_stats
node_info["raylet"].update(ray_stats)
# Merge GcsNodeInfo to node physical stats
node_info["raylet"].update(node)
death_info = node.get("deathInfo", {})
node_info["raylet"]["stateMessage"] = compose_state_message(
death_info.get("reason", None), death_info.get("reasonMessage", None)
)
if not get_summary:
actor_table_entries = DataSource.node_actors.get(node_id, {})
# Merge actors to node physical stats
node_info["actors"] = {
actor_id: await DataOrganizer._get_actor_info(actor_table_entry)
for actor_id, actor_table_entry in actor_table_entries.items()
}
# Update workers to node physical stats
node_info["workers"] = DataSource.node_workers.get(node_id, [])
return node_info
@classmethod
async def get_all_node_summary(cls):
return [
# NOTE: We're intentionally awaiting in a loop to avoid excessive
# concurrency spinning up excessive # of tasks for large clusters
await DataOrganizer.get_node_info(node_id, get_summary=True)
for node_id in DataSource.nodes.keys()
]
@classmethod
async def get_actor_infos(cls, actor_ids: Optional[List[str]] = None):
target_actor_table_entries: dict[str, Optional[dict]]
if actor_ids is not None:
target_actor_table_entries = {
actor_id: DataSource.actors.get(actor_id) for actor_id in actor_ids
}
else:
target_actor_table_entries = DataSource.actors
return {
actor_id: await DataOrganizer._get_actor_info(actor_table_entry)
for actor_id, actor_table_entry in target_actor_table_entries.items()
}
@staticmethod
async def _get_actor_info(actor: Optional[dict]) -> Optional[dict]:
if actor is None:
return None
actor = actor.copy()
worker_id = actor["address"]["workerId"]
core_worker_stats = DataSource.core_worker_stats.get(worker_id, {})
actor.update(core_worker_stats)
# TODO(fyrestone): remove this, give a link from actor
# info to worker info in front-end.
node_id = actor["address"]["nodeId"]
pid = core_worker_stats.get("pid")
node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
actor_process_stats = None
actor_process_gpu_stats = []
if pid:
for process_stats in node_physical_stats.get("workers", []):
if process_stats["pid"] == pid:
actor_process_stats = process_stats
break
for gpu_stats in node_physical_stats.get("gpus", []):
# gpu_stats.get("processesPids") can be None, an empty list or a
# list of dictionaries.
for process in gpu_stats.get("processesPids") or []:
if process["pid"] == pid:
actor_process_gpu_stats.append(gpu_stats)
break
actor["gpus"] = actor_process_gpu_stats
actor["processStats"] = actor_process_stats
actor["mem"] = node_physical_stats.get("mem", [])
required_resources = parse_pg_formatted_resources_to_original(
actor["requiredResources"]
)
actor["requiredResources"] = required_resources
return actor
|
DataOrganizer
|
python
|
pytorch__pytorch
|
test/dynamo/test_repros.py
|
{
"start": 29010,
"end": 29300
}
|
class ____(torch.nn.Module):
def inner_fn(self, left, right):
return tuple(left) == tuple(right)
def fn(self, tensor):
if type(tensor) is int:
return False
torch.add(tensor, tensor)
return self.inner_fn(tensor.shape, (1, 2, 3))
|
MockModule
|
python
|
spyder-ide__spyder
|
spyder/utils/snippets/nodes.py
|
{
"start": 10535,
"end": 11018
}
|
class ____(SnippetASTNode):
"""
Node that represents a variable snippet.
This node represents the expression ${var} or $var, where var is some
variable qualified name.
"""
KIND = SnippetKind.VARIABLE
def __init__(self, variable):
SnippetASTNode.__init__(self)
self.variable = variable
self.value = variable
def update(self, value):
self.value = value
def text(self):
return self.value
|
VariableSnippetNode
|
python
|
django__django
|
tests/delete/models.py
|
{
"start": 714,
"end": 752
}
|
class ____(RChild):
pass
|
RChildChild
|
python
|
coleifer__peewee
|
tests/apsw_ext.py
|
{
"start": 716,
"end": 985
}
|
class ____(object):
def Filter(self, *a):
self.val = 0
def Eof(self): return False
def Rowid(self):
return self.val
def Column(self, col):
return self.val
def Next(self):
self.val += 1
def Close(self): pass
|
VTCursor
|
python
|
PyCQA__flake8
|
src/flake8/exceptions.py
|
{
"start": 80,
"end": 150
}
|
class ____(Exception):
"""Plain Flake8 exception."""
|
Flake8Exception
|
python
|
Textualize__textual
|
src/textual/renderables/background_screen.py
|
{
"start": 297,
"end": 2975
}
|
class ____:
"""Tints a renderable and removes links / meta."""
def __init__(
self,
screen: Screen,
color: Color,
) -> None:
"""Initialize a BackgroundScreen instance.
Args:
screen: A Screen instance.
color: A color (presumably with alpha).
"""
self.screen = screen
"""Screen to process."""
self.color = color
"""Color to apply (should have alpha)."""
@classmethod
def process_segments(
cls, segments: Iterable[Segment], color: Color
) -> Iterable[Segment]:
"""Apply tint to segments and remove meta + styles
Args:
segments: Incoming segments.
color: Color of tint.
Returns:
Segments with applied tint.
"""
from_rich_color = Color.from_rich_color
style_from_color = Style.from_color
_Segment = Segment
NULL_STYLE = Style()
if color.a == 0:
# Special case for transparent color
for segment in segments:
text, style, control = segment
if control:
yield segment
else:
yield _Segment(
text,
NULL_STYLE if style is None else style.clear_meta_and_links(),
control,
)
return
for segment in segments:
text, style, control = segment
if control:
yield segment
else:
style = NULL_STYLE if style is None else style.clear_meta_and_links()
yield _Segment(
text,
(
style
+ style_from_color(
(
(from_rich_color(style.color) + color).rich_color
if style.color is not None
else None
),
(
(from_rich_color(style.bgcolor) + color).rich_color
if style.bgcolor is not None
else None
),
)
),
control,
)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
segments = console.render(self.screen._compositor, options)
color = self.color
return self.process_segments(segments, color)
|
BackgroundScreen
|
python
|
scrapy__scrapy
|
tests/test_engine.py
|
{
"start": 2697,
"end": 2752
}
|
class ____(MySpider):
item_cls = dict
|
DictItemsSpider
|
python
|
Netflix__metaflow
|
metaflow/cli.py
|
{
"start": 24339,
"end": 25699
}
|
class ____(object):
def __init__(self, flow):
self.flow = flow
def main(flow, args=None, handle_exceptions=True, entrypoint=None):
# Ignore warning(s) and prevent spamming the end-user.
# TODO: This serves as a short term workaround for RuntimeWarning(s) thrown
# in py3.8 related to log buffering (bufsize=1).
import warnings
warnings.filterwarnings("ignore")
if entrypoint is None:
entrypoint = [sys.executable, sys.argv[0]]
state = CliState(flow)
state.entrypoint = entrypoint
try:
if args is None:
start(auto_envvar_prefix="METAFLOW", obj=state)
else:
try:
start(args=args, obj=state, auto_envvar_prefix="METAFLOW")
except SystemExit as e:
return e.code
except MetaflowException as x:
if handle_exceptions:
print_metaflow_exception(x)
sys.exit(1)
else:
raise
except Exception as x:
if handle_exceptions:
print_unknown_exception(x)
sys.exit(1)
else:
raise
finally:
if hasattr(state, "monitor") and state.monitor is not None:
state.monitor.terminate()
if hasattr(state, "event_logger") and state.event_logger is not None:
state.event_logger.terminate()
|
CliState
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/chains/query_constructor/parser.py
|
{
"start": 1630,
"end": 1799
}
|
class ____(TypedDict):
"""A datetime in ISO 8601 format (YYYY-MM-DDTHH:MM:SS)."""
datetime: str
type: Literal["datetime"]
@v_args(inline=True)
|
ISO8601DateTime
|
python
|
pypa__warehouse
|
tests/unit/email/test_init.py
|
{
"start": 59940,
"end": 63295
}
|
class ____:
def test_send_new_organization_approved_email(
self, pyramid_request, pyramid_config, monkeypatch
):
initiator_user = pretend.stub(
id="id",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
organization_name = "example"
message = "example message"
subject_renderer = pyramid_config.testing_add_renderer(
"email/new-organization-approved/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/new-organization-approved/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/new-organization-approved/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=initiator_user.id)
)
),
)
pyramid_request.user = initiator_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
result = email.send_new_organization_approved_email(
pyramid_request,
initiator_user,
organization_name=organization_name,
message=message,
)
assert result == {
"organization_name": organization_name,
"message": message,
}
subject_renderer.assert_(
organization_name=organization_name,
message=message,
)
body_renderer.assert_(
organization_name=organization_name,
message=message,
)
html_renderer.assert_(
organization_name=organization_name,
message=message,
)
assert pyramid_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{initiator_user.username} <{initiator_user.email}>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": initiator_user.id,
"additional": {
"from_": "noreply@example.com",
"to": initiator_user.email,
"subject": "Email Subject",
"redact_ip": False,
},
},
)
]
|
TestSendNewOrganizationApprovedEmail
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py
|
{
"start": 16180,
"end": 17372
}
|
class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("types.ConsumerGroup.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ManagedKafkaGetConsumerGroupOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
consumer_group_id=TEST_CONSUMER_GROUP_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.get_consumer_group.assert_called_once_with(
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
consumer_group_id=TEST_CONSUMER_GROUP_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
|
TestManagedKafkaGetConsumerGroupOperator
|
python
|
python__mypy
|
mypy/types.py
|
{
"start": 8287,
"end": 10779
}
|
class ____(mypy.nodes.Context):
"""Abstract base class for all types."""
__slots__ = ("_can_be_true", "_can_be_false")
# 'can_be_true' and 'can_be_false' mean whether the value of the
# expression can be true or false in a boolean context. They are useful
# when inferring the type of logic expressions like `x and y`.
#
# For example:
# * the literal `False` can't be true while `True` can.
# * a value with type `bool` can be true or false.
# * `None` can't be true
# * ...
def __init__(self, line: int = -1, column: int = -1) -> None:
super().__init__(line, column)
# Value of these can be -1 (use the default, lazy init), 0 (false) or 1 (true)
self._can_be_true = -1
self._can_be_false = -1
@property
def can_be_true(self) -> bool:
if self._can_be_true == -1: # Lazy init helps mypyc
self._can_be_true = self.can_be_true_default()
return bool(self._can_be_true)
@can_be_true.setter
def can_be_true(self, v: bool) -> None:
self._can_be_true = v
@property
def can_be_false(self) -> bool:
if self._can_be_false == -1: # Lazy init helps mypyc
self._can_be_false = self.can_be_false_default()
return bool(self._can_be_false)
@can_be_false.setter
def can_be_false(self, v: bool) -> None:
self._can_be_false = v
def can_be_true_default(self) -> bool:
return True
def can_be_false_default(self) -> bool:
return True
def accept(self, visitor: TypeVisitor[T]) -> T:
raise RuntimeError("Not implemented", type(self))
def __repr__(self) -> str:
return self.accept(TypeStrVisitor(options=Options()))
def str_with_options(self, options: Options) -> str:
return self.accept(TypeStrVisitor(options=options))
def serialize(self) -> JsonDict | str:
raise NotImplementedError(f"Cannot serialize {self.__class__.__name__} instance")
@classmethod
def deserialize(cls, data: JsonDict) -> Type:
raise NotImplementedError(f"Cannot deserialize {cls.__name__} instance")
def write(self, data: WriteBuffer) -> None:
raise NotImplementedError(f"Cannot serialize {self.__class__.__name__} instance")
@classmethod
def read(cls, data: ReadBuffer) -> Type:
raise NotImplementedError(f"Cannot deserialize {cls.__name__} instance")
def is_singleton_type(self) -> bool:
return False
|
Type
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_pattern09.py
|
{
"start": 315,
"end": 2204
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_pattern09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [115359744, 115361280]
data = [
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
worksheet.write_column("E1", data[4])
worksheet.write_column("F1", data[5])
worksheet.write_column("G1", data[6])
worksheet.write_column("H1", data[7])
chart.add_series({"values": "=Sheet1!$A$1:$A$3"})
chart.add_series({"values": "=Sheet1!$B$1:$B$3"})
chart.add_series({"values": "=Sheet1!$C$1:$C$3"})
chart.add_series({"values": "=Sheet1!$D$1:$D$3"})
chart.add_series({"values": "=Sheet1!$E$1:$E$3"})
chart.add_series({"values": "=Sheet1!$F$1:$F$3"})
chart.add_series({"values": "=Sheet1!$G$1:$G$3"})
chart.add_series({"values": "=Sheet1!$H$1:$H$3"})
chart.set_chartarea(
{
"pattern": {
"pattern": "percent_5",
"fg_color": "red",
"bg_color": "yellow",
}
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-array-concatenation-value.py
|
{
"start": 54,
"end": 341
}
|
class ____(object):
def findTheArrayConcVal(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum((nums[i]*10**(int(math.log10(nums[~i]))+1) for i in xrange(len(nums)//2)))+sum(nums[i] for i in xrange(len(nums)//2, len(nums)))
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclassTransform3.py
|
{
"start": 2103,
"end": 2321
}
|
class ____(Generic[_T]):
not_a_field: _T
def __init_subclass__(
cls,
*,
frozen: bool = False,
kw_only: bool = True,
order: bool = True,
) -> None: ...
|
GenericModelBase
|
python
|
pennersr__django-allauth
|
tests/projects/common/headless/rest_framework/views.py
|
{
"start": 188,
"end": 471
}
|
class ____(APIView):
authentication_classes = [JWTTokenAuthentication]
def get(self, request, *args, **kwargs):
data = {"resource": "ok"}
if "userinfo" in request.GET:
data["user_email"] = request.user.email
return Response(data)
|
ResourceView
|
python
|
django__django
|
tests/backends/sqlite/test_introspection.py
|
{
"start": 170,
"end": 1952
}
|
class ____(TestCase):
def test_get_primary_key_column(self):
"""
Get the primary key column regardless of whether or not it has
quotation.
"""
testable_column_strings = (
("id", "id"),
("[id]", "id"),
("`id`", "id"),
('"id"', "id"),
("[id col]", "id col"),
("`id col`", "id col"),
('"id col"', "id col"),
)
with connection.cursor() as cursor:
for column, expected_string in testable_column_strings:
sql = "CREATE TABLE test_primary (%s int PRIMARY KEY NOT NULL)" % column
with self.subTest(column=column):
try:
cursor.execute(sql)
field = connection.introspection.get_primary_key_column(
cursor, "test_primary"
)
self.assertEqual(field, expected_string)
finally:
cursor.execute("DROP TABLE test_primary")
def test_get_primary_key_column_pk_constraint(self):
sql = """
CREATE TABLE test_primary(
id INTEGER NOT NULL,
created DATE,
PRIMARY KEY(id)
)
"""
with connection.cursor() as cursor:
try:
cursor.execute(sql)
field = connection.introspection.get_primary_key_column(
cursor,
"test_primary",
)
self.assertEqual(field, "id")
finally:
cursor.execute("DROP TABLE test_primary")
@unittest.skipUnless(connection.vendor == "sqlite", "SQLite tests")
|
IntrospectionTests
|
python
|
getsentry__sentry
|
tests/sentry/seer/similarity/test_utils.py
|
{
"start": 40116,
"end": 49170
}
|
class ____(TestCase):
def setUp(self) -> None:
# The `in_app` and `contributes` values of these frames will be determined by the project
# stacktrace rules we'll add below
self.contributing_system_frame = {
"function": "handleRequest",
"filename": "/node_modules/express/router.js",
"context_line": "return handler(request);",
}
self.non_contributing_system_frame = {
"function": "runApp",
"filename": "/node_modules/express/app.js",
"context_line": "return server.serve(port);",
}
self.contributing_in_app_frame = {
"function": "playFetch",
"filename": "/dogApp/dogpark.js",
"context_line": "raise FailedToFetchError('Charlie didn't bring the ball back');",
}
self.non_contributing_in_app_frame = {
"function": "recordMetrics",
"filename": "/dogApp/metrics.js",
"context_line": "return withMetrics(handler, metricName, tags);",
}
self.exception_value = {
"type": "FailedToFetchError",
"value": "Charlie didn't bring the ball back",
}
self.event = Event(
event_id="12312012041520130908201311212012",
project_id=self.project.id,
data={
"title": "FailedToFetchError('Charlie didn't bring the ball back')",
"exception": {"values": [self.exception_value]},
},
)
self.project.update_option(
"sentry:grouping_enhancements",
"\n".join(
[
"stack.function:runApp -app -group",
"stack.function:handleRequest -app +group",
"stack.function:recordMetrics +app -group",
"stack.function:playFetch +app +group",
]
),
)
def test_single_exception_simple(self) -> None:
for stacktrace_length, expected_result in [
(MAX_FRAME_COUNT - 1, False),
(MAX_FRAME_COUNT + 1, True),
]:
self.event.data["platform"] = "java"
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": [self.contributing_in_app_frame] * stacktrace_length
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
assert (
stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST)
is expected_result
)
def test_single_exception_bypassed_platform(self) -> None:
# Regardless of the number of frames, we never flag it as being too long
for stacktrace_length, expected_result in [
(MAX_FRAME_COUNT - 1, False),
(MAX_FRAME_COUNT + 1, False),
]:
self.event.data["platform"] = "python"
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": [self.contributing_in_app_frame] * stacktrace_length
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
assert (
stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST)
is expected_result
)
def test_chained_exception_simple(self) -> None:
for total_frames, expected_result in [
(MAX_FRAME_COUNT - 2, False),
(MAX_FRAME_COUNT + 2, True),
]:
self.event.data["platform"] = "java"
self.event.data["exception"]["values"] = [
{**self.exception_value},
{**self.exception_value},
]
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": [self.contributing_in_app_frame] * (total_frames // 2)
}
self.event.data["exception"]["values"][1]["stacktrace"] = {
"frames": [self.contributing_in_app_frame] * (total_frames // 2)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
assert (
stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST)
is expected_result
)
def test_chained_exception_bypassed_platform(self) -> None:
# Regardless of the number of frames, we never flag it as being too long
for total_frames, expected_result in [
(MAX_FRAME_COUNT - 2, False),
(MAX_FRAME_COUNT + 2, False),
]:
self.event.data["platform"] = "python"
self.event.data["exception"]["values"] = [
{**self.exception_value},
{**self.exception_value},
]
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": [self.contributing_in_app_frame] * (total_frames // 2)
}
self.event.data["exception"]["values"][1]["stacktrace"] = {
"frames": [self.contributing_in_app_frame] * (total_frames // 2)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
assert (
stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST)
is expected_result
)
def test_ignores_non_contributing_frames(self) -> None:
self.event.data["platform"] = "java"
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": (
# Taken together, there are too many frames
[self.contributing_in_app_frame] * (MAX_FRAME_COUNT - 1)
+ [self.non_contributing_in_app_frame] * 2
)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
assert (
stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST)
is False # Not flagged as too many because only contributing frames are counted
)
def test_prefers_app_frames(self) -> None:
self.event.data["platform"] = "java"
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": (
[self.contributing_in_app_frame] * (MAX_FRAME_COUNT - 1) # Under the limit
+ [self.contributing_system_frame] * (MAX_FRAME_COUNT + 1) # Over the limit
)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
assert (
stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST)
is False # Not flagged as too many because only in-app frames are counted
)
def test_uses_app_or_system_variants(self) -> None:
for frame, expected_variant_name in [
(self.contributing_in_app_frame, "app"),
(self.contributing_system_frame, "system"),
]:
self.event.data["platform"] = "java"
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": [frame] * (MAX_FRAME_COUNT + 1)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
contributing_variant, _ = get_contributing_variant_and_component(variants)
assert contributing_variant.variant_name == expected_variant_name
assert stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST) is True
def test_ignores_events_not_grouped_on_stacktrace(self) -> None:
self.event.data["platform"] = "java"
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": ([self.contributing_system_frame] * (MAX_FRAME_COUNT + 1)) # Over the limit
}
self.event.data["fingerprint"] = ["dogs_are_great"]
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
contributing_variant, _ = get_contributing_variant_and_component(variants)
assert isinstance(contributing_variant, CustomFingerprintVariant)
assert (
stacktrace_exceeds_limits(self.event, variants, ReferrerOptions.INGEST)
is False # Not flagged as too many because it's grouped by fingerprint
)
|
HasTooManyFramesTest
|
python
|
redis__redis-py
|
redis/_parsers/base.py
|
{
"start": 7913,
"end": 10628
}
|
class ____(Protocol):
"""Protocol defining RESP3-specific parsing functionality"""
pubsub_push_handler_func: Callable
invalidation_push_handler_func: Optional[Callable] = None
node_moving_push_handler_func: Optional[Callable] = None
maintenance_push_handler_func: Optional[Callable] = None
def handle_pubsub_push_response(self, response):
"""Handle pubsub push responses"""
raise NotImplementedError()
def handle_push_response(self, response, **kwargs):
msg_type = response[0]
if isinstance(msg_type, bytes):
msg_type = msg_type.decode()
if msg_type not in (
_INVALIDATION_MESSAGE,
*_MAINTENANCE_MESSAGES,
_MOVING_MESSAGE,
):
return self.pubsub_push_handler_func(response)
try:
if (
msg_type == _INVALIDATION_MESSAGE
and self.invalidation_push_handler_func
):
return self.invalidation_push_handler_func(response)
if msg_type == _MOVING_MESSAGE and self.node_moving_push_handler_func:
parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
msg_type
][1]
notification = parser_function(response)
return self.node_moving_push_handler_func(notification)
if msg_type in _MAINTENANCE_MESSAGES and self.maintenance_push_handler_func:
parser_function = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
msg_type
][1]
notification_type = MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING[
msg_type
][0]
notification = parser_function(response, notification_type)
if notification is not None:
return self.maintenance_push_handler_func(notification)
except Exception as e:
logger.error(
"Error handling {} message ({}): {}".format(msg_type, response, e)
)
return None
def set_pubsub_push_handler(self, pubsub_push_handler_func):
self.pubsub_push_handler_func = pubsub_push_handler_func
def set_invalidation_push_handler(self, invalidation_push_handler_func):
self.invalidation_push_handler_func = invalidation_push_handler_func
def set_node_moving_push_handler(self, node_moving_push_handler_func):
self.node_moving_push_handler_func = node_moving_push_handler_func
def set_maintenance_push_handler(self, maintenance_push_handler_func):
self.maintenance_push_handler_func = maintenance_push_handler_func
|
PushNotificationsParser
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/workflows.py
|
{
"start": 26429,
"end": 29232
}
|
class ____(GoogleCloudBaseOperator):
"""
Returns an execution for the given ``workflow_id`` and ``execution_id``.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsGetExecutionOperator`
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id", "execution_id")
operator_extra_links = (WorkflowsExecutionLink(),)
def __init__(
self,
*,
workflow_id: str,
execution_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.execution_id = execution_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving execution %s for workflow %s", self.execution_id, self.workflow_id)
execution = hook.get_execution(
workflow_id=self.workflow_id,
execution_id=self.execution_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsExecutionLink.persist(
context=context,
location_id=self.location,
workflow_id=self.workflow_id,
execution_id=self.execution_id,
project_id=self.project_id or hook.project_id,
)
return Execution.to_dict(execution)
|
WorkflowsGetExecutionOperator
|
python
|
cython__cython
|
Tools/dataclass_test_data/test_dataclasses.py
|
{
"start": 88382,
"end": 93406
}
|
class ____(unittest.TestCase):
def test_frozen(self):
@dataclass(frozen=True)
class C:
i: int
c = C(10)
self.assertEqual(c.i, 10)
with self.assertRaises(FrozenInstanceError):
c.i = 5
self.assertEqual(c.i, 10)
def test_inherit(self):
@dataclass(frozen=True)
class C:
i: int
@dataclass(frozen=True)
class D(C):
j: int
d = D(0, 10)
with self.assertRaises(FrozenInstanceError):
d.i = 5
with self.assertRaises(FrozenInstanceError):
d.j = 6
self.assertEqual(d.i, 0)
self.assertEqual(d.j, 10)
def test_inherit_nonfrozen_from_empty_frozen(self):
@dataclass(frozen=True)
class C:
pass
with self.assertRaisesRegex(TypeError,
'cannot inherit non-frozen dataclass from a frozen one'):
@dataclass
class D(C):
j: int
def test_inherit_nonfrozen_from_empty(self):
@dataclass
class C:
pass
@dataclass
class D(C):
j: int
d = D(3)
self.assertEqual(d.j, 3)
self.assertIsInstance(d, C)
# Test both ways: with an intermediate normal (non-dataclass)
# class and without an intermediate class.
def test_inherit_nonfrozen_from_frozen(self):
for intermediate_class in [True, False]:
with self.subTest(intermediate_class=intermediate_class):
@dataclass(frozen=True)
class C:
i: int
if intermediate_class:
class I(C): pass
else:
I = C
with self.assertRaisesRegex(TypeError,
'cannot inherit non-frozen dataclass from a frozen one'):
@dataclass
class D(I):
pass
def test_inherit_frozen_from_nonfrozen(self):
for intermediate_class in [True, False]:
with self.subTest(intermediate_class=intermediate_class):
@dataclass
class C:
i: int
if intermediate_class:
class I(C): pass
else:
I = C
with self.assertRaisesRegex(TypeError,
'cannot inherit frozen dataclass from a non-frozen one'):
@dataclass(frozen=True)
class D(I):
pass
def test_inherit_from_normal_class(self):
for intermediate_class in [True, False]:
with self.subTest(intermediate_class=intermediate_class):
class C:
pass
if intermediate_class:
class I(C): pass
else:
I = C
@dataclass(frozen=True)
class D(I):
i: int
d = D(10)
with self.assertRaises(FrozenInstanceError):
d.i = 5
def test_non_frozen_normal_derived(self):
# See bpo-32953.
@dataclass(frozen=True)
class D:
x: int
y: int = 10
class S(D):
pass
s = S(3)
self.assertEqual(s.x, 3)
self.assertEqual(s.y, 10)
s.cached = True
# But can't change the frozen attributes.
with self.assertRaises(FrozenInstanceError):
s.x = 5
with self.assertRaises(FrozenInstanceError):
s.y = 5
self.assertEqual(s.x, 3)
self.assertEqual(s.y, 10)
self.assertEqual(s.cached, True)
def test_overwriting_frozen(self):
# frozen uses __setattr__ and __delattr__.
with self.assertRaisesRegex(TypeError,
'Cannot overwrite attribute __setattr__'):
@dataclass(frozen=True)
class C:
x: int
def __setattr__(self):
pass
with self.assertRaisesRegex(TypeError,
'Cannot overwrite attribute __delattr__'):
@dataclass(frozen=True)
class C:
x: int
def __delattr__(self):
pass
@dataclass(frozen=False)
class C:
x: int
def __setattr__(self, name, value):
self.__dict__['x'] = value * 2
self.assertEqual(C(10).x, 20)
def test_frozen_hash(self):
@dataclass(frozen=True)
class C:
x: Any
# If x is immutable, we can compute the hash. No exception is
# raised.
hash(C(3))
# If x is mutable, computing the hash is an error.
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(C({}))
|
TestFrozen
|
python
|
agronholm__apscheduler
|
src/apscheduler/_enums.py
|
{
"start": 1771,
"end": 2263
}
|
class ____(Enum):
"""
Used to indicate what to do when trying to add a schedule whose ID conflicts with an
existing schedule.
.. attribute:: replace
replace the existing schedule with a new one
.. attribute:: do_nothing
keep the existing schedule as-is and drop the new schedule
.. attribute:: exception
raise an exception if a conflict is detected
"""
replace = auto()
do_nothing = auto()
exception = auto()
|
ConflictPolicy
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-jira/unit_tests/integration/config.py
|
{
"start": 125,
"end": 1084
}
|
class ____:
def __init__(self) -> None:
self._config: Dict[str, Any] = {
"api_token": "any_api_token",
"domain": "airbyteio.atlassian.net",
"email": "integration-test@airbyte.io",
"start_date": "2021-01-01T00:00:00Z",
"projects": [],
}
def with_api_token(self, api_token: str) -> "ConfigBuilder":
self._config["api_token"] = api_token
return self
def with_domain(self, domain: str) -> "ConfigBuilder":
self._config["domain"] = domain
return self
def with_start_date(self, start_datetime: datetime) -> "ConfigBuilder":
self._config["start_date"] = start_datetime.strftime("%Y-%m-%dT%H:%M:%SZ")
return self
def with_projects(self, projects: List[str]) -> "ConfigBuilder":
self._config["projects"] = projects
return self
def build(self) -> Dict[str, Any]:
return self._config
|
ConfigBuilder
|
python
|
pallets__itsdangerous
|
src/itsdangerous/signer.py
|
{
"start": 1362,
"end": 2296
}
|
class ____(SigningAlgorithm):
"""Provides signature generation using HMACs."""
#: The digest method to use with the MAC algorithm. This defaults to
#: SHA1, but can be changed to any other function in the hashlib
#: module.
default_digest_method: t.Any = staticmethod(_lazy_sha1)
def __init__(self, digest_method: t.Any = None):
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method: t.Any = digest_method
def get_signature(self, key: bytes, value: bytes) -> bytes:
mac = hmac.new(key, msg=value, digestmod=self.digest_method)
return mac.digest()
def _make_keys_list(
secret_key: str | bytes | cabc.Iterable[str] | cabc.Iterable[bytes],
) -> list[bytes]:
if isinstance(secret_key, (str, bytes)):
return [want_bytes(secret_key)]
return [want_bytes(s) for s in secret_key] # pyright: ignore
|
HMACAlgorithm
|
python
|
networkx__networkx
|
benchmarks/benchmarks/benchmark_classes.py
|
{
"start": 60,
"end": 1316
}
|
class ____:
params = ["Graph", "DiGraph", "MultiGraph", "MultiDiGraph"]
param_names = ["graph_type"]
def setup(self, graph_type):
self.nodes = list(range(1, 1000))
self.edges = []
self.subgraph_nodes = list(range(1, 100))
self.subgraph_nodes_large = list(range(1, 900))
self.G = getattr(nx, graph_type)()
def time_graph_create(self, graph_type):
_ = getattr(nx, graph_type)()
def time_add_nodes_from(self, graph_type):
self.G.add_nodes_from(self.nodes)
def time_add_edges_from(self, graph_type):
self.G.add_edges_from(self.edges)
def time_remove_nodes_from(self, graph_type):
self.G.remove_nodes_from(self.nodes)
def time_remove_edges_from(self, graph_type):
self.G.remove_edges_from(self.edges)
def time_copy(self, graph_type):
_ = self.G.copy()
def time_to_directed(self, graph_type):
_ = self.G.to_directed()
def time_to_undirected(self, graph_type):
_ = self.G.to_undirected()
def time_subgraph(self, graph_type):
_ = self.G.subgraph(self.subgraph_nodes).copy()
def time_subgraph_large(self, graph_type):
_ = self.G.subgraph(self.subgraph_nodes_large).copy()
|
GraphBenchmark
|
python
|
sympy__sympy
|
sympy/solvers/ode/single.py
|
{
"start": 85599,
"end": 89400
}
|
class ____(SingleODESolver):
r"""
Solves an `n`\th order linear homogeneous variable-coefficient
Cauchy-Euler equidimensional ordinary differential equation.
This is an equation with form `0 = a_0 f(x) + a_1 x f'(x) + a_2 x^2 f''(x)
\cdots`.
These equations can be solved in a general manner, by substituting
solutions of the form `f(x) = x^r`, and deriving a characteristic equation
for `r`. When there are repeated roots, we include extra terms of the
form `C_{r k} \ln^k(x) x^r`, where `C_{r k}` is an arbitrary integration
constant, `r` is a root of the characteristic equation, and `k` ranges
over the multiplicity of `r`. In the cases where the roots are complex,
solutions of the form `C_1 x^a \sin(b \log(x)) + C_2 x^a \cos(b \log(x))`
are returned, based on expansions with Euler's formula. The general
solution is the sum of the terms found. If SymPy cannot find exact roots
to the characteristic equation, a
:py:obj:`~.ComplexRootOf` instance will be returned
instead.
>>> from sympy import Function, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(4*x**2*f(x).diff(x, 2) + f(x), f(x),
... hint='nth_linear_euler_eq_homogeneous')
... # doctest: +NORMALIZE_WHITESPACE
Eq(f(x), sqrt(x)*(C1 + C2*log(x)))
Note that because this method does not involve integration, there is no
``nth_linear_euler_eq_homogeneous_Integral`` hint.
The following is for internal use:
- ``returns = 'sol'`` returns the solution to the ODE.
- ``returns = 'list'`` returns a list of linearly independent solutions,
corresponding to the fundamental solution set, for use with non
homogeneous solution methods like variation of parameters and
undetermined coefficients. Note that, though the solutions should be
linearly independent, this function does not explicitly check that. You
can do ``assert simplify(wronskian(sollist)) != 0`` to check for linear
independence. Also, ``assert len(sollist) == order`` will need to pass.
- ``returns = 'both'``, return a dictionary ``{'sol': <solution to ODE>,
'list': <list of linearly independent solutions>}``.
Examples
========
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> eq = f(x).diff(x, 2)*x**2 - 4*f(x).diff(x)*x + 6*f(x)
>>> pprint(dsolve(eq, f(x),
... hint='nth_linear_euler_eq_homogeneous'))
2
f(x) = x *(C1 + C2*x)
References
==========
- https://en.wikipedia.org/wiki/Cauchy%E2%80%93Euler_equation
- C. Bender & S. Orszag, "Advanced Mathematical Methods for Scientists and
Engineers", Springer 1999, pp. 12
# indirect doctest
"""
hint = "nth_linear_euler_eq_homogeneous"
has_integral = False
def _matches(self):
eq = self.ode_problem.eq_preprocessed
f = self.ode_problem.func.func
order = self.ode_problem.order
x = self.ode_problem.sym
match = self.ode_problem.get_linear_coefficients(eq, f(x), order)
self.r = None
does_match = False
if order and match:
coeff = match[order]
factor = x**order / coeff
self.r = {i: factor*match[i] for i in match}
if self.r and all(_test_term(self.r[i], f(x), i) for i in
self.r if i >= 0):
if not self.r[-1]:
does_match = True
return does_match
def _get_general_solution(self, *, simplify_flag: bool = True):
fx = self.ode_problem.func
eq = self.ode_problem.eq
homogen_sol = _get_euler_characteristic_eq_sols(eq, fx, self.r)[0]
return [homogen_sol]
|
NthLinearEulerEqHomogeneous
|
python
|
getsentry__sentry
|
src/sentry/integrations/api/endpoints/organization_integration_repos.py
|
{
"start": 895,
"end": 3537
}
|
class ____(RegionOrganizationIntegrationBaseEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ISSUES
def get(
self,
request: Request,
organization: Organization,
integration_id: int,
**kwds: Any,
) -> Response:
"""
Get the list of repositories available in an integration
````````````````````````````````````````````````````````
Gets all repositories that an integration makes available,
and indicates whether or not you can search repositories
by name.
:qparam string search: Name fragment to search repositories by.
:qparam bool installableOnly: If true, return only repositories that can be installed.
If false or not provided, return all repositories.
"""
integration = self.get_integration(organization.id, integration_id)
if integration.status == ObjectStatus.DISABLED:
return self.respond({"repos": []})
installed_repos = Repository.objects.filter(integration_id=integration.id).exclude(
status=ObjectStatus.HIDDEN
)
installed_repo_names = {installed_repo.name for installed_repo in installed_repos}
install = integration.get_installation(organization_id=organization.id)
if isinstance(install, RepositoryIntegration):
try:
repositories = install.get_repositories(request.GET.get("search"))
except (IntegrationError, IdentityNotValid) as e:
return self.respond({"detail": str(e)}, status=400)
installable_only = request.GET.get("installableOnly", "false").lower() == "true"
# Include a repository if the request is for all repositories, or if we want
# installable-only repositories and the repository isn't already installed
serialized_repositories = [
IntegrationRepository(
name=repo["name"],
identifier=repo["identifier"],
defaultBranch=repo.get("default_branch"),
isInstalled=repo["identifier"] in installed_repo_names,
)
for repo in repositories
if not installable_only or repo["identifier"] not in installed_repo_names
]
return self.respond(
{"repos": serialized_repositories, "searchable": install.repo_search}
)
return self.respond({"detail": "Repositories not supported"}, status=400)
|
OrganizationIntegrationReposEndpoint
|
python
|
doocs__leetcode
|
solution/0700-0799/0778.Swim in Rising Water/Solution.py
|
{
"start": 0,
"end": 780
}
|
class ____:
def swimInWater(self, grid: List[List[int]]) -> int:
def find(x: int) -> int:
if p[x] != x:
p[x] = find(p[x])
return p[x]
n = len(grid)
m = n * n
p = list(range(m))
hi = [0] * m
for i, row in enumerate(grid):
for j, h in enumerate(row):
hi[h] = i * n + j
dirs = (-1, 0, 1, 0, -1)
for t in range(m):
x, y = divmod(hi[t], n)
for dx, dy in pairwise(dirs):
nx, ny = x + dx, y + dy
if 0 <= nx < n and 0 <= ny < n and grid[nx][ny] <= t:
p[find(x * n + y)] = find(nx * n + ny)
if find(0) == find(m - 1):
return t
return 0
|
Solution
|
python
|
numba__numba
|
numba/core/typing/npdatetime.py
|
{
"start": 8119,
"end": 8205
}
|
class ____(DatetimeCmpOp):
key = operator.lt
@infer_global(operator.le)
|
DatetimeCmpLt
|
python
|
numpy__numpy
|
numpy/distutils/system_info.py
|
{
"start": 99450,
"end": 99530
}
|
class ____(_numpy_info):
section = 'numpy'
modulename = 'numpy'
|
numpy_info
|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/powerbi.py
|
{
"start": 11338,
"end": 13753
}
|
class ____(BasePowerBITrigger):
"""
Triggers a call to the API to request the available workspace IDs.
:param conn_id: The connection Id to connect to PowerBI.
:param timeout: The HTTP timeout being used by the `KiotaRequestAdapter`. Default is 1 week (60s * 60m * 24h * 7d).
When no timeout is specified or set to None then there is no HTTP timeout on each request.
:param proxies: A dict defining the HTTP proxies to be used (default is None).
:param api_version: The API version of the Microsoft Graph API to be used (default is v1).
You can pass an enum named APIVersion which has 2 possible members v1 and beta,
or you can pass a string as `v1.0` or `beta`.
"""
def __init__(
self,
conn_id: str,
workspace_ids: list[str] | None = None,
timeout: float = 60 * 60 * 24 * 7,
proxies: dict | None = None,
api_version: APIVersion | str | None = None,
):
super().__init__(conn_id=conn_id, timeout=timeout, proxies=proxies, api_version=api_version)
self.workspace_ids = workspace_ids
def serialize(self):
"""Serialize the trigger instance."""
return (
f"{self.__class__.__module__}.{self.__class__.__name__}",
{
"conn_id": self.conn_id,
"proxies": self.proxies,
"api_version": self.api_version,
"timeout": self.timeout,
"workspace_ids": self.workspace_ids,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make async connection to the PowerBI and polls for the list of workspace IDs."""
# Trigger the API to get the workspace list
workspace_ids = await self.hook.get_workspace_list()
if workspace_ids:
self.log.info("Triggered request to get workspace list.")
yield TriggerEvent(
{
"status": "success",
"message": "The workspace list get request has been successful.",
"workspace_ids": workspace_ids,
}
)
return
yield TriggerEvent(
{
"status": "error",
"message": "Error grabbing the workspace list.",
"workspace_ids": None,
}
)
return
|
PowerBIWorkspaceListTrigger
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 2281,
"end": 2554
}
|
class ____(models.Model):
name = models.CharField(max_length=100, verbose_name="¿Name?")
book = models.ForeignKey(Book, models.CASCADE)
author = models.ForeignKey(User, models.SET_NULL, blank=True, null=True)
def __str__(self):
return self.name
|
Promo
|
python
|
doocs__leetcode
|
solution/1500-1599/1572.Matrix Diagonal Sum/Solution.py
|
{
"start": 0,
"end": 246
}
|
class ____:
def diagonalSum(self, mat: List[List[int]]) -> int:
ans = 0
n = len(mat)
for i, row in enumerate(mat):
j = n - i - 1
ans += row[i] + (0 if j == i else row[j])
return ans
|
Solution
|
python
|
jupyterlab__jupyterlab
|
jupyterlab/extensions/manager.py
|
{
"start": 4957,
"end": 5814
}
|
class ____(PluginManagerOptions):
"""Extension manager options.
Attributes:
allowed_extensions_uris: A list of comma-separated URIs to get the allowed extensions list
blocked_extensions_uris: A list of comma-separated URIs to get the blocked extensions list
listings_refresh_seconds: The interval delay in seconds to refresh the lists
listings_tornado_options: The optional kwargs to use for the listings HTTP requests as described on https://www.tornadoweb.org/en/stable/httpclient.html#tornado.httpclient.HTTPRequest
"""
allowed_extensions_uris: set[str] = field(default_factory=set)
blocked_extensions_uris: set[str] = field(default_factory=set)
listings_refresh_seconds: int = 60 * 60
listings_tornado_options: dict = field(default_factory=dict)
@dataclass(frozen=True)
|
ExtensionManagerOptions
|
python
|
walkccc__LeetCode
|
solutions/3081. Replace Question Marks in String to Minimize Its Value/3081.py
|
{
"start": 0,
"end": 715
}
|
class ____:
def minimizeStringValue(self, s: str) -> str:
ans = []
count = collections.Counter(s)
letters = []
del count['?']
def getMinFreqLetter(count: dict[str, int]) -> str:
minFreqLetter = 'a'
for c in string.ascii_lowercase:
if count[c] < count[minFreqLetter]:
minFreqLetter = c
return minFreqLetter
for c in s:
if c == '?':
minFreqLetter = getMinFreqLetter(count)
letters.append(minFreqLetter)
count[minFreqLetter] += 1
letters.sort()
i = 0 # letters' index
for c in s:
if c == '?':
ans.append(letters[i])
i += 1
else:
ans.append(c)
return ''.join(ans)
|
Solution
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/qarithmetic_test.py
|
{
"start": 1991,
"end": 2569
}
|
class ____(_QFunctionalBinaryArithmeticBenchmarkBase):
def init(self, N, dtype, contig, op_func):
super().setup(N, dtype, contig)
self.inputs = {"q_input": self.q_input_a, "scalar_input": 42}
self.op_func = op_func
def forward(self, q_input, scalar_input: int):
return self.op_func(q_input, scalar_input)
op_bench.generate_pt_tests_from_op_list(
qarithmetic_binary_scalar_ops,
qarithmetic_binary_configs,
QFunctionalScalarBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
QFunctionalScalarBenchmark
|
python
|
paramiko__paramiko
|
tests/test_transport.py
|
{
"start": 46772,
"end": 54217
}
|
class ____:
    """Tests for SSH "strict kex" mode negotiation and enforcement.

    Covers advertisement of the ``kex-strict-c-v00@openssh.com``
    pseudo-algorithm, mode agreement between both ends, rejection of
    out-of-order messages during initial kex, sequence-number resets on
    NEWKEYS, and sequence-number rollover detection.

    NOTE(review): relies on test-harness helpers (``server``,
    ``BadSeqPacketizer``) and paramiko internals defined elsewhere in this
    file/package; presumably these set up a loopback client/server pair —
    confirm against the harness definition.
    """
    def test_kex_algos_includes_kex_strict_c(self):
        """Client KEXINIT advertises the strict-kex pseudo-algorithm."""
        with server() as (tc, _):
            kex = tc._get_latest_kex_init()
            assert "kex-strict-c-v00@openssh.com" in kex["kex_algo_list"]
    @mark.parametrize(
        "server_active,client_active",
        itertools.product([True, False], repeat=2),
    )
    def test_mode_agreement(self, server_active, client_active):
        """Strict kex is agreed on only when BOTH ends enable it."""
        with server(
            server_init=dict(strict_kex=server_active),
            client_init=dict(strict_kex=client_active),
        ) as (tc, ts):
            if server_active and client_active:
                assert tc.agreed_on_strict_kex is True
                assert ts.agreed_on_strict_kex is True
            else:
                assert tc.agreed_on_strict_kex is False
                assert ts.agreed_on_strict_kex is False
    def test_mode_advertised_by_default(self):
        """Both ends advertise and agree on strict kex with no overrides."""
        # NOTE: no explicit strict_kex overrides...
        with server() as (tc, ts):
            assert all(
                (
                    tc.advertise_strict_kex,
                    tc.agreed_on_strict_kex,
                    ts.advertise_strict_kex,
                    ts.agreed_on_strict_kex,
                )
            )
    @mark.parametrize(
        "ptype",
        (
            # "normal" but definitely out-of-order message
            MSG_CHANNEL_OPEN,
            # Normally ignored, but not in this case
            MSG_IGNORE,
            # Normally triggers debug parsing, but not in this case
            MSG_DEBUG,
            # Normally ignored, but...you get the idea
            MSG_UNIMPLEMENTED,
            # Not real, so would normally trigger us /sending/
            # MSG_UNIMPLEMENTED, but...
            MSG_FUGGEDABOUTIT,
        ),
    )
    def test_MessageOrderError_non_kex_messages_in_initial_kex(self, ptype):
        """Any non-kex message injected mid-kex raises MessageOrderError.

        A malicious server transport sends ``ptype`` while the client is
        still inside the initial key exchange; under strict mode the client
        must fail with MessageOrderError rather than ignore/process it.
        """
        class AttackTransport(Transport):
            # Easiest apparent spot on server side which is:
            # - late enough for both ends to have handshook on strict mode
            # - early enough to be in the window of opportunity for Terrapin
            # attack; essentially during actual kex, when the engine is
            # waiting for things like MSG_KEXECDH_REPLY (for eg curve25519).
            def _negotiate_keys(self, m):
                self.clear_to_send_lock.acquire()
                try:
                    self.clear_to_send.clear()
                finally:
                    self.clear_to_send_lock.release()
                if self.local_kex_init is None:
                    # remote side wants to renegotiate
                    self._send_kex_init()
                self._parse_kex_init(m)
                # Here, we would normally kick over to kex_engine, but instead
                # we want the server to send the OOO message.
                m = Message()
                m.add_byte(byte_chr(ptype))
                # rest of packet unnecessary...
                self._send_message(m)
        with raises(MessageOrderError):
            with server(server_transport_factory=AttackTransport) as (tc, _):
                pass  # above should run and except during connect()
    def test_SSHException_raised_on_out_of_order_messages_when_not_strict(
        self,
    ):
        """Without strict mode, out-of-order kex still fails, but with the
        plain SSHException base class — NOT the stricter MessageOrderError."""
        # This is kind of dumb (either situation is still fatal!) but whatever,
        # may as well be strict with our new strict flag...
        with raises(SSHException) as info:  # would be true either way, but
            with server(
                client_init=dict(strict_kex=False),
            ) as (tc, _):
                tc._expect_packet(MSG_KEXINIT)
                tc.open_session()
        assert info.type is SSHException  # NOT MessageOrderError!
    def test_error_not_raised_when_kexinit_not_seq_0_but_unstrict(self):
        """A KEXINIT with nonzero seqno is tolerated when strict kex is off."""
        with server(
            client_init=dict(
                # Disable strict kex
                strict_kex=False,
                # Give our clientside a packetizer that sets all kexinit
                # Message objects to have .seqno==17, which would trigger the
                # new logic if we'd forgotten to wrap it in strict-kex check
                packetizer_class=BadSeqPacketizer,
            ),
        ):
            pass  # kexinit happens at connect...
    def test_MessageOrderError_raised_when_kexinit_not_seq_0_and_strict(self):
        """A KEXINIT with nonzero seqno is fatal under strict kex."""
        with raises(MessageOrderError):
            with server(
                # Give our clientside a packetizer that sets all kexinit
                # Message objects to have .seqno==17, which should trigger the
                # new logic (given we are NOT disabling strict-mode)
                client_init=dict(packetizer_class=BadSeqPacketizer),
            ):
                pass  # kexinit happens at connect...
    def test_sequence_numbers_reset_on_newkeys_when_strict(self):
        """Packetizer seqnos reset to ~0 after NEWKEYS in strict mode."""
        with server(defer=True) as (tc, ts):
            # When in strict mode, these should all be zero or close to it
            # (post-kexinit, pre-auth).
            # Server->client will be 1 (EXT_INFO got sent after NEWKEYS)
            assert tc.packetizer._Packetizer__sequence_number_in == 1
            assert ts.packetizer._Packetizer__sequence_number_out == 1
            # Client->server will be 0
            assert tc.packetizer._Packetizer__sequence_number_out == 0
            assert ts.packetizer._Packetizer__sequence_number_in == 0
    def test_sequence_numbers_not_reset_on_newkeys_when_not_strict(self):
        """Packetizer seqnos keep counting across NEWKEYS when not strict."""
        with server(defer=True, client_init=dict(strict_kex=False)) as (
            tc,
            ts,
        ):
            # When not in strict mode, these will all be ~3-4 or so
            # (post-kexinit, pre-auth). Not encoding exact values as it will
            # change anytime we mess with the test harness...
            assert tc.packetizer._Packetizer__sequence_number_in != 0
            assert tc.packetizer._Packetizer__sequence_number_out != 0
            assert ts.packetizer._Packetizer__sequence_number_in != 0
            assert ts.packetizer._Packetizer__sequence_number_out != 0
    def test_sequence_number_rollover_detected(self):
        """A uint32 seqno wrapping to 0 during initial kex raises SSHException
        (enforced even with strict kex disabled)."""
        class RolloverTransport(Transport):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                # Induce an about-to-rollover seqno, such that it rolls over
                # during initial kex. (Sequence numbers are uint32, so we need
                # the largest possible 32bit integer such that incrementing it
                # will roll over to 0.)
                last_seq = 2**32 - 1
                setattr(
                    self.packetizer,
                    "_Packetizer__sequence_number_in",
                    last_seq,
                )
                setattr(
                    self.packetizer,
                    "_Packetizer__sequence_number_out",
                    last_seq,
                )
        with raises(
            SSHException,
            match=r"Sequence number rolled over during initial kex!",
        ):
            with server(
                client_init=dict(
                    # Disable strict kex - this should happen always
                    strict_kex=False,
                ),
                # Transport which tickles its packetizer seqno's
                transport_factory=RolloverTransport,
            ):
                pass  # kexinit happens at connect...
|
TestStrictKex
|
python
|
huggingface__transformers
|
tests/models/albert/test_modeling_albert.py
|
{
"start": 1411,
"end": 9516
}
|
class ____:
    """Builds tiny Albert configs and random inputs, and runs shape checks
    for each task-specific Albert head on behalf of a parent test case.

    All create_and_check_* helpers report failures through
    ``self.parent.assertEqual``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=32,
        embedding_size=8,
        hidden_size=16,
        num_hidden_layers=2,
        # must stay equal to `num_hidden_layers`!
        num_hidden_groups=2,
        num_attention_heads=4,
        intermediate_size=20,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=8,
        type_vocab_size=2,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Mirror every constructor argument onto the instance under the same
        # name so the helpers below can read them back as attributes.
        settings = dict(
            parent=parent,
            batch_size=batch_size,
            seq_length=seq_length,
            is_training=is_training,
            use_input_mask=use_input_mask,
            use_token_type_ids=use_token_type_ids,
            use_labels=use_labels,
            vocab_size=vocab_size,
            embedding_size=embedding_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_hidden_groups=num_hidden_groups,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            type_sequence_label_size=type_sequence_label_size,
            initializer_range=initializer_range,
            num_labels=num_labels,
            num_choices=num_choices,
            scope=scope,
        )
        for attribute, value in settings.items():
            setattr(self, attribute, value)

    @staticmethod
    def _prepare_for_eval(model):
        # Move the model to the test device and disable dropout.
        model.to(torch_device)
        model.eval()
        return model

    def prepare_config_and_inputs(self):
        """Return a config plus randomly generated ids, masks and labels.

        Optional pieces (mask, token types, labels) are ``None`` unless the
        corresponding ``use_*`` flag is set.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = (
            random_attention_mask([self.batch_size, self.seq_length]) if self.use_input_mask else None
        )
        token_type_ids = (
            ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            if self.use_token_type_ids
            else None
        )
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        else:
            sequence_labels = token_labels = choice_labels = None
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Instantiate a deliberately tiny AlbertConfig from the tester's settings."""
        config_kwargs = {
            "vocab_size": self.vocab_size,
            "embedding_size": self.embedding_size,
            "hidden_size": self.hidden_size,
            "num_hidden_layers": self.num_hidden_layers,
            "num_attention_heads": self.num_attention_heads,
            "intermediate_size": self.intermediate_size,
            "hidden_act": self.hidden_act,
            "hidden_dropout_prob": self.hidden_dropout_prob,
            "attention_probs_dropout_prob": self.attention_probs_dropout_prob,
            "max_position_embeddings": self.max_position_embeddings,
            "type_vocab_size": self.type_vocab_size,
            "initializer_range": self.initializer_range,
            "num_hidden_groups": self.num_hidden_groups,
            "inner_group_num": 1,
        }
        return AlbertConfig(**config_kwargs)

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check base-model output shapes (last_hidden_state, pooler_output)."""
        albert = self._prepare_for_eval(AlbertModel(config=config))
        # Exercise calls with progressively fewer optional inputs; only the
        # final result's shapes are asserted.
        result = albert(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = albert(input_ids, token_type_ids=token_type_ids)
        result = albert(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check MLM prediction logits and sentence-order (SOP) logits shapes."""
        pretrainer = self._prepare_for_eval(AlbertForPreTraining(config=config))
        result = pretrainer(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check masked-LM logits shape (batch, seq, vocab)."""
        mlm = self._prepare_for_eval(AlbertForMaskedLM(config=config))
        result = mlm(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check QA start/end span logits shapes (batch, seq)."""
        qa = self._prepare_for_eval(AlbertForQuestionAnswering(config=config))
        result = qa(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check sequence-classification logits shape (batch, num_labels)."""
        config.num_labels = self.num_labels
        classifier = self._prepare_for_eval(AlbertForSequenceClassification(config))
        result = classifier(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check multiple-choice logits shape (batch, num_choices)."""
        config.num_choices = self.num_choices
        chooser = self._prepare_for_eval(AlbertForMultipleChoice(config=config))

        def repeat_per_choice(tensor):
            # Duplicate each row once per candidate answer: (B, L) -> (B, C, L).
            return tensor.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()

        result = chooser(
            repeat_per_choice(input_ids),
            attention_mask=repeat_per_choice(input_mask),
            token_type_ids=repeat_per_choice(token_type_ids),
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the shared (config, inputs_dict) form."""
        config, input_ids, token_type_ids, input_mask, *_unused = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
|
AlbertModelTester
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.