language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | python-attrs__attrs | tests/test_functional.py | {
"start": 367,
"end": 477
} | class ____:
x = attr.ib(validator=attr.validators.instance_of(int))
y = attr.ib()
@attr.s(slots=True)
| C1 |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/block.py | {
"start": 1515,
"end": 1696
} | class ____(TypedDict):
type: Literal[ActionType.SHOW_CARD]
title: str
card: AdaptiveCard
Action: TypeAlias = OpenUrlAction | SubmitAction | ShowCardAction
| ShowCardAction |
python | keras-team__keras | keras/src/legacy/preprocessing/image.py | {
"start": 18813,
"end": 25291
} | class ____(Iterator):
"""Iterator yielding data from a Numpy array.
DEPRECATED.
"""
def __init__(
self,
x,
y,
image_data_generator,
batch_size=32,
shuffle=False,
sample_weight=None,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix="",
save_format="png",
subset=None,
ignore_class_split=False,
dtype=None,
):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
self.dtype = dtype
if isinstance(x, tuple) or isinstance(x, list):
if not isinstance(x[1], list):
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
"All of the arrays in `x` "
"should have the same length. "
"Found a pair with: "
f"len(x[0]) = {len(x)}, len(x[?]) = {len(xx)}"
)
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError(
"`x` (images tensor) and `y` (labels) "
"should have the same length. "
f"Found: x.shape = {np.asarray(x).shape}, "
f"y.shape = {np.asarray(y).shape}"
)
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError(
"`x` (images tensor) and `sample_weight` "
"should have the same length. "
f"Found: x.shape = {np.asarray(x).shape}, "
f"sample_weight.shape = {np.asarray(sample_weight).shape}"
)
if subset is not None:
if subset not in {"training", "validation"}:
raise ValueError(
f"Invalid subset name: {subset}"
'; expected "training" or "validation".'
)
split_idx = int(len(x) * image_data_generator._validation_split)
if (
y is not None
and not ignore_class_split
and not np.array_equal(
np.unique(y[:split_idx]), np.unique(y[split_idx:])
)
):
raise ValueError(
"Training and validation subsets "
"have different number of classes after "
"the split. If your numpy arrays are "
"sorted by the label, you might want "
"to shuffle them."
)
if subset == "validation":
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
self.x = np.asarray(x, dtype=self.dtype)
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError(
"Input data in `NumpyArrayIterator` "
"should have rank 4. You passed an array "
f"with shape {self.x.shape}"
)
channels_axis = 3 if data_format == "channels_last" else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn(
f"NumpyArrayIterator is set to use the data format convention"
f' "{data_format}" (channels on axis {channels_axis})'
", i.e. expected either 1, 3, or 4 channels "
f"on axis {channels_axis}. "
f"However, it was passed an array with shape {self.x.shape}"
f" ({self.x.shape[channels_axis]} channels)."
)
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super().__init__(x.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype
)
for i, j in enumerate(index_array):
x = self.x[j]
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(
x.astype(self.dtype), params
)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = image_utils.array_to_img(
batch_x[i], self.data_format, scale=True
)
fname = "{prefix}_{index}_{hash}.{format}".format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format,
)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def validate_filename(filename, white_list_formats):
"""Check if a filename refers to a valid file.
Args:
filename: String, absolute path to a file
white_list_formats: Set, allowed file extensions
Returns:
A boolean value indicating if the filename is valid or not
"""
return filename.lower().endswith(white_list_formats) and os.path.isfile(
filename
)
| NumpyArrayIterator |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/postgres_datasource.py | {
"start": 358,
"end": 1025
} | class ____(SQLDatasource):
"""Adds a postgres datasource to the data context.
Args:
name: The name of this postgres datasource.
connection_string: The SQLAlchemy connection string used to connect to the postgres database.
For example: "postgresql+psycopg2://postgres:@localhost/test_database"
assets: An optional dictionary whose keys are TableAsset or QueryAsset names and whose values
are TableAsset or QueryAsset objects.
""" # noqa: E501 # FIXME CoP
type: Literal["postgres"] = "postgres" # type: ignore[assignment] # FIXME CoP
connection_string: Union[ConfigStr, PostgresDsn]
| PostgresDatasource |
python | getsentry__sentry | fixtures/page_objects/issue_details.py | {
"start": 241,
"end": 5219
} | class ____(BasePage):
def __init__(self, browser, client):
super().__init__(browser)
self.client = client
self.global_selection = GlobalSelectionPage(browser)
def visit_issue(self, org, groupid):
self.browser.get(f"/organizations/{org}/issues/{groupid}/")
self.wait_until_loaded()
def visit_performance_issue(self, org, groupid):
"""
Trace view can take a long time to load, just wait until the trace evidence is loaded
"""
self.browser.get(f"/organizations/{org}/issues/{groupid}/")
self.browser.wait_until("#span-evidence")
def visit_issue_activity(self, org, groupid):
self.browser.get(f"/organizations/{org}/issues/{groupid}/activity/")
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
def visit_issue_in_environment(self, org, groupid, environment):
self.browser.get(f"/organizations/{org}/issues/{groupid}/?environment={environment}")
self.browser.wait_until(".group-detail")
def visit_tag_values(self, org, groupid, tag):
self.browser.get(f"/organizations/{org}/issues/{groupid}/distributions/{tag}/")
self.browser.wait_until('[data-test-id="group-tag-value"]')
def get_environment(self):
return self.browser.find_element(
by=By.CSS_SELECTOR, value='[data-test-id="env-label"'
).text.lower()
def go_back_to_issues(self):
self.global_selection.go_back_to_issues()
def api_issue_get(self, groupid):
return self.client.get(f"/api/0/issues/{groupid}/")
def go_to_subtab(self, key):
tabs = self.browser.find_element(by=By.CSS_SELECTOR, value='[role="tablist"]')
tabs.find_element(by=By.CSS_SELECTOR, value=f'[role="tab"][data-key="{key}"]').click()
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
def open_issue_errors(self):
self.browser.click(".errors-toggle")
self.browser.wait_until(".entries > .errors ul")
def open_curl(self):
self.browser.find_element(by=By.XPATH, value="//a//code[contains(text(), 'curl')]").click()
def resolve_issue(self):
self.browser.click('[aria-label="Resolve"]')
# Resolve should become unresolve
self.browser.wait_until('[aria-label="Resolved"]')
def archive_issue(self):
self.browser.click('[aria-label="Archive"]')
# Ignore should become unresolve
self.browser.wait_until('[aria-label="Archived"]')
def bookmark_issue(self):
self.browser.click('button[aria-label="More Actions"]')
self.browser.wait_until('[data-test-id="bookmark"]')
button = self.browser.element('[data-test-id="bookmark"]')
button.click()
self.browser.click('button[aria-label="More Actions"]')
self.browser.wait_until('[data-test-id="unbookmark"]')
def assign_to(self, user):
assignee = self.browser.find_element(
by=By.CSS_SELECTOR, value='[data-test-id="assigned-to"]'
)
# Open the assignee picker
assignee.find_element(
by=By.CSS_SELECTOR, value='[data-test-id="assignee-selector"]'
).click()
# Wait for the input to be loaded
wait = WebDriverWait(assignee, 10)
wait.until(expected_conditions.presence_of_element_located((By.TAG_NAME, "input")))
assignee.find_element(by=By.TAG_NAME, value="input").send_keys(user)
# Click the member/team
options = assignee.find_elements(by=By.CSS_SELECTOR, value='[role="option"]')
assert len(options) > 0, "No assignees could be found."
options[0].click()
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
def find_comment_form(self):
self.browser.wait_until_test_id("note-input-form")
return self.browser.find_element(
by=By.CSS_SELECTOR, value='[data-test-id="note-input-form"]'
)
def has_comment(self, text):
element = self.browser.element('[data-test-id="activity-note-body"]')
return text in element.text
def wait_until_loaded(self):
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_not('[data-test-id="event-errors-loading"]')
self.browser.wait_until_test_id("linked-issues")
self.browser.wait_until_test_id("loaded-device-name")
if self.browser.element_exists("#grouping-info"):
self.browser.wait_until_test_id("loaded-grouping-info")
self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
def mark_reviewed(self):
self.browser.click('[aria-label="More Actions"]')
self.browser.wait_until('[data-test-id="mark-review"]')
self.browser.click('[data-test-id="mark-review"]')
self.browser.click('[aria-label="More Actions"]')
self.browser.wait_until('[data-test-id="mark-review"][aria-disabled="true"]')
| IssueDetailsPage |
python | google__jax | jax/_src/typing.py | {
"start": 1465,
"end": 1554
} | class ____(Protocol):
@property
def shape(self, /) -> tuple[int, ...]: ...
| SupportsShape |
python | google__jax | jax/_src/errors.py | {
"start": 11256,
"end": 14296
} | class ____(JAXTypeError):
"""
This error can occur when a JAX Tracer object is used in a context where a
Python integer is expected (see :ref:`faq-different-kinds-of-jax-values` for
more on what a Tracer is). It typically occurs in a few situations.
Passing a tracer in place of an integer
This error can occur if you attempt to pass a traced value to a function
that requires a static integer argument; for example::
>>> from jax import jit
>>> import numpy as np
>>> @jit
... def func(x, axis):
... return np.split(x, 2, axis)
>>> func(np.arange(4), 0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerIntegerConversionError: The __index__() method was called on
traced array with shape int32[0]
When this happens, the solution is often to mark the problematic argument as
static::
>>> from functools import partial
>>> @partial(jit, static_argnums=1)
... def func(x, axis):
... return np.split(x, 2, axis)
>>> func(np.arange(10), 0)
[Array([0, 1, 2, 3, 4], dtype=int32),
Array([5, 6, 7, 8, 9], dtype=int32)]
An alternative is to apply the transformation to a closure that encapsulates
the arguments to be protected, either manually as below or by using
:func:`functools.partial`::
>>> jit(lambda arr: np.split(arr, 2, 0))(np.arange(4))
[Array([0, 1], dtype=int32), Array([2, 3], dtype=int32)]
**Note a new closure is created at every invocation, which defeats the
compilation caching mechanism, which is why static_argnums is preferred.**
Indexing a list with a Tracer
This error can occur if you attempt to index a Python list with a traced
quantity.
For example::
>>> import jax.numpy as jnp
>>> from jax import jit
>>> L = [1, 2, 3]
>>> @jit
... def func(i):
... return L[i]
>>> func(0) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TracerIntegerConversionError: The __index__() method was called on
traced array with shape int32[0]
Depending on the context, you can generally fix this either by converting
the list to a JAX array::
>>> @jit
... def func(i):
... return jnp.array(L)[i]
>>> func(0)
Array(1, dtype=int32)
or by declaring the index as a static argument::
>>> from functools import partial
>>> @partial(jit, static_argnums=0)
... def func(i):
... return L[i]
>>> func(0)
Array(1, dtype=int32, weak_type=True)
To understand more subtleties having to do with tracers vs. regular values,
and concrete vs. abstract values, you may want to read
:ref:`faq-different-kinds-of-jax-values`.
"""
def __init__(self, tracer: core.Tracer):
super().__init__(
f"The __index__() method was called on {tracer._error_repr()}"
f"{tracer._origin_msg()}")
@export
| TracerIntegerConversionError |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/error.py | {
"start": 1085,
"end": 7158
} | class ____(LegacyNamedTupleMixin):
# serdes log
# * added cause - default to None in constructor to allow loading old entries
# * added context - default to None for similar reasons
message: str
stack: Sequence[str]
cls_name: Optional[str]
cause: Optional["SerializableErrorInfo"] = None
context: Optional["SerializableErrorInfo"] = None
def __str__(self) -> str:
return self.to_string()
def to_string(self) -> str:
stack_str = "\nStack Trace:\n" + "".join(self.stack) if self.stack else ""
cause_str = (
"\nThe above exception was caused by the following exception:\n"
+ self.cause.to_string()
if self.cause
else ""
)
context_str = (
"\nThe above exception occurred during handling of the following exception:\n"
+ self.context.to_string()
if self.context
else ""
)
return f"{self.message}{stack_str}{cause_str}{context_str}"
def to_exception_message_only(self) -> "SerializableErrorInfo":
"""Return a new SerializableErrorInfo with only the message and cause set.
This is done in cases when the context about the error should not be exposed to the user.
"""
return SerializableErrorInfo(message=self.message, stack=[], cls_name=self.cls_name)
@classmethod
def from_traceback(cls, tb: traceback.TracebackException) -> Self:
if sys.version_info >= (3, 13):
name = tb.exc_type_str.split(".")[-1]
else:
name = tb.exc_type.__name__ if tb.exc_type is not None else None
return cls(
# usually one entry, multiple lines for SyntaxError
message="".join(list(tb.format_exception_only())),
stack=tb.stack.format(),
cls_name=name,
cause=cls.from_traceback(tb.__cause__) if tb.__cause__ else None,
context=cls.from_traceback(tb.__context__)
if tb.__context__ and not tb.__suppress_context__
else None,
)
DAGSTER_FRAMEWORK_SUBSTRINGS = [
os.sep + os.path.join("site-packages", "dagster"),
os.sep + os.path.join("python_modules", "dagster"),
os.sep + os.path.join("python_modules", "libraries", "dagster"),
]
IMPORT_MACHINERY_SUBSTRINGS = [
os.path.join("importlib", "__init__.py"),
os.path.join("importlib", "metadata", "__init__.py"),
"importlib._bootstrap",
]
NO_HINT = lambda _, __: None
def remove_system_frames_from_error(
error_info: SerializableErrorInfo,
build_system_frame_removed_hint: Callable[[bool, int], Optional[str]] = NO_HINT,
) -> SerializableErrorInfo:
"""Remove system frames from a SerializableErrorInfo, including Dagster framework boilerplate
and import machinery, which are generally not useful for users to debug their code.
"""
return remove_matching_lines_from_error_info(
error_info,
DAGSTER_FRAMEWORK_SUBSTRINGS + IMPORT_MACHINERY_SUBSTRINGS,
build_system_frame_removed_hint,
)
def make_simple_frames_removed_hint(
additional_first_hint_warning: Optional[str] = None,
) -> Callable[[bool, int], Optional[str]]:
def frames_removed_hint(is_first_hidden_frame: bool, num_hidden_frames: int) -> Optional[str]:
base_hint = f"{num_hidden_frames} dagster system frames hidden"
if is_first_hidden_frame and additional_first_hint_warning:
return f" [{base_hint}, {additional_first_hint_warning}]\n"
else:
return f" [{base_hint}]\n"
return frames_removed_hint
def remove_matching_lines_from_error_info(
error_info: SerializableErrorInfo,
match_substrs: Sequence[str],
build_system_frame_removed_hint: Callable[[bool, int], Optional[str]],
) -> SerializableErrorInfo:
"""Utility which truncates a stacktrace to drop lines which match the given strings.
This is useful for e.g. removing Dagster framework lines from a stacktrace that
involves user code.
Args:
error_info (SerializableErrorInfo): The error info to truncate
matching_lines (Sequence[str]): The lines to truncate from the stacktrace
Returns:
SerializableErrorInfo: A new error info with the stacktrace truncated
"""
return error_info._replace(
stack=remove_matching_lines_from_stack_trace(
error_info.stack, match_substrs, build_system_frame_removed_hint
),
cause=(
remove_matching_lines_from_error_info(
error_info.cause, match_substrs, build_system_frame_removed_hint
)
if error_info.cause
else None
),
context=(
remove_matching_lines_from_error_info(
error_info.context, match_substrs, build_system_frame_removed_hint
)
if error_info.context
else None
),
)
def remove_matching_lines_from_stack_trace(
stack: Sequence[str],
matching_lines: Sequence[str],
build_system_frame_removed_hint: Callable[[bool, int], Optional[str]],
) -> Sequence[str]:
ctr = 0
out = []
is_first_hidden_frame = True
for i in range(len(stack)):
if not _line_contains_matching_string(stack[i], matching_lines):
if ctr > 0:
hint = build_system_frame_removed_hint(is_first_hidden_frame, ctr)
is_first_hidden_frame = False
if hint:
out.append(hint)
ctr = 0
out.append(stack[i])
else:
ctr += 1
if ctr > 0:
hint = build_system_frame_removed_hint(is_first_hidden_frame, ctr)
if hint:
out.append(hint)
return out
def _line_contains_matching_string(line: str, matching_strings: Sequence[str]):
split_by_comma = line.split(",")
if not split_by_comma:
return False
file_portion = split_by_comma[0]
return any(framework_substring in file_portion for framework_substring in matching_strings)
| SerializableErrorInfo |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 52917,
"end": 53373
} | class ____(object):
def __init__(self):
self.input = None
self.reset()
def reset(self):
self.guessing = 0
self.filename = None
if self.input:
self.input.reset()
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Parser ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| ParserSharedInputState |
python | walkccc__LeetCode | solutions/499. The Maze III/499.py | {
"start": 0,
"end": 1149
} | class ____:
def findShortestWay(
self,
maze: list[list[int]],
ball: list[int],
hole: list[int],
) -> str:
ans = 'impossible'
minSteps = math.inf
def dfs(i: int, j: int, dx: int, dy: int, steps: int, path: str):
nonlocal ans
nonlocal minSteps
if steps >= minSteps:
return
if dx != 0 or dy != 0: # Both are zeros for the initial ball position.
while (0 <= i + dx < len(maze) and 0 <= j + dy < len(maze[0]) and
maze[i + dx][j + dy] != 1):
i += dx
j += dy
steps += 1
if i == hole[0] and j == hole[1] and steps < minSteps:
minSteps = steps
ans = path
if maze[i][j] == 0 or steps + 2 < maze[i][j]:
maze[i][j] = steps + 2 # +2 because maze[i][j] == 0 || 1.
if dx == 0:
dfs(i, j, 1, 0, steps, path + 'd')
if dy == 0:
dfs(i, j, 0, -1, steps, path + 'l')
if dy == 0:
dfs(i, j, 0, 1, steps, path + 'r')
if dx == 0:
dfs(i, j, -1, 0, steps, path + 'u')
dfs(ball[0], ball[1], 0, 0, 0, '')
return ans
| Solution |
python | mkdocs__mkdocs | mkdocs/contrib/search/search_index.py | {
"start": 5251,
"end": 5804
} | class ____:
"""
Used by the ContentParser class to capture the information we
need when it is parsing the HTML.
"""
def __init__(
self,
text: list[str] | None = None,
id_: str | None = None,
title: str | None = None,
) -> None:
self.text = text or []
self.id = id_
self.title = title
def __eq__(self, other):
return self.text == other.text and self.id == other.id and self.title == other.title
_HEADER_TAGS = tuple(f"h{x}" for x in range(1, 7))
| ContentSection |
python | Lightning-AI__lightning | tests/tests_pytorch/strategies/launchers/test_subprocess_script.py | {
"start": 758,
"end": 3652
} | class ____(BoringModel):
def on_train_start(self) -> None:
# make sure that the model is on GPU when training
assert self.device == torch.device(f"cuda:{self.trainer.strategy.local_rank}")
@hydra.main(config_path=None, version_base="1.1")
def task_fn(cfg):
trainer = Trainer(accelerator="auto", devices=cfg.devices, strategy=cfg.strategy, fast_dev_run=True)
model = BoringModelGPU()
trainer.fit(model)
trainer.test(model)
if _distributed_is_initialized():
torch.distributed.destroy_process_group()
os.environ.pop("LOCAL_RANK", None)
if __name__ == "__main__":
task_fn()
"""
@RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True)
@pytest.mark.skipif(not _HYDRA_WITH_RUN_PROCESS, reason=str(_HYDRA_WITH_RUN_PROCESS))
@pytest.mark.parametrize("subdir", [None, "null", "dksa", ".hello"])
def test_ddp_with_hydra_runjob(subdir, tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
# Save script locally
with open("temp.py", "w") as fn:
fn.write(script)
# Run CLI
devices = 2
run_dir = tmp_path / "hydra_output"
cmd = [sys.executable, "temp.py", f"+devices={devices}", '+strategy="ddp"', f"hydra.run.dir={run_dir}"]
if subdir is not None:
cmd += [f"hydra.output_subdir={subdir}"]
run_process(cmd)
# Make sure no config.yaml was created for additional processes
saved_confs = list(run_dir.glob("**/config.yaml"))
assert len(saved_confs) == (0 if subdir == "null" else 1) # Main process has config.yaml iff subdir!="null"
if saved_confs: # Make sure the parameter was set and used
cfg = OmegaConf.load(saved_confs[0])
assert cfg.devices == devices
# Make sure PL spawned jobs that are logged by Hydra
logs = list(run_dir.glob("**/*.log"))
assert len(logs) == devices
@mock.patch("lightning.fabric.strategies.launchers.subprocess_script._ChildProcessObserver")
def test_kill(_):
launcher = _SubprocessScriptLauncher(Mock(), 1, 1)
proc0 = Mock(autospec=subprocess.Popen)
proc1 = Mock(autospec=subprocess.Popen)
launcher.procs = [proc0, proc1]
launcher.kill(15)
proc0.send_signal.assert_called_once_with(15)
proc1.send_signal.assert_called_once_with(15)
@mock.patch("lightning.fabric.strategies.launchers.subprocess_script.subprocess.Popen")
@mock.patch("lightning.fabric.strategies.launchers.subprocess_script._ChildProcessObserver")
def test_validate_cluster_environment_user_settings(*_):
"""Test that the launcher calls into the cluster environment to validate the user settings."""
cluster_env = Mock(validate_settings=Mock(side_effect=RuntimeError("test")))
cluster_env.creates_processes_externally = True
launcher = _SubprocessScriptLauncher(cluster_env, num_processes=2, num_nodes=1)
with pytest.raises(RuntimeError, match="test"):
launcher.launch(Mock())
| BoringModelGPU |
python | django-import-export__django-import-export | tests/core/migrations/0018_author_resource.py | {
"start": 90,
"end": 406
} | class ____(migrations.Migration):
dependencies = [
("core", "0017_namedauthor_uuidbook_author"),
]
operations = [
migrations.AddField(
model_name="author",
name="resource",
field=models.SmallIntegerField(blank=True, null=True),
),
]
| Migration |
python | sympy__sympy | sympy/polys/agca/ideals.py | {
"start": 156,
"end": 7558
} | class ____(IntegerPowerable):
"""
Abstract base class for ideals.
Do not instantiate - use explicit constructors in the ring class instead:
>>> from sympy import QQ
>>> from sympy.abc import x
>>> QQ.old_poly_ring(x).ideal(x+1)
<x + 1>
Attributes
- ring - the ring this ideal belongs to
Non-implemented methods:
- _contains_elem
- _contains_ideal
- _quotient
- _intersect
- _union
- _product
- is_whole_ring
- is_zero
- is_prime, is_maximal, is_primary, is_radical
- is_principal
- height, depth
- radical
Methods that likely should be overridden in subclasses:
- reduce_element
"""
def _contains_elem(self, x):
"""Implementation of element containment."""
raise NotImplementedError
def _contains_ideal(self, I):
"""Implementation of ideal containment."""
raise NotImplementedError
def _quotient(self, J):
"""Implementation of ideal quotient."""
raise NotImplementedError
def _intersect(self, J):
"""Implementation of ideal intersection."""
raise NotImplementedError
def is_whole_ring(self):
"""Return True if ``self`` is the whole ring."""
raise NotImplementedError
def is_zero(self):
"""Return True if ``self`` is the zero ideal."""
raise NotImplementedError
def _equals(self, J):
"""Implementation of ideal equality."""
return self._contains_ideal(J) and J._contains_ideal(self)
def is_prime(self):
"""Return True if ``self`` is a prime ideal."""
raise NotImplementedError
def is_maximal(self):
"""Return True if ``self`` is a maximal ideal."""
raise NotImplementedError
def is_radical(self):
"""Return True if ``self`` is a radical ideal."""
raise NotImplementedError
def is_primary(self):
"""Return True if ``self`` is a primary ideal."""
raise NotImplementedError
def is_principal(self):
"""Return True if ``self`` is a principal ideal."""
raise NotImplementedError
def radical(self):
"""Compute the radical of ``self``."""
raise NotImplementedError
def depth(self):
"""Compute the depth of ``self``."""
raise NotImplementedError
def height(self):
"""Compute the height of ``self``."""
raise NotImplementedError
# TODO more
# non-implemented methods end here
def __init__(self, ring):
self.ring = ring
def _check_ideal(self, J):
"""Helper to check ``J`` is an ideal of our ring."""
if not isinstance(J, Ideal) or J.ring != self.ring:
raise ValueError(
'J must be an ideal of %s, got %s' % (self.ring, J))
def contains(self, elem):
"""
Return True if ``elem`` is an element of this ideal.
Examples
========
>>> from sympy.abc import x
>>> from sympy import QQ
>>> QQ.old_poly_ring(x).ideal(x+1, x-1).contains(3)
True
>>> QQ.old_poly_ring(x).ideal(x**2, x**3).contains(x)
False
"""
return self._contains_elem(self.ring.convert(elem))
def subset(self, other):
"""
Returns True if ``other`` is is a subset of ``self``.
Here ``other`` may be an ideal.
Examples
========
>>> from sympy.abc import x
>>> from sympy import QQ
>>> I = QQ.old_poly_ring(x).ideal(x+1)
>>> I.subset([x**2 - 1, x**2 + 2*x + 1])
True
>>> I.subset([x**2 + 1, x + 1])
False
>>> I.subset(QQ.old_poly_ring(x).ideal(x**2 - 1))
True
"""
if isinstance(other, Ideal):
return self._contains_ideal(other)
return all(self._contains_elem(x) for x in other)
def quotient(self, J, **opts):
r"""
Compute the ideal quotient of ``self`` by ``J``.
That is, if ``self`` is the ideal `I`, compute the set
`I : J = \{x \in R | xJ \subset I \}`.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import QQ
>>> R = QQ.old_poly_ring(x, y)
>>> R.ideal(x*y).quotient(R.ideal(x))
<y>
"""
self._check_ideal(J)
return self._quotient(J, **opts)
def intersect(self, J):
"""
Compute the intersection of self with ideal J.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import QQ
>>> R = QQ.old_poly_ring(x, y)
>>> R.ideal(x).intersect(R.ideal(y))
<x*y>
"""
self._check_ideal(J)
return self._intersect(J)
def saturate(self, J):
r"""
Compute the ideal saturation of ``self`` by ``J``.
That is, if ``self`` is the ideal `I`, compute the set
`I : J^\infty = \{x \in R | xJ^n \subset I \text{ for some } n\}`.
"""
raise NotImplementedError
# Note this can be implemented using repeated quotient
def union(self, J):
"""
Compute the ideal generated by the union of ``self`` and ``J``.
Examples
========
>>> from sympy.abc import x
>>> from sympy import QQ
>>> QQ.old_poly_ring(x).ideal(x**2 - 1).union(QQ.old_poly_ring(x).ideal((x+1)**2)) == QQ.old_poly_ring(x).ideal(x+1)
True
"""
self._check_ideal(J)
return self._union(J)
def product(self, J):
r"""
Compute the ideal product of ``self`` and ``J``.
That is, compute the ideal generated by products `xy`, for `x` an element
of ``self`` and `y \in J`.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import QQ
>>> QQ.old_poly_ring(x, y).ideal(x).product(QQ.old_poly_ring(x, y).ideal(y))
<x*y>
"""
self._check_ideal(J)
return self._product(J)
def reduce_element(self, x):
"""
Reduce the element ``x`` of our ring modulo the ideal ``self``.
Here "reduce" has no specific meaning: it could return a unique normal
form, simplify the expression a bit, or just do nothing.
"""
return x
def __add__(self, e):
if not isinstance(e, Ideal):
R = self.ring.quotient_ring(self)
if isinstance(e, R.dtype):
return e
if isinstance(e, R.ring.dtype):
return R(e)
return R.convert(e)
self._check_ideal(e)
return self.union(e)
__radd__ = __add__
def __mul__(self, e):
if not isinstance(e, Ideal):
try:
e = self.ring.ideal(e)
except CoercionFailed:
return NotImplemented
self._check_ideal(e)
return self.product(e)
__rmul__ = __mul__
def _zeroth_power(self):
return self.ring.ideal(1)
def _first_power(self):
# Raising to any power but 1 returns a new instance. So we mult by 1
# here so that the first power is no exception.
return self * 1
def __eq__(self, e):
if not isinstance(e, Ideal) or e.ring != self.ring:
return False
return self._equals(e)
def __ne__(self, e):
return not (self == e)
| Ideal |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 18093,
"end": 18832
} | class ____(Instruction):
def __init__(self, parent, typ, count, name):
operands = [count] if count else ()
super(AllocaInstr, self).__init__(parent, typ.as_pointer(), "alloca",
operands, name)
self.allocated_type = typ
self.align = None
def descr(self, buf):
buf.append("{0} {1}".format(self.opname, self.allocated_type))
if self.operands:
op, = self.operands
buf.append(", {0} {1}".format(op.type, op.get_reference()))
if self.align is not None:
buf.append(", align {0}".format(self.align))
if self.metadata:
buf.append(self._stringify_metadata(leading_comma=True))
| AllocaInstr |
python | django__django | tests/admin_utils/models.py | {
"start": 1800,
"end": 1890
} | class ____(models.Model):
event = models.ForeignKey(Event, models.DO_NOTHING)
| EventGuide |
python | sanic-org__sanic | sanic/cli/inspector_client.py | {
"start": 527,
"end": 3792
} | class ____:
def __init__(
self,
host: str,
port: int,
secure: bool,
raw: bool,
api_key: Optional[str],
) -> None:
self.scheme = "https" if secure else "http"
self.host = host
self.port = port
self.raw = raw
self.api_key = api_key
for scheme in ("http", "https"):
full = f"{scheme}://"
if self.host.startswith(full):
self.scheme = scheme
self.host = self.host[len(full) :] # noqa E203
def do(self, action: str, **kwargs: Any) -> None:
if action == "info":
self.info()
return
result = self.request(action, **kwargs).get("result")
if result:
out = (
dumps(result)
if isinstance(result, (list, dict))
else str(result)
)
sys.stdout.write(out + "\n")
def info(self) -> None:
out = sys.stdout.write
response = self.request("", "GET")
if self.raw or not response:
return
data = response["result"]
display = data.pop("info")
extra = display.pop("extra", {})
display["packages"] = ", ".join(display["packages"])
MOTDTTY(get_logo(), self.base_url, display, extra).display(
version=False,
action="Inspecting",
out=out,
)
for name, info in data["workers"].items():
info = "\n".join(
f"\t{key}: {Colors.BLUE}{value}{Colors.END}"
for key, value in info.items()
)
out(
"\n"
+ indent(
"\n".join(
[
f"{Colors.BOLD}{Colors.SANIC}{name}{Colors.END}",
info,
]
),
" ",
)
+ "\n"
)
def request(self, action: str, method: str = "POST", **kwargs: Any) -> Any:
url = f"{self.base_url}/{action}"
params: dict[str, Any] = {"method": method, "headers": {}}
if kwargs:
params["data"] = dumps(kwargs).encode()
params["headers"]["content-type"] = "application/json"
if self.api_key:
params["headers"]["authorization"] = f"Bearer {self.api_key}"
request = URequest(url, **params)
try:
with urlopen(request) as response: # nosec B310
raw = response.read()
loaded = loads(raw)
if self.raw:
sys.stdout.write(dumps(loaded.get("result")) + "\n")
return {}
return loaded
except (URLError, RemoteDisconnected) as e:
sys.stderr.write(
f"{Colors.RED}Could not connect to inspector at: "
f"{Colors.YELLOW}{self.base_url}{Colors.END}\n"
"Either the application is not running, or it did not start "
f"an inspector instance.\n{e}\n"
)
sys.exit(1)
@property
def base_url(self):
return f"{self.scheme}://{self.host}:{self.port}"
| InspectorClient |
python | ray-project__ray | doc/source/serve/doc_code/model_composition/language_example.py | {
"start": 972,
"end": 1670
} | class ____:
def __init__(
self, spanish_responder: DeploymentHandle, french_responder: DeploymentHandle
):
self.spanish_responder = spanish_responder
self.french_responder = french_responder
async def __call__(self, http_request):
request = await http_request.json()
language, name = request["language"], request["name"]
if language == "spanish":
response = self.spanish_responder.say_hello.remote(name)
elif language == "french":
response = self.french_responder.say_hello.remote(name)
else:
return "Please try again."
return await response
@serve.deployment
| LanguageClassifer |
python | django__django | django/db/models/lookups.py | {
"start": 22430,
"end": 22515
} | class ____(PatternLookup):
lookup_name = "contains"
@Field.register_lookup
| Contains |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 5021,
"end": 5389
} | class ____(Protocol):
@util.ro_non_memoized_property
def entity_namespace(self) -> _EntityNamespace: ...
def _is_has_entity_namespace(element: Any) -> TypeGuard[_HasEntityNamespace]:
return hasattr(element, "entity_namespace")
# Remove when https://github.com/python/mypy/issues/14640 will be fixed
_Self = TypeVar("_Self", bound=Any)
| _HasEntityNamespace |
python | qdrant__qdrant-client | tools/async_client_generator/local_generator.py | {
"start": 525,
"end": 3075
} | class ____(BaseGenerator):
def __init__(
self,
keep_sync: Optional[list[str]] = None,
class_replace_map: Optional[dict] = None,
import_replace_map: Optional[dict] = None,
exclude_methods: Optional[list[str]] = None,
):
super().__init__()
self._async_methods: Optional[list[str]] = None
self.transformers.append(ImportFromTransformer(import_replace_map=import_replace_map))
self.transformers.append(ClassDefTransformer(class_replace_map=class_replace_map))
self.transformers.append(
LocalCallTransformer(
class_replace_map=class_replace_map, async_methods=self.async_methods
)
)
self.transformers.append(ImportTransformer(import_replace_map=import_replace_map))
self.transformers.append(
ClientFunctionDefTransformer(
keep_sync=keep_sync,
exclude_methods=exclude_methods,
async_methods=self.async_methods,
)
)
self.transformers.append(
NameTransformer(
class_replace_map=class_replace_map,
import_replace_map=import_replace_map,
)
)
@property
def async_methods(self) -> list[str]:
if self._async_methods is None:
self._async_methods = self.get_async_methods(AsyncQdrantBase)
return self._async_methods
@staticmethod
def get_async_methods(class_obj: type) -> list[str]:
async_methods = []
for name, method in inspect.getmembers(class_obj):
if inspect.iscoroutinefunction(method):
async_methods.append(name)
return async_methods
if __name__ == "__main__":
from tools.async_client_generator.config import CLIENT_DIR, CODE_DIR
with open(CLIENT_DIR / "local" / "qdrant_local.py", "r") as source_file:
code = source_file.read()
generator = LocalGenerator(
class_replace_map={
"QdrantBase": "AsyncQdrantBase",
"QdrantLocal": "AsyncQdrantLocal",
},
import_replace_map={
"qdrant_client.client_base": "qdrant_client.async_client_base",
"QdrantBase": "AsyncQdrantBase",
"QdrantLocal": "AsyncQdrantLocal",
},
exclude_methods=[
"migrate",
],
)
modified_code = generator.generate(code)
with open(CODE_DIR / "async_qdrant_local.py", "w") as target_file:
target_file.write(modified_code)
| LocalGenerator |
python | doocs__leetcode | solution/3100-3199/3105.Longest Strictly Increasing or Strictly Decreasing Subarray/Solution.py | {
"start": 0,
"end": 465
} | class ____:
def longestMonotonicSubarray(self, nums: List[int]) -> int:
ans = t = 1
for i, x in enumerate(nums[1:]):
if nums[i] < x:
t += 1
ans = max(ans, t)
else:
t = 1
t = 1
for i, x in enumerate(nums[1:]):
if nums[i] > x:
t += 1
ans = max(ans, t)
else:
t = 1
return ans
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 43922,
"end": 44947
} | class ____:
@mock.patch(ENTRY_STR)
@mock.patch(HOOK_STR)
def test_execute(self, hook_mock, entry_mock):
op = DataplexCatalogDeleteEntryOperator(
project_id=PROJECT_ID,
location=REGION,
entry_id=ENTRY_NAME,
entry_group_id=ENTRY_GROUP_NAME,
task_id="delete_task",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=mock.MagicMock())
entry_mock.return_value.to_dict.return_value = None
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.delete_entry.assert_called_once_with(
project_id=PROJECT_ID,
location=REGION,
entry_id=ENTRY_NAME,
entry_group_id=ENTRY_GROUP_NAME,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexCatalogDeleteEntryOperator |
python | pypa__pip | src/pip/_vendor/rich/syntax.py | {
"start": 4356,
"end": 6026
} | class ____(SyntaxTheme):
"""Syntax theme that delegates to Pygments theme."""
def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None:
self._style_cache: Dict[TokenType, Style] = {}
if isinstance(theme, str):
try:
self._pygments_style_class = get_style_by_name(theme)
except ClassNotFound:
self._pygments_style_class = get_style_by_name("default")
else:
self._pygments_style_class = theme
self._background_color = self._pygments_style_class.background_color
self._background_style = Style(bgcolor=self._background_color)
def get_style_for_token(self, token_type: TokenType) -> Style:
"""Get a style from a Pygments class."""
try:
return self._style_cache[token_type]
except KeyError:
try:
pygments_style = self._pygments_style_class.style_for_token(token_type)
except KeyError:
style = Style.null()
else:
color = pygments_style["color"]
bgcolor = pygments_style["bgcolor"]
style = Style(
color="#" + color if color else "#000000",
bgcolor="#" + bgcolor if bgcolor else self._background_color,
bold=pygments_style["bold"],
italic=pygments_style["italic"],
underline=pygments_style["underline"],
)
self._style_cache[token_type] = style
return style
def get_background_style(self) -> Style:
return self._background_style
| PygmentsSyntaxTheme |
python | doocs__leetcode | solution/3600-3699/3606.Coupon Code Validator/Solution.py | {
"start": 0,
"end": 698
} | class ____:
def validateCoupons(
self, code: List[str], businessLine: List[str], isActive: List[bool]
) -> List[str]:
def check(s: str) -> bool:
if not s:
return False
for c in s:
if not (c.isalpha() or c.isdigit() or c == "_"):
return False
return True
idx = []
bs = {"electronics", "grocery", "pharmacy", "restaurant"}
for i, (c, b, a) in enumerate(zip(code, businessLine, isActive)):
if a and b in bs and check(c):
idx.append(i)
idx.sort(key=lambda i: (businessLine[i], code[i]))
return [code[i] for i in idx]
| Solution |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 31074,
"end": 31271
} | class ____(UpdateHITLDetailPayload):
"""Update the response content part of an existing Human-in-the-loop response."""
type: Literal["UpdateHITLDetail"] = "UpdateHITLDetail"
| UpdateHITLDetail |
python | huggingface__transformers | src/transformers/models/janus/modular_janus.py | {
"start": 22278,
"end": 22750
} | class ____(SiglipEncoderLayer):
def __init__(self, config: JanusVisionConfig):
super().__init__(config)
self.config = config
self.embed_dim = config.hidden_size
self.self_attn = JanusVisionAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = JanusVisionMLP(config)
| JanusVisionEncoderLayer |
python | fsspec__filesystem_spec | fsspec/implementations/tests/test_archive.py | {
"start": 4025,
"end": 5985
} | class ____:
"""
Describe a test scenario for any type of archive.
"""
def __init__(self, protocol=None, provider=None, variant=None):
# The filesystem protocol identifier. Any of "zip", "tar" or "libarchive".
self.protocol = protocol
# A contextmanager function to provide temporary synthesized archives.
self.provider = provider
# The filesystem protocol variant identifier. Any of "gz", "bz2" or "xz".
self.variant = variant
def pytest_generate_tests(metafunc):
"""
Generate test scenario parametrization arguments with appropriate labels (idlist).
On the one hand, this yields an appropriate output like::
fsspec/implementations/tests/test_archive.py::TestArchive::test_empty[zip] PASSED # noqa
On the other hand, it will support perfect test discovery, like::
pytest fsspec -vvv -k "zip or tar or libarchive"
https://docs.pytest.org/en/latest/example/parametrize.html#a-quick-port-of-testscenarios
"""
idlist = []
argnames = ["scenario"]
argvalues = []
for scenario in metafunc.cls.scenarios:
scenario: ArchiveTestScenario = scenario
label = scenario.protocol
if scenario.variant:
label += "-" + scenario.variant
idlist.append(label)
argvalues.append([scenario])
metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
# Define test scenarios.
scenario_zip = ArchiveTestScenario(protocol="zip", provider=tempzip)
scenario_tar = ArchiveTestScenario(protocol="tar", provider=temptar)
scenario_targz = ArchiveTestScenario(protocol="tar", provider=temptargz, variant="gz")
scenario_tarbz2 = ArchiveTestScenario(
protocol="tar", provider=temptarbz2, variant="bz2"
)
scenario_tarxz = ArchiveTestScenario(protocol="tar", provider=temptarxz, variant="xz")
scenario_libarchive = ArchiveTestScenario(protocol="libarchive", provider=temparchive)
| ArchiveTestScenario |
python | kamyu104__LeetCode-Solutions | Python/number-of-valid-words-for-each-puzzle.py | {
"start": 172,
"end": 1566
} | class ____(object):
def findNumOfValidWords(self, words, puzzles):
"""
:type words: List[str]
:type puzzles: List[str]
:rtype: List[int]
"""
L = 7
def search(node, puzzle, start, first, met_first):
result = 0
if "_end" in node and met_first:
result += node["_end"]
for i in xrange(start, len(puzzle)):
if puzzle[i] not in node:
continue
result += search(node[puzzle[i]], puzzle, i+1,
first, met_first or (puzzle[i] == first))
return result
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
for word in words:
count = set(word)
if len(count) > L:
continue
word = sorted(count)
end = reduce(dict.__getitem__, word, trie)
end["_end"] = end["_end"]+1 if "_end" in end else 1
result = []
for puzzle in puzzles:
first = puzzle[0]
result.append(search(trie, sorted(puzzle), 0, first, False))
return result
# Time: O(m*2^(L-1) + n*(l+m)), m is the number of puzzles, L is the length of puzzles
# , n is the number of words, l is the max length of words
# Space: O(m*2^(L-1))
import collections
| Solution |
python | scipy__scipy | scipy/integrate/_ode.py | {
"start": 43117,
"end": 44380
} | class ____(dopri5):
runner = getattr(_dop, 'dopri853', None)
name = 'dop853'
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=6.0,
dfactor=0.3,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
ifactor, dfactor, beta, method, verbosity)
def reset(self, n, has_jac):
work = zeros((11 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
self.iwork = zeros((21,), dtype=np.int32)
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork,
self.nsteps, self.verbosity]
self.success = 1
if dop853.runner is not None:
IntegratorBase.integrator_classes.append(dop853)
| dop853 |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/align.py | {
"start": 7926,
"end": 10368
} | class ____(JupyterMixin):
"""Vertically aligns a renderable.
Warn:
This class is deprecated and may be removed in a future version. Use Align class with
`vertical="middle"`.
Args:
renderable (RenderableType): A renderable object.
"""
def __init__(
self,
renderable: "RenderableType",
style: Optional[StyleType] = None,
) -> None:
self.renderable = renderable
self.style = style
def __repr__(self) -> str:
return f"VerticalCenter({self.renderable!r})"
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
style = console.get_style(self.style) if self.style is not None else None
lines = console.render_lines(
self.renderable, options.update(height=None), pad=False
)
width, _height = Segment.get_shape(lines)
new_line = Segment.line()
height = options.height or options.size.height
top_space = (height - len(lines)) // 2
bottom_space = height - top_space - len(lines)
blank_line = Segment(f"{' ' * width}", style)
def blank_lines(count: int) -> Iterable[Segment]:
for _ in range(count):
yield blank_line
yield new_line
if top_space > 0:
yield from blank_lines(top_space)
for line in lines:
yield from line
yield new_line
if bottom_space > 0:
yield from blank_lines(bottom_space)
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
measurement = Measurement.get(console, options, self.renderable)
return measurement
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console, Group
from pip._vendor.rich.highlighter import ReprHighlighter
from pip._vendor.rich.panel import Panel
highlighter = ReprHighlighter()
console = Console()
panel = Panel(
Group(
Align.left(highlighter("align='left'")),
Align.center(highlighter("align='center'")),
Align.right(highlighter("align='right'")),
),
width=60,
style="on dark_blue",
title="Align",
)
console.print(
Align.center(panel, vertical="middle", style="on red", height=console.height)
)
| VerticalCenter |
python | huggingface__transformers | src/transformers/models/paligemma/modeling_paligemma.py | {
"start": 9756,
"end": 10390
} | class ____(PreTrainedModel):
config: PaliGemmaConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["PaliGemmaMultiModalProjector"]
_skip_keys_device_placement = "past_key_values"
_can_compile_fullgraph = False
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
@auto_docstring(
custom_intro="""
The Base Paligemma model which consists of a vision backbone and a language model without language modeling head.,
"""
)
| PaliGemmaPreTrainedModel |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 27661,
"end": 28648
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_update_stored_info_type(self, mock_hook):
mock_hook.return_value.update_stored_info_type.return_value = StoredInfoType()
operator = CloudDLPUpdateStoredInfoTypeOperator(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
task_id="id",
)
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.update_stored_info_type.assert_called_once_with(
stored_info_type_id=STORED_INFO_TYPE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
config=None,
update_mask=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPUpdateStoredInfoTypeOperator |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/wasb.py | {
"start": 25448,
"end": 32869
} | class ____(WasbHook):
"""
An async hook that connects to Azure WASB to perform operations.
:param wasb_conn_id: reference to the :ref:`wasb connection <howto/connection:wasb>`
:param public_read: whether an anonymous public read access should be used. default is False
"""
def __init__(
self,
wasb_conn_id: str = "wasb_default",
public_read: bool = False,
) -> None:
"""Initialize the hook instance."""
self.conn_id = wasb_conn_id
self.public_read = public_read
self._blob_service_client: AsyncBlobServiceClient | BlobServiceClient | None = None
async def get_async_conn(self) -> AsyncBlobServiceClient:
"""Return the Async BlobServiceClient object."""
if self._blob_service_client is not None:
self.check_for_variable_type(
"self._blob_service_client", self._blob_service_client, AsyncBlobServiceClient
)
self._blob_service_client = cast("AsyncBlobServiceClient", self._blob_service_client)
return self._blob_service_client
conn = await sync_to_async(self.get_connection)(self.conn_id)
extra = conn.extra_dejson or {}
client_secret_auth_config = extra.pop("client_secret_auth_config", {})
connection_string = self._get_field(extra, "connection_string")
if connection_string:
# connection_string auth takes priority
self.blob_service_client: AsyncBlobServiceClient = AsyncBlobServiceClient.from_connection_string(
connection_string, **extra
)
return self.blob_service_client
account_url = parse_blob_account_url(conn.host, conn.login)
tenant = self._get_field(extra, "tenant_id")
if tenant:
# use Active Directory auth
app_id = conn.login or ""
app_secret = conn.password or ""
token_credential = AsyncClientSecretCredential(
tenant, app_id, app_secret, **client_secret_auth_config
)
self.blob_service_client = AsyncBlobServiceClient(
account_url=account_url,
credential=token_credential,
**extra,
)
return self.blob_service_client
if self.public_read:
# Here we use anonymous public read
# more info
# https://docs.microsoft.com/en-us/azure/storage/blobs/storage-manage-access-to-resources
self.blob_service_client = AsyncBlobServiceClient(account_url=account_url, **extra)
return self.blob_service_client
shared_access_key = self._get_field(extra, "shared_access_key")
if shared_access_key:
# using shared access key
self.blob_service_client = AsyncBlobServiceClient(
account_url=account_url, credential=shared_access_key, **extra
)
return self.blob_service_client
sas_token = self._get_field(extra, "sas_token")
if sas_token:
if sas_token.startswith("https"):
self.blob_service_client = AsyncBlobServiceClient(account_url=sas_token, **extra)
else:
self.blob_service_client = AsyncBlobServiceClient(
account_url=f"{account_url.rstrip('/')}/{sas_token}", **extra
)
return self.blob_service_client
# Fall back to old auth (password) or use managed identity if not provided.
credential: str | AsyncTokenCredential | None
credential = conn.password
if not credential:
# Check for account_key in extra fields before falling back to DefaultAzureCredential
account_key = self._get_field(extra, "account_key")
if account_key:
credential = account_key
else:
managed_identity_client_id = self._get_field(extra, "managed_identity_client_id")
workload_identity_tenant_id = self._get_field(extra, "workload_identity_tenant_id")
credential = get_async_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
self.log.info("Using DefaultAzureCredential as credential")
self.blob_service_client = AsyncBlobServiceClient(
account_url=account_url,
credential=credential,
**extra,
)
return self.blob_service_client
def _get_blob_client(self, container_name: str, blob_name: str) -> AsyncBlobClient:
"""
Instantiate a blob client.
:param container_name: the name of the blob container
:param blob_name: the name of the blob. This needs not be existing
"""
if self.blob_service_client is None:
raise AirflowException("BlobServiceClient is not initialized")
return self.blob_service_client.get_blob_client(container=container_name, blob=blob_name)
async def check_for_blob_async(self, container_name: str, blob_name: str, **kwargs: Any) -> bool:
"""
Check if a blob exists on Azure Blob Storage.
:param container_name: name of the container
:param blob_name: name of the blob
:param kwargs: optional keyword arguments for ``BlobClient.get_blob_properties``
"""
try:
await self._get_blob_client(container_name, blob_name).get_blob_properties(**kwargs)
except ResourceNotFoundError:
return False
return True
async def get_blobs_list_async(
self,
container_name: str,
prefix: str | None = None,
include: list[str] | None = None,
delimiter: str = "/",
**kwargs: Any,
) -> list[BlobProperties | BlobPrefix]:
"""
List blobs in a given container.
:param container_name: the name of the container
:param prefix: filters the results to return only blobs whose names
begin with the specified prefix.
:param include: specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
``copy`, ``deleted``.
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
"""
container = self._get_container_client(container_name)
container = cast("AsyncContainerClient", container)
self.check_for_variable_type("container", container, AsyncContainerClient)
blob_list: list[BlobProperties | BlobPrefix] = []
blobs = container.walk_blobs(name_starts_with=prefix, include=include, delimiter=delimiter, **kwargs)
async for blob in blobs:
blob_list.append(blob)
return blob_list
async def check_for_prefix_async(self, container_name: str, prefix: str, **kwargs: Any) -> bool:
"""
Check if a prefix exists on Azure Blob storage.
:param container_name: Name of the container.
:param prefix: Prefix of the blob.
:param kwargs: Optional keyword arguments for ``ContainerClient.walk_blobs``
"""
blobs = await self.get_blobs_list_async(container_name=container_name, prefix=prefix, **kwargs)
return bool(blobs)
| WasbAsyncHook |
python | mlflow__mlflow | examples/llama_index/workflow/workflow/events.py | {
"start": 247,
"end": 349
} | class ____(Event):
"""Event for triggering BM25 retrieval step."""
query: str
| BM25RetrieveEvent |
python | huggingface__transformers | tests/models/fnet/test_modeling_fnet.py | {
"start": 8961,
"end": 17708
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
FNetModel,
FNetForPreTraining,
FNetForMaskedLM,
FNetForNextSentencePrediction,
FNetForMultipleChoice,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": FNetModel,
"fill-mask": FNetForMaskedLM,
"question-answering": FNetForQuestionAnswering,
"text-classification": FNetForSequenceClassification,
"token-classification": FNetForTokenClassification,
"zero-shot": FNetForSequenceClassification,
}
if is_torch_available()
else {}
)
# Skip Tests
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
return True
return False
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
# Overridden Tests
@unittest.skip
def test_attention_outputs(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (list, tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
# tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
# dict_inputs = self._prepare_for_class(inputs_dict, model_class)
# check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
def setUp(self):
self.model_tester = FNetModelTester(self)
self.config_tester = FNetConfigTester(self, config_class=FNetConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "google/fnet-base"
model = FNetModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
| FNetModelTest |
python | keon__algorithms | algorithms/queues/queue.py | {
"start": 1084,
"end": 2470
} | class ____(AbstractQueue):
def __init__(self, capacity=10):
"""
Initialize python List with capacity of 10 or user given input.
Python List type is a dynamic array, so we have to restrict its
dynamic nature to make it work like a static array.
"""
super().__init__()
self._array = [None] * capacity
self._front = 0
self._rear = 0
def __iter__(self):
probe = self._front
while True:
if probe == self._rear:
return
yield self._array[probe]
probe += 1
def enqueue(self, value):
if self._rear == len(self._array):
self._expand()
self._array[self._rear] = value
self._rear += 1
self._size += 1
def dequeue(self):
if self.is_empty():
raise IndexError("Queue is empty")
value = self._array[self._front]
self._array[self._front] = None
self._front += 1
self._size -= 1
return value
def peek(self):
"""returns the front element of queue."""
if self.is_empty():
raise IndexError("Queue is empty")
return self._array[self._front]
def _expand(self):
"""expands size of the array.
Time Complexity: O(n)
"""
self._array += [None] * len(self._array)
| ArrayQueue |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 22942,
"end": 23372
} | class ____(django_test.TestCase):
def test_random(self):
class StandardModelFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.StandardModel
foo = factory.Faker('pystr')
o1 = StandardModelFactory()
o2 = StandardModelFactory()
self.assertNotEqual(o1.foo, o2.foo)
@unittest.skipIf(Image is None, "PIL not installed.")
| DjangoFakerTestCase |
python | getsentry__sentry | src/sentry/releases/endpoints/organization_release_details.py | {
"start": 11741,
"end": 22309
} | class ____(
OrganizationReleasesBaseEndpoint,
ReleaseAnalyticsMixin,
OrganizationReleaseDetailsPaginationMixin,
):
owner = ApiOwner.UNOWNED
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PUBLIC,
"PUT": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="Retrieve an Organization's Release",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
ReleaseParams.VERSION,
ReleaseParams.PROJECT_ID,
ReleaseParams.HEALTH,
ReleaseParams.ADOPTION_STAGES,
ReleaseParams.SUMMARY_STATS_PERIOD,
ReleaseParams.HEALTH_STATS_PERIOD,
ReleaseParams.SORT,
ReleaseParams.STATUS_FILTER,
VisibilityParams.QUERY,
],
responses={
200: inline_sentry_response_serializer("OrgReleaseResponse", ReleaseSerializerResponse),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=OrganizationExamples.RELEASE_DETAILS,
)
def get(self, request: Request, organization, version) -> Response:
"""
Return details on an individual release.
"""
# Dictionary responsible for storing selected project meta data
current_project_meta = {}
project_id = request.GET.get("project")
with_health = request.GET.get("health") == "1"
with_adoption_stages = request.GET.get("adoptionStages") == "1"
summary_stats_period = request.GET.get("summaryStatsPeriod") or "14d"
health_stats_period = request.GET.get("healthStatsPeriod") or ("24h" if with_health else "")
sort = request.GET.get("sort") or "date"
status_filter = request.GET.get("status", "open")
query = request.GET.get("query")
if summary_stats_period not in STATS_PERIODS:
raise ParseError(detail=get_stats_period_detail("summaryStatsPeriod", STATS_PERIODS))
if health_stats_period and health_stats_period not in STATS_PERIODS:
raise ParseError(detail=get_stats_period_detail("healthStatsPeriod", STATS_PERIODS))
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
if with_health and project_id:
try:
project = Project.objects.get_from_cache(id=int(project_id))
except (ValueError, Project.DoesNotExist):
raise ParseError(detail="Invalid project")
release._for_project_id = project.id
if project_id:
# Add sessions time bound to current project meta data
environments = set(request.GET.getlist("environment")) or None
current_project_meta.update(
{
**release_health.backend.get_release_sessions_time_bounds(
project_id=int(project_id),
release=release.version,
org_id=organization.id,
environments=environments,
)
}
)
# Get prev and next release to current release
try:
filter_params = self.get_filter_params(request, organization)
current_project_meta.update(
{
**self.get_adjacent_releases_to_current_release(
org=organization,
release=release,
filter_params=filter_params,
stats_period=summary_stats_period,
sort=sort,
status_filter=status_filter,
query=query,
),
**self.get_first_and_last_releases(
org=organization,
environment=filter_params.get("environment"),
project_id=[project_id],
sort=sort,
),
}
)
except InvalidSortException:
return Response({"detail": "invalid sort"}, status=400)
return Response(
serialize(
release,
request.user,
with_health_data=with_health,
with_adoption_stages=with_adoption_stages,
summary_stats_period=summary_stats_period,
health_stats_period=health_stats_period,
current_project_meta=current_project_meta,
)
)
@extend_schema(
operation_id="Update an Organization's Release",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
ReleaseParams.VERSION,
],
request=OrganizationReleaseSerializer,
responses={
200: inline_sentry_response_serializer("OrgReleaseResponse", ReleaseSerializerResponse),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=OrganizationExamples.RELEASE_DETAILS,
)
def put(self, request: Request, organization: Organization, version) -> Response:
"""
Update a release. This can change some metadata associated with
the release (the ref, url, and dates).
"""
bind_organization_context(organization)
scope = sentry_sdk.get_isolation_scope()
scope.set_tag("version", version)
try:
release = Release.objects.get(organization_id=organization.id, version=version)
projects = release.projects.all()
except Release.DoesNotExist:
scope.set_tag("failure_reason", "Release.DoesNotExist")
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
scope.set_tag("failure_reason", "no_release_permission")
raise ResourceDoesNotExist
serializer = OrganizationReleaseSerializer(data=request.data)
if not serializer.is_valid():
scope.set_tag("failure_reason", "serializer_error")
return Response(serializer.errors, status=400)
result = serializer.validated_data
was_released = bool(release.date_released)
kwargs = {}
if result.get("dateReleased"):
kwargs["date_released"] = result["dateReleased"]
if result.get("ref"):
kwargs["ref"] = result["ref"]
if result.get("url"):
kwargs["url"] = result["url"]
if result.get("status"):
kwargs["status"] = result["status"]
if kwargs:
release.update(**kwargs)
commit_list = result.get("commits")
if commit_list:
# TODO(dcramer): handle errors with release payloads
try:
release.set_commits(commit_list)
self.track_set_commits_local(
request,
organization_id=organization.id,
project_ids=[project.id for project in projects],
)
except ReleaseCommitError:
raise ConflictError("Release commits are currently being processed")
refs = result.get("refs")
if not refs:
# Handle legacy
if result.get("headCommits", []):
refs = [
{
"repository": r["repository"],
"previousCommit": r.get("previousId"),
"commit": r["currentId"],
}
for r in result.get("headCommits", [])
]
# Clear commits in release
else:
if result.get("refs") == []:
release.clear_commits()
scope.set_tag("has_refs", bool(refs))
if refs:
if not request.user.is_authenticated and not request.auth:
scope.set_tag("failure_reason", "user_not_authenticated")
return Response(
{"refs": ["You must use an authenticated API token to fetch refs"]},
status=400,
)
fetch_commits = not commit_list
try:
release.set_refs(refs, request.user.id, fetch=fetch_commits)
except InvalidRepository as e:
scope.set_tag("failure_reason", "InvalidRepository")
return Response({"refs": [str(e)]}, status=400)
if not was_released and release.date_released:
for project in projects:
Activity.objects.create(
type=ActivityType.RELEASE.value,
project=project,
ident=Activity.get_version_ident(release.version),
data={"version": release.version},
datetime=release.date_released,
)
no_snuba_for_release_creation = options.get("releases.no_snuba_for_release_creation")
return Response(
serialize(
release, request.user, no_snuba_for_release_creation=no_snuba_for_release_creation
)
)
@extend_schema(
operation_id="Delete an Organization's Release",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
ReleaseParams.VERSION,
],
responses={
204: RESPONSE_NO_CONTENT,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(self, request: Request, organization, version) -> Response:
"""
Permanently remove a release and all of its files.
"""
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
try:
release.safe_delete()
except UnsafeReleaseDeletion as e:
return Response({"detail": str(e)}, status=400)
return Response(status=204)
| OrganizationReleaseDetailsEndpoint |
python | pennersr__django-allauth | allauth/socialaccount/providers/twitter_oauth2/provider.py | {
"start": 228,
"end": 612
} | class ____(ProviderAccount):
def get_username(self):
return self.account.extra_data.get("username")
def get_profile_url(self):
username = self.get_username()
if username:
return "https://x.com/" + username
return None
def get_avatar_url(self):
return self.account.extra_data.get("profile_image_url")
| TwitterOAuth2Account |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0111_add_multiple_versions_without_translations.py | {
"start": 148,
"end": 2691
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0110_migrate_versioning_scheme"),
]
operations = [
migrations.AlterField(
model_name="historicalproject",
name="versioning_scheme",
field=models.CharField(
choices=[
(
"multiple_versions_with_translations",
"Multiple versions with translations (/<language>/<version>/<filename>)",
),
(
"multiple_versions_without_translations",
"Multiple versions without translations (/<version>/<filename>)",
),
(
"single_version_without_translations",
"Single version without translations (/<filename>)",
),
],
default="multiple_versions_with_translations",
help_text="This affects how the URL of your documentation looks like, and if it supports translations or multiple versions. Changing the versioning scheme will break your current URLs.",
max_length=120,
null=True,
verbose_name="Versioning scheme",
),
),
migrations.AlterField(
model_name="project",
name="versioning_scheme",
field=models.CharField(
choices=[
(
"multiple_versions_with_translations",
"Multiple versions with translations (/<language>/<version>/<filename>)",
),
(
"multiple_versions_without_translations",
"Multiple versions without translations (/<version>/<filename>)",
),
(
"single_version_without_translations",
"Single version without translations (/<filename>)",
),
],
default="multiple_versions_with_translations",
help_text="This affects how the URL of your documentation looks like, and if it supports translations or multiple versions. Changing the versioning scheme will break your current URLs.",
max_length=120,
null=True,
verbose_name="Versioning scheme",
),
),
]
| Migration |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/transfers/sql_to_s3.py | {
"start": 1835,
"end": 14164
} | class ____(BaseOperator):
"""
Saves data from a specific SQL query into a file in S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SqlToS3Operator`
:param query: the sql query to be executed. If you want to execute a file, place the absolute path of it,
ending with .sql extension. (templated)
:param s3_bucket: bucket where the data will be stored. (templated)
:param s3_key: desired key for the file. It includes the name of the file. (templated)
:param replace: whether or not to replace the file in S3 if it previously existed
:param sql_conn_id: reference to a specific database.
:param sql_hook_params: Extra config params to be passed to the underlying hook.
Should match the desired hook constructor params.
:param parameters: (optional) the parameters to render the SQL query with.
:param read_kwargs: arguments to include in DataFrame when reading from SQL (supports both pandas and polars).
:param df_type: the type of DataFrame to use ('pandas' or 'polars'). Defaults to 'pandas'.
:param aws_conn_id: reference to a specific S3 connection
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param file_format: the destination file format, only string 'csv', 'json' or 'parquet' is accepted.
:param max_rows_per_file: (optional) argument to set destination file number of rows limit, if source data
is larger than that, it will be dispatched into multiple files.
Will be ignored if ``groupby_kwargs`` argument is specified.
:param df_kwargs: arguments to include in DataFrame ``.to_parquet()``, ``.to_json()`` or ``.to_csv()``.
:param groupby_kwargs: argument to include in DataFrame ``groupby()``.
"""
template_fields: Sequence[str] = (
"s3_bucket",
"s3_key",
"query",
"sql_conn_id",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {
"query": "sql",
"df_kwargs": "json",
"pd_kwargs": "json",
"read_kwargs": "json",
}
def __init__(
self,
*,
query: str,
s3_bucket: str,
s3_key: str,
sql_conn_id: str,
sql_hook_params: dict | None = None,
parameters: None | Mapping[str, Any] | list | tuple = None,
read_kwargs: dict | None = None,
read_pd_kwargs: dict | None = None,
df_type: Literal["pandas", "polars"] = "pandas",
replace: bool = False,
aws_conn_id: str | None = "aws_default",
verify: bool | str | None = None,
file_format: Literal["csv", "json", "parquet"] = "csv",
max_rows_per_file: int = 0,
df_kwargs: dict | None = None,
pd_kwargs: dict | None = None,
groupby_kwargs: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.query = query
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.sql_conn_id = sql_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.replace = replace
self.parameters = parameters
self.max_rows_per_file = max_rows_per_file
self.groupby_kwargs = groupby_kwargs or {}
self.sql_hook_params = sql_hook_params
self.df_type = df_type
if read_pd_kwargs is not None:
warnings.warn(
"The 'read_pd_kwargs' parameter is deprecated. Use 'read_kwargs' instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.read_kwargs = read_kwargs if read_kwargs is not None else read_pd_kwargs or {}
if pd_kwargs is not None:
warnings.warn(
"The 'pd_kwargs' parameter is deprecated. Use 'df_kwargs' instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.df_kwargs = df_kwargs if df_kwargs is not None else pd_kwargs or {}
if "path_or_buf" in self.df_kwargs:
raise AirflowException("The argument path_or_buf is not allowed, please remove it")
if self.max_rows_per_file and self.groupby_kwargs:
raise AirflowException(
"SqlToS3Operator arguments max_rows_per_file and groupby_kwargs "
"can not be both specified. Please choose one."
)
try:
self.file_format = FILE_FORMAT[file_format.upper()]
except KeyError:
raise AirflowException(f"The argument file_format doesn't support {file_format} value.")
@staticmethod
def _fix_dtypes(df: pd.DataFrame, file_format: FILE_FORMAT) -> None:
"""
Mutate DataFrame to set dtypes for float columns containing NaN values.
Set dtype of object to str to allow for downstream transformations.
"""
try:
import numpy as np
import pandas as pd
except ImportError as e:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(e)
for col in df:
if df[col].dtype.name == "object" and file_format == FILE_FORMAT.PARQUET:
# if the type wasn't identified or converted, change it to a string so if can still be
# processed.
df[col] = df[col].astype(str)
if "float" in df[col].dtype.name and df[col].hasnans:
# inspect values to determine if dtype of non-null values is int or float
notna_series: Any = df[col].dropna().values
if np.equal(notna_series, notna_series.astype(int)).all():
# set to dtype that retains integers and supports NaNs
# The type ignore can be removed here if https://github.com/numpy/numpy/pull/23690
# is merged and released as currently NumPy does not consider None as valid for x/y.
df[col] = np.where(df[col].isnull(), None, df[col]) # type: ignore[call-overload]
df[col] = df[col].astype(pd.Int64Dtype())
elif np.isclose(notna_series, notna_series.astype(int)).all():
# set to float dtype that retains floats and supports NaNs
# The type ignore can be removed here if https://github.com/numpy/numpy/pull/23690
# is merged and released
df[col] = np.where(df[col].isnull(), None, df[col]) # type: ignore[call-overload]
df[col] = df[col].astype(pd.Float64Dtype())
@staticmethod
def _strip_suffixes(
path: str,
) -> str:
suffixes = [".json.gz", ".csv.gz", ".json", ".csv", ".parquet"]
for suffix in sorted(suffixes, key=len, reverse=True):
if path.endswith(suffix):
return path[: -len(suffix)]
return path
def execute(self, context: Context) -> None:
sql_hook = self._get_hook()
s3_conn = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
data_df = sql_hook.get_df(
sql=self.query, parameters=self.parameters, df_type=self.df_type, **self.read_kwargs
)
self.log.info("Data from SQL obtained")
# Only apply dtype fixes to pandas DataFrames since Polars doesn't have the same NaN/None inconsistencies as panda
if ("dtype_backend", "pyarrow") not in self.read_kwargs.items() and self.df_type == "pandas":
self._fix_dtypes(data_df, self.file_format) # type: ignore[arg-type]
file_options = FILE_OPTIONS_MAP[self.file_format]
for group_name, df in self._partition_dataframe(df=data_df):
buf = io.BytesIO()
self.log.info("Writing data to in-memory buffer")
clean_key = self._strip_suffixes(self.s3_key)
object_key = (
f"{clean_key}_{group_name}{file_options.suffix}"
if group_name
else f"{clean_key}{file_options.suffix}"
)
if self.file_format != FILE_FORMAT.PARQUET and self.df_kwargs.get("compression") == "gzip":
object_key += ".gz"
df_kwargs = {k: v for k, v in self.df_kwargs.items() if k != "compression"}
with gzip.GzipFile(fileobj=buf, mode="wb", filename=object_key) as gz:
getattr(df, file_options.function)(gz, **df_kwargs)
else:
if self.file_format == FILE_FORMAT.PARQUET:
getattr(df, file_options.function)(buf, **self.df_kwargs)
else:
text_buf = io.TextIOWrapper(buf, encoding="utf-8", write_through=True)
getattr(df, file_options.function)(text_buf, **self.df_kwargs)
text_buf.flush()
buf.seek(0)
self.log.info("Uploading data to S3")
s3_conn.load_file_obj(
file_obj=buf, key=object_key, bucket_name=self.s3_bucket, replace=self.replace
)
def _partition_dataframe(
self, df: pd.DataFrame | pl.DataFrame
) -> Iterable[tuple[str, pd.DataFrame | pl.DataFrame]]:
"""Partition dataframe using pandas or polars groupby() method."""
try:
import secrets
import string
import numpy as np
import pandas as pd
import polars as pl
except ImportError:
pass
# if max_rows_per_file argument is specified, a temporary column with a random unusual name will be
# added to the dataframe. This column is used to dispatch the dataframe into smaller ones using groupby()
random_column_name = None
if self.max_rows_per_file and not self.groupby_kwargs:
random_column_name = "".join(secrets.choice(string.ascii_letters) for _ in range(20))
self.groupby_kwargs = {"by": random_column_name}
if random_column_name:
if isinstance(df, pd.DataFrame):
df[random_column_name] = np.arange(len(df)) // self.max_rows_per_file
elif isinstance(df, pl.DataFrame):
df = df.with_columns(
(pl.int_range(pl.len()) // self.max_rows_per_file).alias(random_column_name)
)
if not self.groupby_kwargs:
yield "", df
return
if isinstance(df, pd.DataFrame):
for group_label in (grouped_df := df.groupby(**self.groupby_kwargs)).groups:
group_df = grouped_df.get_group(group_label)
if random_column_name:
group_df = group_df.drop(random_column_name, axis=1, errors="ignore")
yield (
cast("str", group_label[0] if isinstance(group_label, tuple) else group_label),
group_df.reset_index(drop=True),
)
elif isinstance(df, pl.DataFrame):
for group_label, group_df_in in df.group_by(**self.groupby_kwargs): # type: ignore[assignment]
group_df2 = group_df_in.drop(random_column_name) if random_column_name else group_df_in
yield (
cast("str", group_label[0] if isinstance(group_label, tuple) else group_label),
group_df2,
)
def _get_hook(self) -> DbApiHook:
self.log.debug("Get connection for %s", self.sql_conn_id)
conn = BaseHook.get_connection(self.sql_conn_id)
hook = conn.get_hook(hook_params=self.sql_hook_params)
if not callable(getattr(hook, "get_df", None)):
raise AirflowException("This hook is not supported. The hook class must have get_df method.")
return hook
| SqlToS3Operator |
python | numba__numba | numba/tests/test_map_filter_reduce.py | {
"start": 1473,
"end": 2031
} | class ____(unittest.TestCase):
def test_basic_reduce_external_func(self):
func = njit(lambda x, y: x + y)
def impl():
return reduce(func, range(-10, 10))
cfunc = njit(impl)
self.assertEqual(impl(), cfunc())
def test_basic_reduce_closure(self):
def impl():
def func(x, y):
return x + y
return reduce(func, range(-10, 10), 100)
cfunc = njit(impl)
self.assertEqual(impl(), cfunc())
if __name__ == '__main__':
unittest.main()
| TestReduce |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 36813,
"end": 38299
} | class ____(Widget):
"""A representation of ``st.text_area``."""
_value: str | None | InitialValue
proto: TextAreaProto = field(repr=False)
label: str
max_chars: int
placeholder: str
help: str
form_id: str
def __init__(self, proto: TextAreaProto, root: ElementTree) -> None:
super().__init__(proto, root)
self._value = InitialValue()
self.type = "text_area"
def set_value(self, v: str | None) -> TextArea:
"""Set the value of the widget."""
self._value = v
return self
@property
def _widget_state(self) -> WidgetState:
ws = WidgetState()
ws.id = self.id
if self.value is not None:
ws.string_value = self.value
return ws
@property
def value(self) -> str | None:
"""The current value of the widget. (str)""" # noqa: D400
if not isinstance(self._value, InitialValue):
return self._value
state = self.root.session_state
assert state
# Awkward to do this with `cast`
return state[self.id] # type: ignore
def input(self, v: str) -> TextArea:
"""
Set the value of the widget only if the value does not exceed the\
maximum allowed characters.
"""
# TODO: should input be setting or appending?
if self.max_chars and len(v) > self.max_chars:
return self
return self.set_value(v)
@dataclass(repr=False)
| TextArea |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 55959,
"end": 56133
} | class ____(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
| atlas_3_10_threads_info |
python | pytorch__pytorch | test/dynamo/cpython/3_13/seq_tests.py | {
"start": 1976,
"end": 2298
} | class ____:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
| IterFunc |
python | getsentry__sentry | src/sentry/notifications/types.py | {
"start": 1477,
"end": 2010
} | class ____(ValueEqualityEnum):
DEFAULT = "default"
NEVER = "never"
ALWAYS = "always"
SUBSCRIBE_ONLY = "subscribe_only"
COMMITTED_ONLY = "committed_only"
# default is not a choice anymore, we just delete the row if we want to the default
NOTIFICATION_SETTING_CHOICES = [
NotificationSettingsOptionEnum.ALWAYS.value,
NotificationSettingsOptionEnum.NEVER.value,
NotificationSettingsOptionEnum.SUBSCRIBE_ONLY.value,
NotificationSettingsOptionEnum.COMMITTED_ONLY.value,
]
| NotificationSettingsOptionEnum |
python | pytorch__pytorch | benchmarks/dynamo/pr_time_benchmarks/benchmarks/benchmark_base.py | {
"start": 2598,
"end": 9731
} | class ____(ABC):
# Measure total number of instruction spent in _work.
# Garbage collection is NOT disabled during _work().
_enable_instruction_count = False
# Measure total number of instruction spent in convert_frame.compile_inner
# Garbage collection is disabled during _work() to avoid noise.
_enable_compile_time_instruction_count = False
# number of iterations used to run when collecting instruction_count or compile_time_instruction_count.
_num_iterations = 5
def __init__(
self,
category: str,
device: str,
backend: str = "",
mode: str = "",
dynamic=None,
) -> None:
# These individual attributes are used to support different filters on the
# dashboard later
self._category = category
self._device = device
self._backend = backend
self._mode = mode # Training or inference
self._dynamic = dynamic
def with_iterations(self, value: int) -> Self:
self._num_iterations = value
return self
def enable_instruction_count(self) -> Self:
self._enable_instruction_count = True
return self
def enable_compile_time_instruction_count(self) -> Self:
self._enable_compile_time_instruction_count = True
return self
def name(self) -> str:
return ""
def backend(self) -> str:
return self._backend
def mode(self) -> str:
return self._mode
def category(self) -> str:
return self._category
def device(self) -> str:
return self._device
def is_dynamic(self) -> Optional[bool]:
return self._dynamic
def description(self) -> str:
return ""
@abstractmethod
def _prepare(self) -> None:
pass
@abstractmethod
def _work(self) -> None:
pass
def _prepare_once(self) -> None: # noqa: B027
pass
def _count_instructions(self) -> int:
print(f"collecting instruction count for {self.name()}")
results = []
for i in range(self._num_iterations):
self._prepare()
id = i_counter.start()
self._work()
count = i_counter.end(id)
print(f"instruction count for iteration {i} is {count}")
results.append(count)
return min(results)
def _count_compile_time_instructions(self) -> int:
gc.disable()
try:
print(f"collecting compile time instruction count for {self.name()}")
config.record_compile_time_instruction_count = True
results = []
for i in range(self._num_iterations):
self._prepare()
gc.collect()
# CompileTimeInstructionCounter.record is only called on convert_frame._compile_inner
# hence this will only count instruction count spent in compile_inner.
CompileTimeInstructionCounter.clear()
self._work()
count = CompileTimeInstructionCounter.value()
if count == 0:
raise RuntimeError(
"compile time instruction count is 0, please check your benchmarks"
)
print(f"compile time instruction count for iteration {i} is {count}")
results.append(count)
config.record_compile_time_instruction_count = False
return min(results)
finally:
gc.enable()
def _write_to_json(self, output_dir: str) -> None:
"""
Write the result into JSON format, so that it can be uploaded to the benchmark database
to be displayed on OSS dashboard. The JSON format is defined at
https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
"""
records = []
for entry in self.results:
metric_name = entry[1]
value = entry[2]
if not metric_name or value is None:
continue
records.append(
{
"benchmark": {
"name": "pr_time_benchmarks",
"mode": self.mode(),
"extra_info": {
"is_dynamic": self.is_dynamic(),
"device": self.device(),
"description": self.description(),
},
},
"model": {
"name": self.name(),
"type": self.category(),
"backend": self.backend(),
},
"metric": {
"name": metric_name,
"benchmark_values": [value],
},
}
)
with open(os.path.join(output_dir, f"{self.name()}.json"), "w") as f:
json.dump(records, f)
def append_results(self, path: str) -> None:
with open(path, "a", newline="") as csvfile:
# Create a writer object
writer = csv.writer(csvfile)
# Write the data to the CSV file
for entry in self.results:
writer.writerow(entry)
# TODO (huydhn) This requires the path to write to, so it needs to be in the same place
# as the CSV writer for now
self._write_to_json(os.path.dirname(os.path.abspath(path)))
def print(self) -> None:
for entry in self.results:
print(f"{entry[0]},{entry[1]},{entry[2]}")
def collect_all(self) -> Self:
self._prepare_once()
self.results = []
if (
self._enable_instruction_count
and self._enable_compile_time_instruction_count
):
raise RuntimeError(
"not supported until we update the logger, both logs to the same field now"
)
if self._enable_instruction_count:
r = self._count_instructions()
self.results.append((self.name(), "instruction_count", r))
if log_to_scuba:
scribe_log_torch_benchmark_compile_time(
name=self.name(),
instruction_count=r,
)
if self._enable_compile_time_instruction_count:
# enable_cpp_symbolic_shape_guards has impact on these benchmarks
# Keep using False value for consistency.
with config.patch("enable_cpp_symbolic_shape_guards", False):
r = self._count_compile_time_instructions()
self.results.append(
(
self.name(),
"compile_time_instruction_count",
r,
)
)
if log_to_scuba:
# TODO add a new field compile_time_instruction_count to the logger.
scribe_log_torch_benchmark_compile_time(
name=self.name(),
instruction_count=r,
)
return self
| BenchmarkBase |
python | realpython__materials | python-annotations/forward_references_strings.py | {
"start": 128,
"end": 189
} | class ____:
value: Any
next: Optional["Node"] = None
| Node |
python | ansible__ansible | test/units/module_utils/facts/test_collector.py | {
"start": 909,
"end": 1972
} | class ____(unittest.TestCase):
def test(self):
compat_platforms = [{'system': 'Generic'}]
res = collector.find_collectors_for_platform(default_collectors.collectors,
compat_platforms)
for coll_class in res:
self.assertIn(coll_class._platform, ('Generic'))
def test_linux(self):
compat_platforms = [{'system': 'Linux'}]
res = collector.find_collectors_for_platform(default_collectors.collectors,
compat_platforms)
for coll_class in res:
self.assertIn(coll_class._platform, ('Linux'))
def test_linux_or_generic(self):
compat_platforms = [{'system': 'Generic'}, {'system': 'Linux'}]
res = collector.find_collectors_for_platform(default_collectors.collectors,
compat_platforms)
for coll_class in res:
self.assertIn(coll_class._platform, ('Generic', 'Linux'))
| TestFindCollectorsForPlatform |
python | PyCQA__pylint | doc/exts/pylint_messages.py | {
"start": 2463,
"end": 17866
} | class ____(str, Enum):
GOOD = "good"
BAD = "bad"
MessagesDict = dict[str, list[MessageData]]
OldMessagesDict = dict[str, defaultdict[tuple[str, str], list[tuple[str, str]]]]
"""DefaultDict is indexed by tuples of (old name symbol, old name id) and values are
tuples of (new name symbol, new name category).
"""
def _register_all_checkers_and_extensions(linter: PyLinter) -> None:
"""Registers all checkers and extensions found in the default folders."""
initialize_checkers(linter)
initialize_extensions(linter)
def _get_example_code(data_path: Path) -> str:
"""Get the example code from the specified path."""
if not data_path.exists():
raise AssertionError(
f"Documentation examples path {data_path} does not exist. "
"Please create it and add an example."
)
good_code = _get_demo_code_for(data_path, ExampleType.GOOD)
bad_code = _get_demo_code_for(data_path, ExampleType.BAD)
pylintrc = _get_pylintrc_code(data_path)
details = _get_titled_rst(
title="Additional details", text=_get_rst_as_str(data_path / "details.rst")
)
related = _get_titled_rst(
title="Related links", text=_get_rst_as_str(data_path / "related.rst")
)
_check_placeholders(data_path, bad_code, details, related)
return "\n".join((bad_code, good_code, pylintrc, details, related)) + "\n"
def _get_pylintrc_code(data_path: Path) -> str:
if (data_path / "pylintrc").exists():
pylintrc = _get_titled_rst(
title="Configuration file", text=_get_ini_as_rst(data_path / "pylintrc")
)
else:
pylintrc = ""
return pylintrc
def _get_demo_code_for(data_path: Path, example_type: ExampleType) -> str:
"""Get code examples while handling multi-file code templates."""
if data_path.name in MESSAGES_WITHOUT_EXAMPLES or (
data_path.name in MESSAGES_WITHOUT_BAD_EXAMPLES
and example_type is ExampleType.BAD
):
return ""
single_file_path = data_path / f"{example_type.value}.py"
multiple_code_path = data_path / f"{example_type.value}"
if single_file_path.exists() and multiple_code_path.exists():
raise ValueError(
f"You cannot have a single file '{example_type.value}.py' and multiple files "
f"example '{multiple_code_path}' existing at the same time."
)
title = "Problematic code" if example_type is ExampleType.BAD else "Correct code"
if single_file_path.exists():
return _get_titled_rst(
title=title, text=_get_python_code_as_rst(single_file_path)
)
if multiple_code_path.exists():
files: list[str] = []
# Sort so the order of the files makes sense
for file_as_str in sorted([str(p) for p in multiple_code_path.iterdir()]):
file = Path(file_as_str)
if file.suffix == ".py":
files.append(
f"""\
``{file.name}``:
.. literalinclude:: /{file.relative_to(Path.cwd())}
:language: python
"""
)
return _get_titled_rst(title=title, text="\n".join(files))
raise AssertionError(
f"Please add a {example_type.value} code example for {data_path}"
)
def _check_placeholders(
data_path: Path, bad_code: str, details: str, related: str
) -> None:
# Check if the placeholder file can even be presented by checking if its path exists
good_path = data_path / "good.py"
if not good_path.exists():
return
if bad_code or related:
placeholder_details = "help us make the doc better" in details
with open(good_path, encoding="utf-8") as f:
placeholder_good = "placeholder" in f.read()
assert_msg = (
f"Please remove placeholders in '{data_path}' "
f"as you started completing the documentation"
)
assert not placeholder_good and not placeholder_details, assert_msg
def _get_titled_rst(title: str, text: str) -> str:
"""Return rst code with a title if there is anything in the section."""
return f"**{title}:**\n\n{text}" if text else ""
def _get_rst_as_str(rst_path: Path) -> str:
"""Return the content of an 'rst' file or an empty string if the file does not
exist.
"""
if not rst_path.exists():
return ""
with open(rst_path, encoding="utf-8") as f:
return f.read()
def _get_python_code_as_rst(code_path: Path) -> str:
"""Return the 'rst' representation of a python file or an empty string if the file
does not exist.
"""
if not code_path.exists():
return ""
return f"""\
.. literalinclude:: /{code_path.relative_to(Path.cwd())}
:language: python
"""
def _get_ini_as_rst(code_path: Path) -> str:
return f"""\
.. literalinclude:: /{code_path.relative_to(Path.cwd())}
:language: ini
"""
def _get_all_messages(linter: PyLinter) -> tuple[MessagesDict, OldMessagesDict]:
    """Get all messages registered to a linter and return a dictionary indexed by
    message type.
    Also return a dictionary of old message and the new messages they can be mapped to.
    """
    # One bucket per message severity/category.
    messages_dict: MessagesDict = {
        "fatal": [],
        "error": [],
        "warning": [],
        "convention": [],
        "refactor": [],
        "information": [],
    }
    # Per category: (old_symbol, old_msgid) -> list of (new_symbol, new_category).
    old_messages: OldMessagesDict = {
        "fatal": defaultdict(list),
        "error": defaultdict(list),
        "warning": defaultdict(list),
        "convention": defaultdict(list),
        "refactor": defaultdict(list),
        "information": defaultdict(list),
    }
    # Flatten every registered checker into (checker, message) pairs.
    checker_message_mapping = chain.from_iterable(
        ((checker, msg) for msg in checker.messages)
        for checker in linter.get_checkers()
    )
    for checker, message in checker_message_mapping:
        example_code = _get_example_code(_get_message_data_path(message))
        checker_module = getmodule(checker)
        assert (
            checker_module and checker_module.__file__
        ), f"Cannot find module for checker {checker}"
        message_data = MessageData(
            message.checker_name,
            message.msgid,
            message.symbol,
            message,
            example_code,
            checker_module.__name__,
            checker_module.__file__,
            message.shared,
            message.default_enabled,
        )
        # The first letter of the msgid encodes the category (e.g. 'W' -> warning).
        msg_type = MSG_TYPES_DOC[message.msgid[0]]
        messages_dict[msg_type].append(message_data)
        if message.old_names:
            # old_name is (old_msgid, old_symbol); the old msgid's first letter
            # gives the category the redirect page belongs to.
            for old_name in message.old_names:
                category = MSG_TYPES_DOC[old_name[0][0]]
                # We check if the message is already in old_messages, so we don't
                # duplicate shared messages.
                if (message.symbol, msg_type) not in old_messages[category][
                    (old_name[1], old_name[0])
                ]:
                    old_messages[category][(old_name[1], old_name[0])].append(
                        (message.symbol, msg_type)
                    )
    return messages_dict, old_messages
def _get_message_data_path(message: MessageDefinition) -> Path:
    """Return the data directory holding the examples for *message*.

    Messages are grouped by the first letter of their symbol.
    """
    return PYLINT_MESSAGES_DATA_PATH.joinpath(message.symbol[0], message.symbol)
def _message_needs_update(message_data: MessageData, category: str) -> bool:
    """Return True when the .rst page for *message_data* must be regenerated."""
    rst_path = _get_message_path(category, message_data)
    # A page that was never generated always needs writing.
    if not rst_path.exists():
        return True
    # Regenerate when the checker module is newer than the generated page.
    checker_mtime = Path(message_data.checker_module_path).stat().st_mtime
    return checker_mtime > rst_path.stat().st_mtime
def _get_category_directory(category: str) -> Path:
    """Return the directory where pages for *category* messages are written."""
    return PYLINT_MESSAGES_PATH.joinpath(category)
def _get_message_path(category: str, message: MessageData) -> Path:
    """Return the path of the .rst page generated for *message*."""
    return _get_category_directory(category).joinpath(f"{message.name}.rst")
def _write_message_page(messages_dict: MessagesDict) -> None:
    """Create or overwrite the file for each message."""
    for category, messages in messages_dict.items():
        category_dir = _get_category_directory(category)
        if not category_dir.exists():
            category_dir.mkdir(parents=True, exist_ok=True)
        # Non-shared messages: one page each, regenerated only when stale.
        for message in messages:
            if message.shared:
                continue
            if not _message_needs_update(message, category):
                continue
            _write_single_message_page(category_dir, message)
        # Shared messages are grouped by id; groupby() only groups adjacent
        # items, hence the pre-sort on the same key.
        for _, shared_messages in groupby(
            sorted(
                (message for message in messages if message.shared), key=lambda m: m.id
            ),
            key=lambda m: m.id,
        ):
            shared_messages_list = list(shared_messages)
            # A 'shared' message seen only once still gets a regular page.
            if len(shared_messages_list) > 1:
                _write_single_shared_message_page(category_dir, shared_messages_list)
            else:
                _write_single_message_page(category_dir, shared_messages_list[0])
def _generate_single_message_body(message: MessageData) -> str:
    """Build the rst body shared by single-checker and shared message pages."""
    # Anchor + title + emitted text + description.
    body = f""".. _{message.name}:
{get_rst_title(f"{message.name} / {message.id}", "=")}
**Message emitted:**
``{message.definition.msg}``
**Description:**
*{message.definition.description}*
"""
    if not message.default_enabled:
        body += f"""
.. caution::
    This message is disabled by default. To enable it, add ``{message.name}`` to the ``enable`` option.
"""
    # 'I' msgids are informational and never fail the run by themselves.
    if message.id.startswith("I"):
        body += f"""
.. caution::
    By default, this message will not fail the execution (pylint will return 0).
    To make pylint fail for this message use the ``--fail-on={message.id}`` option
    or ``--fail-on=I`` to fail on all enabled informational messages.
"""
    body += f"\n{message.example_code}\n"
    # Optional checkers live in pylint.extensions and need an explicit plugin load.
    if message.checker_module_name.startswith("pylint.extensions."):
        body += f"""
.. note::
    This message is emitted by the optional :ref:`'{message.checker}'<{message.checker_module_name}>`
    checker, which requires the ``{message.checker_module_name}`` plugin to be loaded.
"""
    return body
def _generate_checker_url(message: MessageData) -> str:
    """Return the GitHub URL of the module defining the checker for *message*."""
    relative_path = os.path.relpath(message.checker_module_path, PYLINT_BASE_PATH)
    return f"https://github.com/pylint-dev/pylint/blob/main/{relative_path}"
def _write_single_shared_message_page(
    category_dir: Path, messages: list[MessageData]
) -> None:
    """Write the page for a message emitted by more than one checker."""
    # The rst body is generated from the first occurrence; the footer links
    # every checker that can emit the message.
    message = messages[0]
    checker_urls = ", ".join(
        [f"`{msg.checker} <{_generate_checker_url(msg)}>`__" for msg in messages]
    )
    with open(category_dir / f"{message.name}.rst", "w", encoding="utf-8") as stream:
        stream.write(_generate_single_message_body(message))
        stream.write(f"Created by the {checker_urls} checkers.")
def _write_single_message_page(category_dir: Path, message: MessageData) -> None:
    """Write the page for a message emitted by a single checker."""
    rst_path = category_dir / f"{message.name}.rst"
    checker_url = _generate_checker_url(message)
    with open(rst_path, "w", encoding="utf-8") as stream:
        stream.write(_generate_single_message_body(message))
        stream.write(f"Created by the `{message.checker} <{checker_url}>`__ checker.")
def _write_messages_list_page(
    messages_dict: MessagesDict, old_messages_dict: OldMessagesDict
) -> None:
    """Create or overwrite the page with the list of all messages."""
    messages_file = os.path.join(PYLINT_MESSAGES_PATH, "messages_overview.rst")
    with open(messages_file, "w", encoding="utf-8") as stream:
        # Write header of file
        title = "Messages overview"
        stream.write(
            f"""
.. _messages-overview:
{"#" * len(title)}
{get_rst_title(title, "#")}
.. This file is auto-generated. Make any changes to the associated
.. docs extension in 'doc/exts/pylint_messages.py'.
Pylint can emit the following messages:
"""
        )
        # Iterate over tuple to keep same order
        for category in (
            "fatal",
            "error",
            "warning",
            "convention",
            "refactor",
            "information",
        ):
            # We need to remove all duplicated shared messages
            # (dedupe by message id, then sort alphabetically by symbol name).
            messages = sorted(
                {msg.id: msg for msg in messages_dict[category]}.values(),
                key=lambda item: item.name,
            )
            old_messages = sorted(old_messages_dict[category], key=lambda item: item[0])
            # One toctree entry per (renamed) message.
            messages_string = "".join(
                f"   {category}/{message.name}\n" for message in messages
            )
            old_messages_string = "".join(
                f"   {category}/{old_message[0]}\n" for old_message in old_messages
            )
            # Write list per category. We need the '-category' suffix in the reference
            # because 'fatal' is also a message's symbol
            stream.write(
                f"""
.. _{category.lower()}-category:
{get_rst_title(category.capitalize(), "*")}
All messages in the {category} category:
.. toctree::
   :maxdepth: 2
   :titlesonly:
{messages_string}
All renamed messages in the {category} category:
.. toctree::
   :maxdepth: 1
   :titlesonly:
{old_messages_string}"""
            )
def _write_redirect_pages(old_messages: OldMessagesDict) -> None:
    """Create redirect pages for old-messages.

    Writes one redirect page per renamed message, per category.
    """
    for category, old_names in old_messages.items():
        category_dir = PYLINT_MESSAGES_PATH / category
        # mkdir(parents=True, exist_ok=True) replaces the racy
        # "exists() then makedirs()" pattern and matches the pathlib style
        # used by _write_message_page.
        category_dir.mkdir(parents=True, exist_ok=True)
        for old_name, new_names in old_names.items():
            _write_redirect_old_page(category_dir, old_name, new_names)
def _write_redirect_old_page(
    category_dir: Path,
    old_name: tuple[str, str],
    new_names: list[tuple[str, str]],
) -> None:
    """Write a redirect page for a renamed message.

    Args:
        category_dir: Directory holding the pages of the message's category.
        old_name: ``(old_symbol, old_msgid)`` pair of the renamed message.
        new_names: ``(new_symbol, new_category)`` pairs the old name maps to.
    """
    # Use pathlib for consistency with the rest of this module
    # (was os.path.join called on a Path object).
    old_name_file = category_dir / f"{old_name[0]}.rst"
    # One toctree entry per new name, pointing into the new category directory.
    new_names_string = "".join(
        f"   ../{new_name[1]}/{new_name[0]}.rst\n" for new_name in new_names
    )
    content = f""".. _{old_name[0]}:
{get_rst_title(" / ".join(old_name), "=")}
'{old_name[0]}' has been renamed. The new message can be found at:
.. toctree::
   :maxdepth: 2
   :titlesonly:
{new_names_string}
"""
    old_name_file.write_text(content, encoding="utf-8")
# pylint: disable-next=unused-argument
def build_messages_pages(app: Sphinx | None) -> None:
    """Overwrite messages files by printing the documentation to a stream.
    Documentation is written in ReST format.

    ``app`` is accepted (and ignored) so this can be used directly as a
    Sphinx event callback; pass None when calling it by hand.
    """
    # Create linter, register all checkers and extensions and get all messages
    linter = PyLinter()
    _register_all_checkers_and_extensions(linter)
    messages, old_messages = _get_all_messages(linter)
    # Write message and category pages
    _write_message_page(messages)
    _write_messages_list_page(messages, old_messages)
    # Write redirect pages
    _write_redirect_pages(old_messages)
def setup(app: Sphinx) -> dict[str, bool]:
    """Connects the extension to the Sphinx process."""
    # Register callback at the builder-inited Sphinx event
    # See https://www.sphinx-doc.org/en/master/extdev/appapi.html
    app.connect("builder-inited", build_messages_pages)
    # Declare that Sphinx may read sources in parallel with this extension.
    return {"parallel_read_safe": True}
if __name__ == "__main__":
    # Intentionally a no-op: this module is meant to run as a Sphinx extension
    # (see setup()), not as a standalone script.
    pass
    # Uncomment to allow running this script by your local python interpreter
    # build_messages_pages(None)
| ExampleType |
python | PrefectHQ__prefect | src/prefect/events/schemas/events.py | {
"start": 2173,
"end": 3135
} | class ____(Resource):
"""A Resource with a specific role in an Event"""
@model_validator(mode="after")
def requires_resource_role(self) -> Self:
if "prefect.resource.role" not in self.root:
raise ValueError(
"Related Resources must include the prefect.resource.role label"
)
if not self.root["prefect.resource.role"]:
raise ValueError("The prefect.resource.role label must be non-empty")
return self
@property
def role(self) -> str:
return self["prefect.resource.role"]
def _validate_related_resources(value) -> List:
from prefect.settings import PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES
if len(value) > PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES.value():
raise ValueError(
"The maximum number of related resources "
f"is {PREFECT_EVENTS_MAXIMUM_RELATED_RESOURCES.value()}"
)
return value
| RelatedResource |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py | {
"start": 33321,
"end": 49373
} | class ____(RendezvousHandler):
"""Represent a handler that sets up a rendezvous among a set of nodes."""
# Static
_node_desc_generator = _NodeDescGenerator()
_this_node: _NodeDesc
_settings: RendezvousSettings
_backend_name: str
_store: Store
_state_holder: _RendezvousStateHolder
_op_executor: _RendezvousOpExecutor
_heartbeat_lock: threading.Lock
_keep_alive_timer: _PeriodicTimer | None
@classmethod
def from_backend(
cls,
run_id: str,
store: Store,
backend: RendezvousBackend,
min_nodes: int,
max_nodes: int,
local_addr: str | None = None,
timeout: RendezvousTimeout | None = None,
keep_alive_interval: int = 5,
keep_alive_max_attempt: int = 3,
):
"""Create a new :py:class:`DynamicRendezvousHandler`.
Args:
run_id:
The run id of the rendezvous.
store:
The C10d store to return as part of the rendezvous.
backend:
The backend to use to hold the rendezvous state.
min_nodes:
The minimum number of nodes to admit to the rendezvous.
max_nodes:
The maximum number of nodes to admit to the rendezvous.
local_addr:
The local node address.
timeout:
The timeout configuration of the rendezvous.
keep_alive_interval:
The amount of time a node waits before sending a heartbeat to keep
it alive in the rendezvous.
keep_alive_max_attempt:
The maximum number of failed heartbeat attempts after which a node
is considered dead.
"""
# We associate each handler instance with a unique node descriptor.
node = cls._node_desc_generator.generate(local_addr)
settings = RendezvousSettings(
run_id,
min_nodes,
max_nodes,
timeout or RendezvousTimeout(),
keep_alive_interval=timedelta(seconds=keep_alive_interval),
keep_alive_max_attempt=keep_alive_max_attempt,
)
state_holder = _BackendRendezvousStateHolder(backend, settings)
return cls(node, settings, backend.name, store, state_holder)
def __init__(
self,
node: _NodeDesc,
settings: RendezvousSettings,
backend_name: str,
store: Store,
state_holder: _RendezvousStateHolder,
) -> None:
if not settings.run_id:
raise ValueError("The run id must be a non-empty string.")
if settings.min_nodes < 1:
raise ValueError(
f"The minimum number of nodes ({settings.min_nodes}) must be greater than zero."
)
if settings.max_nodes < settings.min_nodes:
raise ValueError(
f"The maximum number of nodes ({settings.max_nodes}) must be greater than or equal "
f"to the minimum number of nodes ({settings.min_nodes})."
)
self._this_node = node
self._settings = settings
self._backend_name = backend_name
self._store = store
self._state_holder = state_holder
self._op_executor = _DistributedRendezvousOpExecutor(
self._this_node, self._state_holder, self._settings
)
self._heartbeat_lock = threading.Lock()
self._keep_alive_timer = None
# Cached shared store server reference
self._shared_tcp_store_server: dist.Store | None = None
self._bootstrap_store_info: RendezvousStoreInfo | None = None
def _record(
self,
message: str,
node_state: NodeState = NodeState.RUNNING,
rank: int | None = None,
) -> None:
construct_and_record_rdzv_event(
name=f"{self.__class__.__name__}.{get_method_name()}",
run_id=self._settings.run_id,
message=message,
node_state=node_state,
hostname=self._this_node.addr,
pid=self._this_node.pid,
local_id=self._this_node.local_id,
rank=rank,
)
def _create_tcp_store_server(self, master_addr, master_port) -> dist.TCPStore:
return dist.TCPStore(
host_name=master_addr,
port=master_port,
is_master=True,
multi_tenant=True,
)
@property
def settings(self) -> RendezvousSettings:
"""Get the settings of the rendezvous."""
return self._settings
def get_backend(self) -> str:
"""See base class."""
return self._backend_name
@property
def use_agent_store(self) -> bool:
"""See base class."""
return os.getenv("TORCH_DISABLE_SHARE_RDZV_TCP_STORE", "0") != "1"
def next_rendezvous(self) -> RendezvousInfo:
"""See base class."""
msg = (
f"The node '{self._this_node}' attempts to join the next round of the rendezvous "
f"'{self._settings.run_id}'."
)
self._record(message=msg)
logger.info(msg)
try:
self._stop_heartbeats()
# Delay the execution for a small random amount of time if this is our
# first run. This will slightly skew the rendezvous attempts across the
# nodes and reduce the load on the backend.
if self._state_holder.state.round == 0:
_delay(seconds=(0, 0.3))
exit_op = _RendezvousExitOp()
join_op = _RendezvousJoinOp()
deadline = self._get_deadline(self._settings.timeout.join)
self._op_executor.run(exit_op, deadline)
self._op_executor.run(join_op, deadline, self._get_deadline)
self._start_heartbeats()
rank, world_size = self._get_world()
store = self._get_store()
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
msg = (
f"The node '{self._this_node}' has joined round {self._state_holder.state.round} of "
f"the rendezvous '{self._settings.run_id}' as rank {rank} in a world of size "
f"{world_size}."
)
self._record(message=msg, rank=rank)
logger.info(msg)
# opt-out option of TCPStore sharing
if os.getenv("TORCH_DISABLE_SHARE_RDZV_TCP_STORE", "0") == "1":
bootstrap_store_info = RendezvousStoreInfo.build(
rank, store, local_addr=self._this_node.addr
)
return RendezvousInfo(
store,
rank,
world_size,
bootstrap_store_info,
)
# This will only be hit when TCPStore sharing is enabled.
if self._bootstrap_store_info is None:
# To avoid race in get_free_port because we release the port after the call,
# we want to create a TCPStore server soon afterwards.
server_port = 0
if rank == 0:
self._shared_tcp_store_server = self._create_tcp_store_server(
self._this_node.addr, server_port
)
server_port = self._shared_tcp_store_server.port
self._bootstrap_store_info = RendezvousStoreInfo.build(
rank,
store,
local_addr=self._this_node.addr,
server_port=server_port, # For non-0 rank, this is a no-op
)
assert self._bootstrap_store_info is not None
if rank == 0:
assert self._shared_tcp_store_server is not None
return RendezvousInfo(
store,
rank,
world_size,
self._bootstrap_store_info, # type: ignore[assignment]
)
def is_closed(self) -> bool:
"""See base class."""
try:
with self._heartbeat_lock:
self._state_holder.sync()
return self._state_holder.state.closed
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def set_closed(self) -> None:
"""See base class."""
try:
with self._heartbeat_lock:
self._close()
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def num_nodes_waiting(self) -> int:
"""See base class."""
try:
with self._heartbeat_lock:
self._state_holder.sync()
return len(self._state_holder.state.wait_list)
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def get_run_id(self) -> str:
"""See base class."""
return self._settings.run_id
def shutdown(self) -> bool:
"""See base class."""
self._stop_heartbeats()
try:
self._close()
return True
except RendezvousError as ex:
msg = (
f"The node '{self._this_node}' has failed to shutdown the rendezvous "
f"'{self._settings.run_id}' due to an error of type {type(ex).__name__}."
)
self._record(message=msg, node_state=NodeState.FAILED)
logger.warning(msg)
return False
except Exception as e:
self._record(
message=f"{type(e).__name__}: {str(e)}",
node_state=NodeState.FAILED,
)
raise
def _close(self) -> None:
op = _RendezvousCloseOp()
deadline = self._get_deadline(self._settings.timeout.close)
self._op_executor.run(op, deadline)
msg = f"The node '{self._this_node}' has closed the rendezvous '{self._settings.run_id}'."
self._record(message=msg, node_state=NodeState.SUCCEEDED)
logger.info(msg)
@staticmethod
def _keep_alive_weak(weak_self) -> None:
self = weak_self()
if self is not None:
self._keep_alive()
def _keep_alive(self) -> None:
self._heartbeat_lock.acquire()
op = _RendezvousKeepAliveOp()
deadline = self._get_deadline(self._settings.timeout.heartbeat)
try:
self._op_executor.run(op, deadline)
msg = (
f"The node '{self._this_node}' has sent a keep-alive heartbeat to the rendezvous "
f"'{self._settings.run_id}'."
)
self._record(message=msg)
logger.debug(msg)
except RendezvousError as ex:
msg = (
f"The node '{self._this_node}' has failed to send a keep-alive heartbeat to the "
f"rendezvous '{self._settings.run_id}' due to an error of type {type(ex).__name__}."
)
self._record(message=msg, node_state=NodeState.FAILED)
logger.warning(msg)
finally:
self._heartbeat_lock.release()
def _start_heartbeats(self) -> None:
self._keep_alive_timer = _PeriodicTimer(
self._settings.keep_alive_interval, self._keep_alive_weak, weakref.ref(self)
)
self._keep_alive_timer.set_name(
f"RendezvousKeepAliveTimer_{self._this_node.local_id}"
)
self._keep_alive_timer.start()
def _stop_heartbeats(self) -> None:
if self._keep_alive_timer is None:
return
self._keep_alive_timer.cancel()
def _get_world(self) -> tuple[int, int]:
state = self._state_holder.state
return state.participants[self._this_node], len(state.participants)
def _wrap_store(self, store: Store) -> Store:
key_prefix = (
f"torch.rendezvous.{self._settings.run_id}.{self._state_holder.state.round}"
)
return dist.PrefixStore(key_prefix, store)
def _get_store(self) -> Store:
return self._wrap_store(self._store)
def _get_deadline(self, timeout: timedelta) -> float:
return time.monotonic() + timeout.total_seconds()
def _get_timeout(params: RendezvousParameters, key: str) -> timedelta | None:
timeout = params.get_as_int(key + "_timeout")
if timeout is None:
return None
return timedelta(seconds=timeout)
def create_handler(
store: Store, backend: RendezvousBackend, params: RendezvousParameters
) -> DynamicRendezvousHandler:
"""Create a new :py:class:`DynamicRendezvousHandler` from the specified parameters.
Args:
store:
The C10d store to return as part of the rendezvous.
backend:
The backend to use to hold the rendezvous state.
+-------------------+------------------------------------------------------+
| Parameter | Description |
+===================+======================================================+
| join_timeout | The total time, in seconds, within which the |
| | rendezvous is expected to complete. Defaults to 600 |
| | seconds. |
+-------------------+------------------------------------------------------+
| last_call_timeout | An additional wait amount, in seconds, before |
| | completing the rendezvous once the minimum number of |
| | nodes has been reached. Defaults to 30 seconds. |
+-------------------+------------------------------------------------------+
| close_timeout | The time, in seconds, within which the rendezvous is |
| | expected to close after a call to |
| | :py:meth:`RendezvousHandler.set_closed` or |
| | :py:meth:`RendezvousHandler.shutdown`. Defaults to |
| | 30 seconds. |
+-------------------+------------------------------------------------------+
| heartbeat | The time, in seconds, within which a keep-alive |
| | heartbeat is expected to complete |
+-------------------+------------------------------------------------------+
"""
try:
timeout = RendezvousTimeout(
_get_timeout(params, "join"),
_get_timeout(params, "last_call"),
_get_timeout(params, "close"),
_get_timeout(params, "heartbeat"),
)
keep_alive_interval = params.get_as_int("keep_alive_interval", 5)
if keep_alive_interval is None:
raise TypeError(
"You passed 'keep_alive_interval=None' as a rendezvous configuration option"
)
keep_alive_max_attempt = params.get_as_int("keep_alive_max_attempt", 3)
if keep_alive_max_attempt is None:
raise TypeError(
"You passed 'keep_alive_max_attempt=None' as a rendezvous configuration option"
)
return DynamicRendezvousHandler.from_backend(
params.run_id,
store,
backend,
params.min_nodes,
params.max_nodes,
params.local_addr,
timeout,
keep_alive_interval=keep_alive_interval,
keep_alive_max_attempt=keep_alive_max_attempt,
)
except Exception as e:
construct_and_record_rdzv_event(
message=f"{type(e).__name__}: {str(e)}",
run_id=params.run_id,
node_state=NodeState.FAILED,
)
raise
| DynamicRendezvousHandler |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 618936,
"end": 619527
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("SponsorEdge"), graphql_name="edges")
nodes = sgqlc.types.Field(sgqlc.types.list_of("Sponsor"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| SponsorConnection |
python | readthedocs__readthedocs.org | readthedocs/organizations/filters.py | {
"start": 3088,
"end": 3791
} | class ____(OrganizationFilterSet):
"""Filter and sorting for organization listing page."""
slug = FilteredModelChoiceFilter(
label=_("Organization"),
empty_label=_("All organizations"),
to_field_name="slug",
queryset_method="get_organization_queryset",
method="get_organization",
label_attribute="name",
)
sort = OrganizationSortOrderingFilter(
field_name="sort",
label=_("Sort by"),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_organization(self, queryset, field_name, organization):
return queryset.filter(slug=organization.slug)
| OrganizationListFilterSet |
python | numba__numba | numba/tests/test_try_except.py | {
"start": 21315,
"end": 23877
} | class ____(TestCase):
def test_try_in_prange_reduction(self):
# The try-except is transformed basically into chains of if-else
def udt(n):
c = 0
for i in prange(n):
try:
c += 1
except Exception:
c += 1
return c
args = [10]
expect = udt(*args)
self.assertEqual(njit(parallel=False)(udt)(*args), expect)
self.assertEqual(njit(parallel=True)(udt)(*args), expect)
def test_try_outside_prange_reduction(self):
# The try-except is transformed basically into chains of if-else
def udt(n):
c = 0
try:
for i in prange(n):
c += 1
except Exception:
return 0xdead
else:
return c
args = [10]
expect = udt(*args)
self.assertEqual(njit(parallel=False)(udt)(*args), expect)
# Parfors transformation didn't happen
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaPerformanceWarning)
self.assertEqual(njit(parallel=True)(udt)(*args), expect)
self.assertEqual(len(w), 1)
self.assertIn("no transformation for parallel execution was possible",
str(w[0]))
def test_try_in_prange_map(self):
def udt(arr, x):
out = arr.copy()
for i in prange(arr.size):
try:
if i == x:
raise ValueError
out[i] = arr[i] + i
except Exception:
out[i] = -1
return out
args = [np.arange(10), 6]
expect = udt(*args)
self.assertPreciseEqual(njit(parallel=False)(udt)(*args), expect)
self.assertPreciseEqual(njit(parallel=True)(udt)(*args), expect)
def test_try_outside_prange_map(self):
def udt(arr, x):
out = arr.copy()
try:
for i in prange(arr.size):
if i == x:
raise ValueError
out[i] = arr[i] + i
except Exception:
out[i] = -1
return out
args = [np.arange(10), 6]
expect = udt(*args)
self.assertPreciseEqual(njit(parallel=False)(udt)(*args), expect)
self.assertPreciseEqual(njit(parallel=True)(udt)(*args), expect)
if __name__ == '__main__':
unittest.main()
| TestTryExceptParfors |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 184105,
"end": 184589
} | class ____(BuiltinTypeConstructorObjectType):
def specialize_here(self, pos, env, template_values=None):
if (template_values and None not in template_values and
not any(v.is_pyobject for v in template_values)):
entry = env.declare_tuple_type(pos, template_values)
if entry:
entry.used = True
return entry.type
return super().specialize_here(pos, env, template_values)
| PythonTupleTypeConstructor |
python | numpy__numpy | numpy/_core/code_generators/generate_umath.py | {
"start": 537,
"end": 929
} | class ____:
@staticmethod
def get(place):
"""
Returns the C #definition name of docstring according
to ufunc place. C #definitions are generated by generate_umath_doc.py
in a separate C header.
"""
return 'DOC_' + place.upper().replace('.', '_')
# Sentinel value to specify using the full type description in the
# function name
| docstrings |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType46.py | {
"start": 181,
"end": 418
} | class ____(Generic[T]):
def __init__(self, vals: list[U], func: Callable[[U], T]):
self._vals = list(map(func, vals))
def method1(self, func: Callable[[T], W]) -> "ClassA[W]":
return ClassA(self._vals, func)
| ClassA |
python | kamyu104__LeetCode-Solutions | Python/day-of-the-year.py | {
"start": 621,
"end": 1063
} | class ____(object):
def dayOfYear(self, date):
"""
:type date: str
:rtype: int
"""
def numberOfDays(Y, M):
leap = 1 if ((Y % 4 == 0) and (Y % 100 != 0)) or (Y % 400 == 0) else 0
return (28+leap if (M == 2) else 31-(M-1)%7%2)
Y, M, result = map(int, date.split("-"))
for i in xrange(1, M):
result += numberOfDays(Y, i)
return result
| Solution2 |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1170336,
"end": 1171661
} | class ____(VegaLiteSchema):
"""
SelectionConfig schema wrapper.
Parameters
----------
interval : dict, :class:`IntervalSelectionConfigWithoutType`
The default definition for an `interval
<https://vega.github.io/vega-lite/docs/parameter.html#select>`__ selection. All
properties and transformations for an interval selection definition (except
``type``) may be specified here.
For instance, setting ``interval`` to ``{"translate": false}`` disables the ability
to move interval selections by default.
point : dict, :class:`PointSelectionConfigWithoutType`
The default definition for a `point
<https://vega.github.io/vega-lite/docs/parameter.html#select>`__ selection. All
properties and transformations for a point selection definition (except ``type``)
may be specified here.
For instance, setting ``point`` to ``{"on": "dblclick"}`` populates point selections
on double-click by default.
"""
_schema = {"$ref": "#/definitions/SelectionConfig"}
def __init__(
self,
interval: Optional[SchemaBase | Map] = Undefined,
point: Optional[SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(interval=interval, point=point, **kwds)
| SelectionConfig |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/instigation.py | {
"start": 11637,
"end": 20796
} | class ____(NamedTuple("_InstigatorTick", [("tick_id", int), ("tick_data", "TickData")])):
def __new__(cls, tick_id: int, tick_data: "TickData"):
return super().__new__(
cls,
check.int_param(tick_id, "tick_id"),
check.inst_param(tick_data, "tick_data", TickData),
)
def with_status(self, status: TickStatus, **kwargs: Any):
check.inst_param(status, "status", TickStatus)
end_timestamp = get_current_timestamp() if status != TickStatus.STARTED else None
kwargs["end_timestamp"] = end_timestamp
return self._replace(tick_data=self.tick_data.with_status(status, **kwargs))
def with_run_requests(
self, run_requests: Sequence[RunRequest], **kwargs: Any
) -> "InstigatorTick":
return self._replace(tick_data=self.tick_data.with_run_requests(run_requests, **kwargs))
def with_reason(self, skip_reason: str) -> "InstigatorTick":
check.opt_str_param(skip_reason, "skip_reason")
return self._replace(tick_data=self.tick_data.with_reason(skip_reason))
def with_run_info(self, run_id: Optional[str] = None, run_key: Optional[str] = None):
return self._replace(tick_data=self.tick_data.with_run_info(run_id, run_key))
def with_cursor(self, cursor: Optional[str]) -> "InstigatorTick":
return self._replace(tick_data=self.tick_data.with_cursor(cursor))
def with_origin_run(self, origin_run_id: str) -> "InstigatorTick":
return self._replace(tick_data=self.tick_data.with_origin_run(origin_run_id))
def with_log_key(self, log_key: Sequence[str]) -> "InstigatorTick":
return self._replace(tick_data=self.tick_data.with_log_key(log_key))
def with_dynamic_partitions_request_result(
self,
dynamic_partitions_request_result: DynamicPartitionsRequestResult,
) -> "InstigatorTick":
return self._replace(
tick_data=self.tick_data.with_dynamic_partitions_request_result(
dynamic_partitions_request_result
)
)
def with_user_interrupted(self, user_interrupted: bool) -> "InstigatorTick":
return self._replace(
tick_data=self.tick_data.with_user_interrupted(user_interrupted=user_interrupted)
)
@property
def instigator_origin_id(self) -> str:
return self.tick_data.instigator_origin_id
@property
def selector_id(self) -> Optional[str]:
return self.tick_data.selector_id
@property
def instigator_name(self) -> str:
return self.tick_data.instigator_name
@property
def instigator_type(self) -> InstigatorType:
return self.tick_data.instigator_type
@property
def timestamp(self) -> float:
return self.tick_data.timestamp
@property
def end_timestamp(self) -> Optional[float]:
return self.tick_data.end_timestamp
@property
def status(self) -> TickStatus:
return self.tick_data.status
@property
def run_ids(self) -> Sequence[str]:
return self.tick_data.run_ids
@property
def run_keys(self) -> Sequence[str]:
return self.tick_data.run_keys
@property
def error(self) -> Optional[SerializableErrorInfo]:
return self.tick_data.error
@property
def skip_reason(self) -> Optional[str]:
return self.tick_data.skip_reason
@property
def cursor(self) -> Optional[str]:
return self.tick_data.cursor
@property
def origin_run_ids(self) -> Optional[Sequence[str]]:
return self.tick_data.origin_run_ids
@property
def failure_count(self) -> int:
return self.tick_data.failure_count
@property
def log_key(self) -> Optional[list[str]]:
    # Delegates to the underlying tick_data record; may be None.
    return self.tick_data.log_key
@property
def consecutive_failure_count(self) -> int:
    # Delegates to the underlying tick_data record.
    return self.tick_data.consecutive_failure_count
@property
def is_completed(self) -> bool:
    """Whether this tick has reached one of the terminal statuses."""
    return self.tick_data.status in (
        TickStatus.SUCCESS,
        TickStatus.FAILURE,
        TickStatus.SKIPPED,
    )
@property
def is_failure(self) -> bool:
    # True iff the tick finished with FAILURE status.
    return self.tick_data.status == TickStatus.FAILURE
@property
def is_success(self) -> bool:
    # True iff the tick finished with SUCCESS status.
    return self.tick_data.status == TickStatus.SUCCESS
@property
def dynamic_partitions_request_results(
    self,
) -> Sequence[DynamicPartitionsRequestResult]:
    # Delegates to the underlying tick_data record.
    return self.tick_data.dynamic_partitions_request_results
@property
def requested_asset_materialization_count(self) -> int:
    """Total number of asset partitions and asset checks requested by this tick.

    Returns 0 unless the tick finished with SUCCESS status. Asset partitions
    requested by ordinary (single-run) requests are deduplicated; backfill
    requests contribute their subset's partition count directly.
    """
    if self.tick_data.status != TickStatus.SUCCESS:
        return 0
    # Deduplicate (asset key, partition key) pairs across single-run requests.
    single_run_partitions = set()
    backfill_asset_count = 0
    check_count = 0
    for request in self.tick_data.run_requests or []:
        if request.requires_backfill_daemon():
            subset = check.not_none(request.asset_graph_subset)
            backfill_asset_count += subset.num_partitions_and_non_partitioned_assets
        else:
            for key in request.asset_selection or []:
                single_run_partitions.add(
                    AssetKeyPartitionKey(key, request.partition_key)
                )
            check_count += len(request.asset_check_keys or [])
    return len(single_run_partitions) + backfill_asset_count + check_count
@property
def requested_assets_and_partitions(self) -> Mapping[AssetKey, AbstractSet[str]]:
    """Map each requested asset key to the set of requested partition keys.

    Returns an empty mapping unless the tick finished with SUCCESS status.
    For asset-check requests, the check *name* is added to the asset's set,
    mirroring how partition keys are recorded.
    """
    if self.tick_data.status != TickStatus.SUCCESS:
        return {}
    result: dict = {}
    for request in self.tick_data.run_requests or []:
        if request.requires_backfill_daemon():
            subset = check.not_none(request.asset_graph_subset)
            for akpk in subset.iterate_asset_partitions():
                partitions = result.setdefault(akpk.asset_key, set())
                if akpk.partition_key:
                    partitions.add(akpk.partition_key)
        else:
            for key in request.asset_selection or []:
                partitions = result.setdefault(key, set())
                if request.partition_key:
                    partitions.add(request.partition_key)
            for check_key in request.asset_check_keys or []:
                result.setdefault(check_key.asset_key, set()).add(check_key.name)
    return result
@property
def requested_asset_keys(self) -> AbstractSet[AssetKey]:
    """Asset keys requested by this tick; empty unless the tick succeeded."""
    if self.tick_data.status != TickStatus.SUCCESS:
        return set()
    # Iterating the mapping yields its keys.
    return set(self.requested_assets_and_partitions)
@property
def run_requests(self) -> Optional[Sequence[RunRequest]]:
    # Delegates to the underlying tick_data record; may be None.
    return self.tick_data.run_requests
@property
def reserved_run_ids_with_requests(self) -> Iterable[tuple[str, RunRequest]]:
    """Pair each reserved run id with its run request, in order."""
    run_ids = self.tick_data.reserved_run_ids or []
    requests = self.run_requests or []
    return zip(run_ids, requests)
@property
def unsubmitted_run_ids_with_requests(self) -> Sequence[tuple[str, RunRequest]]:
    """Reserved (run_id, run_request) pairs for which no run was launched yet."""
    # Reserved ids that never made it into tick_data.run_ids are unsubmitted.
    pending_ids = set(self.tick_data.reserved_run_ids or []) - set(self.tick_data.run_ids)
    return [
        (reserved_id, request)
        for reserved_id, request in self.reserved_run_ids_with_requests
        if reserved_id in pending_ids
    ]
@property
def automation_condition_evaluation_id(self) -> int:
    """Returns a unique identifier for the current automation condition evaluation.

    In general this is identical to the current tick id, but when an evaluation
    needs to be retried an override value may be set on the tick data.
    """
    override = self.tick_data.auto_materialize_evaluation_id
    return override if override is not None else self.tick_id
@whitelist_for_serdes(
old_storage_names={"JobTickData"},
storage_field_names={
"instigator_origin_id": "job_origin_id",
"instigator_name": "job_name",
"instigator_type": "job_type",
},
)
| InstigatorTick |
python | readthedocs__readthedocs.org | readthedocs/api/v2/serializers.py | {
"start": 6448,
"end": 6891
} | class ____(serializers.ModelSerializer):
run_time = serializers.ReadOnlyField()
class Meta:
model = BuildCommandResult
exclude = []
def update(self, instance, validated_data):
# Build isn't allowed to be updated after creation
# (e.g. to avoid moving commands to another build).
validated_data.pop("build", None)
return super().update(instance, validated_data)
| BuildCommandSerializer |
python | mlflow__mlflow | mlflow/utils/async_logging/run_operations.py | {
"start": 0,
"end": 1944
} | class ____:
"""Class that helps manage the futures of MLflow async logging."""
def __init__(self, operation_futures):
self._operation_futures = operation_futures or []
def wait(self):
"""Blocks on completion of all futures."""
from mlflow.exceptions import MlflowException
failed_operations = []
for future in self._operation_futures:
try:
future.result()
except Exception as e:
failed_operations.append(e)
if len(failed_operations) > 0:
raise MlflowException(
"The following failures occurred while performing one or more async logging "
f"operations: {failed_operations}"
)
def get_combined_run_operations(run_operations_list: list[RunOperations]) -> RunOperations:
"""Combine a list of RunOperations objects into a single RunOperations object.
Given a list of `RunOperations`, returns a single `RunOperations` object that represents the
combined set of operations. If the input list is empty, returns None. If the input list
contains only one element, returns that element. Otherwise, creates a new `RunOperations`
object that combines the operation futures from each input RunOperations object.
Args:
run_operations_list: A list of `RunOperations` objects to combine.
Returns:
A single `RunOperations` object that represents the combined set of operations.
"""
if not run_operations_list:
return None
if len(run_operations_list) == 1:
return run_operations_list[0]
if len(run_operations_list) > 1:
operation_futures = []
for run_operations in run_operations_list:
if run_operations and run_operations._operation_futures:
operation_futures.extend(run_operations._operation_futures)
return RunOperations(operation_futures)
| RunOperations |
python | wandb__wandb | wandb/sdk/lib/console_capture.py | {
"start": 1363,
"end": 7998
} | class ____(Protocol):
"""A callback that receives intercepted bytes or string data.
This may be called from any thread, but is only called from one thread
at a time.
Note on errors: Any error raised during the callback will clear all
callbacks. This means that if a user presses Ctrl-C at an unlucky time
during a run, we will stop uploading console output---but it's not
likely to be a problem unless something catches the KeyboardInterrupt.
Regular Exceptions are caught and logged instead of bubbling up to the
user's print() statements; other exceptions like KeyboardInterrupt are
re-raised.
Callbacks should handle all exceptions---a callback that raises any
Exception is considered buggy.
"""
def __call__(
self,
data: bytes | str,
written: int,
/,
) -> None:
"""Intercept data passed to `write()`.
See the protocol docstring for information about exceptions.
Args:
data: The object passed to stderr's or stdout's `write()`.
written: The number of bytes or characters written.
This is the return value of `write()`.
"""
_module_lock = threading.Lock()
# See _enter_callbacks().
_is_writing = False
_is_caused_by_callback = contextvars.ContextVar(
"_is_caused_by_callback",
default=False,
)
_patch_exception: CannotCaptureConsoleError | None = None
_next_callback_id: int = 1
_stdout_callbacks: dict[int, _WriteCallback] = {}
_stderr_callbacks: dict[int, _WriteCallback] = {}
def capture_stdout(callback: _WriteCallback) -> Callable[[], None]:
"""Install a callback that runs after every write to sys.stdout.
Args:
callback: A callback to invoke after running `sys.stdout.write`.
Returns:
A function to uninstall the callback.
Raises:
CannotCaptureConsoleError: If patching failed on import.
"""
with _module_lock:
if _patch_exception:
raise _patch_exception
return _insert_disposably(
_stdout_callbacks,
callback,
)
def capture_stderr(callback: _WriteCallback) -> Callable[[], None]:
"""Install a callback that runs after every write to sys.sdterr.
Args:
callback: A callback to invoke after running `sys.stderr.write`.
Returns:
A function to uninstall the callback.
Raises:
CannotCaptureConsoleError: If patching failed on import.
"""
with _module_lock:
if _patch_exception:
raise _patch_exception
return _insert_disposably(
_stderr_callbacks,
callback,
)
def _insert_disposably(
callback_dict: dict[int, _WriteCallback],
callback: _WriteCallback,
) -> Callable[[], None]:
global _next_callback_id
id = _next_callback_id
_next_callback_id += 1
disposed = False
def dispose() -> None:
nonlocal disposed
with _module_lock:
if disposed:
return
callback_dict.pop(id, None)
disposed = True
callback_dict[id] = callback
return dispose
def _patch(
stdout_or_stderr: IO[AnyStr],
callbacks: dict[int, _WriteCallback],
) -> None:
orig_write: Callable[[AnyStr], int]
def write_with_callbacks(s: AnyStr, /) -> int:
n = orig_write(s)
with contextlib.ExitStack() as stack:
stack.enter_context(_reset_on_exception())
stack.enter_context(wb_logging.log_to_all_runs())
callbacks_list = stack.enter_context(_enter_callbacks(callbacks))
for cb in callbacks_list:
cb(s, n)
return n
orig_write = stdout_or_stderr.write
# mypy==1.14.1 fails to type-check this:
# Incompatible types in assignment (expression has type
# "Callable[[bytes], int]", variable has type overloaded function)
stdout_or_stderr.write = write_with_callbacks # type: ignore
@contextlib.contextmanager
def _enter_callbacks(
callbacks: dict[int, _WriteCallback],
) -> Iterator[list[_WriteCallback]]:
"""Returns a list of callbacks to invoke.
This prevents deadlocks and some infinite loops by returning an empty list
when:
* A callback prints
* A callback blocks on a thread that's printing
* A callback schedules an async task that prints
It is impossible to prevent all infinite loops: a callback could put
a message into a queue, causing an unrelated thread to print later,
invoking the same callback and repeating forever.
"""
global _is_writing
# The global _is_writing variable is necessary despite the contextvar
# because it's possible to create a thread without copying the context.
# This is the default behavior for threading.Thread() before Python 3.14.
#
# A side effect of it is that when multiple threads print simultaneously,
# some messages will not be captured.
with _module_lock:
if _is_writing or _is_caused_by_callback.get():
callbacks_list = None
else:
callbacks_list = list(callbacks.values())
_is_writing = True
_is_caused_by_callback.set(True)
if callbacks_list is None:
yield []
return
try:
yield callbacks_list
finally:
with _module_lock:
_is_writing = False
_is_caused_by_callback.set(False)
@contextlib.contextmanager
def _reset_on_exception() -> Iterator[None]:
"""Clear all callbacks on any exception, suppressing it.
This prevents infinite loops:
* If we re-raise, an exception handler is likely to print
the exception to the console and trigger callbacks again
* If we log, we can't guarantee that this doesn't print
to console.
This is especially important for KeyboardInterrupt.
"""
try:
yield
except BaseException as e:
with _module_lock:
_stderr_callbacks.clear()
_stdout_callbacks.clear()
if isinstance(e, Exception):
# We suppress Exceptions so that bugs in W&B code don't
# cause the user's print() statements to raise errors.
_logger.exception("Error in console callback, clearing all!")
else:
# Re-raise errors like KeyboardInterrupt.
raise
try:
_patch(sys.stdout, _stdout_callbacks)
_patch(sys.stderr, _stderr_callbacks)
except Exception as _patch_exception_cause:
_patch_exception = CannotCaptureConsoleError()
_patch_exception.__cause__ = _patch_exception_cause
| _WriteCallback |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/partial.py | {
"start": 4016,
"end": 4267
} | class ____:
@NestedDefineDecorator
def __init__(self, x: str, y: str) -> None:
self.x = x
self.y = y
def dunder_call_nested_define_constructor(x: str, y: str) -> C:
return NestedDefineConstructor(x, y)
| NestedDefineConstructor |
python | pypa__warehouse | tests/unit/legacy/api/test_json.py | {
"start": 14676,
"end": 15415
} | class ____:
def test_normalizing_redirects(self, db_request):
project = ProjectFactory.create()
release = ReleaseFactory.create(project=project, version="1.0")
db_request.matchdict = {"name": project.name.swapcase()}
db_request.current_route_path = pretend.call_recorder(
lambda name: "/project/the-redirect/"
)
resp = json.json_project_slash(release, db_request)
assert isinstance(resp, HTTPMovedPermanently)
assert resp.headers["Location"] == "/project/the-redirect/"
_assert_has_cors_headers(resp.headers)
assert db_request.current_route_path.calls == [
pretend.call(name=project.normalized_name)
]
| TestJSONProjectSlash |
python | falconry__falcon | tests/test_httperror.py | {
"start": 4366,
"end": 4453
} | class ____:
def on_get(self, req, resp):
raise falcon.HTTPGone()
| GoneResource |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_sql_to_s3.py | {
"start": 1127,
"end": 25087
} | class ____:
@pytest.mark.parametrize(
("file_format", "dtype_backend", "df_kwargs", "expected_key_suffix"),
[
("csv", "numpy_nullable", {"index": False, "header": False}, ".csv"),
("csv", "pyarrow", {"index": False, "header": False}, ".csv"),
("parquet", "numpy_nullable", {}, ".parquet"),
("parquet", "pyarrow", {}, ".parquet"),
(
"json",
None,
{"date_format": "iso", "lines": True, "orient": "records"},
".json",
),
],
)
@mock.patch("airflow.providers.amazon.aws.transfers.sql_to_s3.S3Hook")
def test_execute_formats(self, mock_s3_hook, file_format, dtype_backend, df_kwargs, expected_key_suffix):
query = "query"
s3_bucket = "bucket"
s3_key = "key"
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
read_df_kwargs = {"dtype_backend": dtype_backend} if dtype_backend else {}
op = SqlToS3Operator(
query=query,
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="mysql_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
file_format=file_format,
replace=True,
read_kwargs=read_df_kwargs,
df_kwargs=df_kwargs,
dag=None,
)
op._get_hook = mock_dbapi_hook
op.execute(None)
mock_s3_hook.assert_called_once_with(aws_conn_id="aws_conn_id", verify=None)
expected_df_kwargs = {
"sql": query,
"parameters": None,
"df_type": "pandas",
}
if dtype_backend:
expected_df_kwargs["dtype_backend"] = dtype_backend
get_df_mock.assert_called_once_with(**expected_df_kwargs)
file_obj = mock_s3_hook.return_value.load_file_obj.call_args[1]["file_obj"]
assert isinstance(file_obj, io.BytesIO)
mock_s3_hook.return_value.load_file_obj.assert_called_once_with(
file_obj=file_obj,
key=f"{s3_key}{expected_key_suffix}",
bucket_name=s3_bucket,
replace=True,
)
@mock.patch("airflow.providers.amazon.aws.transfers.sql_to_s3.S3Hook")
def test_execute_gzip_with_bytesio(self, mock_s3_hook):
query = "query"
s3_bucket = "bucket"
s3_key = "key.csv.gz"
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
op = SqlToS3Operator(
query=query,
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="mysql_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
replace=True,
df_kwargs={"index": False, "compression": "gzip"},
dag=None,
)
op._get_hook = mock_dbapi_hook
op.execute(None)
mock_s3_hook.assert_called_once_with(aws_conn_id="aws_conn_id", verify=None)
get_df_mock.assert_called_once_with(sql=query, parameters=None, df_type="pandas")
file_obj = mock_s3_hook.return_value.load_file_obj.call_args[1]["file_obj"]
assert isinstance(file_obj, io.BytesIO)
mock_s3_hook.return_value.load_file_obj.assert_called_once_with(
file_obj=file_obj, key=s3_key, bucket_name=s3_bucket, replace=True
)
file_obj.seek(0)
with gzip.GzipFile(fileobj=file_obj, mode="rb") as gz:
decompressed_buf = io.BytesIO(gz.read())
decompressed_buf.seek(0)
read_df = pd.read_csv(decompressed_buf, dtype={"a": str, "b": str})
assert read_df.equals(test_df)
@pytest.mark.parametrize(
"params",
[
pytest.param({"file_format": "csv", "null_string_result": None}, id="with-csv"),
pytest.param({"file_format": "parquet", "null_string_result": "None"}, id="with-parquet"),
],
)
def test_fix_dtypes(self, params):
op = SqlToS3Operator(
query="query",
s3_bucket="s3_bucket",
s3_key="s3_key",
task_id="task_id",
sql_conn_id="mysql_conn_id",
file_format=params["file_format"],
)
dirty_df = pd.DataFrame({"strings": ["a", "b", None], "ints": [1, 2, None]})
op._fix_dtypes(df=dirty_df, file_format=op.file_format)
assert dirty_df["strings"].values[2] == params["null_string_result"]
assert dirty_df["ints"].dtype.kind == "i"
@mock.patch("airflow.providers.amazon.aws.transfers.sql_to_s3.S3Hook")
def test_fix_dtypes_not_called(self, mock_s3_hook):
query = "query"
s3_bucket = "bucket"
s3_key = "key"
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
op = SqlToS3Operator(
query=query,
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="mysql_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
read_kwargs={"dtype_backend": "pyarrow"},
file_format="parquet",
replace=True,
dag=None,
)
op._get_hook = mock_dbapi_hook
with mock.patch.object(SqlToS3Operator, "_fix_dtypes") as mock_fix_dtypes:
op.execute(None)
mock_fix_dtypes.assert_not_called()
def test_invalid_file_format(self):
with pytest.raises(AirflowException):
SqlToS3Operator(
query="query",
s3_bucket="bucket",
s3_key="key",
sql_conn_id="mysql_conn_id",
task_id="task_id",
file_format="invalid_format",
dag=None,
)
def test_with_groupby_kwarg(self):
"""
Test operator when the groupby_kwargs is specified
"""
query = "query"
s3_bucket = "bucket"
s3_key = "key"
op = SqlToS3Operator(
query=query,
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="mysql_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
replace=True,
df_kwargs={"index": False, "header": False},
groupby_kwargs={"by": "Team"},
dag=None,
)
example = {
"Team": ["Australia", "Australia", "India", "India"],
"Player": ["Ricky", "David Warner", "Virat Kohli", "Rohit Sharma"],
"Runs": [345, 490, 672, 560],
}
df = pd.DataFrame(example)
data = []
for group_name, df in op._partition_dataframe(df):
data.append((group_name, df))
data.sort(key=lambda d: d[0])
team, df = data[0]
assert df.equals(
pd.DataFrame(
{
"Team": ["Australia", "Australia"],
"Player": ["Ricky", "David Warner"],
"Runs": [345, 490],
}
)
)
team, df = data[1]
assert df.equals(
pd.DataFrame(
{
"Team": ["India", "India"],
"Player": ["Virat Kohli", "Rohit Sharma"],
"Runs": [672, 560],
}
)
)
def test_without_groupby_kwarg(self):
"""
Test operator when the groupby_kwargs is not specified
"""
query = "query"
s3_bucket = "bucket"
s3_key = "key"
op = SqlToS3Operator(
query=query,
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="mysql_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
replace=True,
df_kwargs={"index": False, "header": False},
dag=None,
)
example = {
"Team": ["Australia", "Australia", "India", "India"],
"Player": ["Ricky", "David Warner", "Virat Kohli", "Rohit Sharma"],
"Runs": [345, 490, 672, 560],
}
df = pd.DataFrame(example)
data = []
for group_name, df in op._partition_dataframe(df):
data.append((group_name, df))
assert len(data) == 1
team, df = data[0]
assert df.equals(
pd.DataFrame(
{
"Team": ["Australia", "Australia", "India", "India"],
"Player": ["Ricky", "David Warner", "Virat Kohli", "Rohit Sharma"],
"Runs": [345, 490, 672, 560],
}
)
)
def test_with_max_rows_per_file(self):
"""
Test operator when the max_rows_per_file is specified
"""
query = "query"
s3_bucket = "bucket"
s3_key = "key"
op = SqlToS3Operator(
query=query,
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="mysql_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
replace=True,
df_kwargs={"index": False, "header": False},
max_rows_per_file=3,
dag=None,
)
example = {
"Team": ["Australia", "Australia", "India", "India"],
"Player": ["Ricky", "David Warner", "Virat Kohli", "Rohit Sharma"],
"Runs": [345, 490, 672, 560],
}
df = pd.DataFrame(example)
data = []
for group_name, df in op._partition_dataframe(df):
data.append((group_name, df))
data.sort(key=lambda d: d[0])
team, df = data[0]
assert df.equals(
pd.DataFrame(
{
"Team": ["Australia", "Australia", "India"],
"Player": ["Ricky", "David Warner", "Virat Kohli"],
"Runs": [345, 490, 672],
}
)
)
team, df = data[1]
assert df.equals(
pd.DataFrame(
{
"Team": ["India"],
"Player": ["Rohit Sharma"],
"Runs": [560],
}
)
)
@mock.patch("airflow.providers.common.sql.operators.sql.BaseHook.get_connection")
def test_hook_params(self, mock_get_conn):
mock_get_conn.return_value = Connection(conn_id="postgres_test", conn_type="postgres")
op = SqlToS3Operator(
query="query",
s3_bucket="bucket",
s3_key="key",
sql_conn_id="postgres_test",
task_id="task_id",
sql_hook_params={
"log_sql": False,
},
dag=None,
)
hook = op._get_hook()
assert hook.log_sql == op.sql_hook_params["log_sql"]
@pytest.mark.parametrize(
("df_type_param", "expected_df_type"),
[
pytest.param("polars", "polars", id="with-polars"),
pytest.param("pandas", "pandas", id="with-pandas"),
pytest.param(None, "pandas", id="with-default"),
],
)
@mock.patch("airflow.providers.amazon.aws.transfers.sql_to_s3.S3Hook")
def test_execute_with_df_type(self, mock_s3_hook, df_type_param, expected_df_type):
query = "query"
s3_bucket = "bucket"
s3_key = "key.csv"
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
kwargs = {
"query": query,
"s3_bucket": s3_bucket,
"s3_key": s3_key,
"sql_conn_id": "mysql_conn_id",
"aws_conn_id": "aws_conn_id",
"task_id": "task_id",
"replace": True,
"dag": None,
}
if df_type_param is not None:
kwargs["df_type"] = df_type_param
op = SqlToS3Operator(**kwargs)
op._get_hook = mock_dbapi_hook
op.execute(None)
mock_s3_hook.assert_called_once_with(aws_conn_id="aws_conn_id", verify=None)
get_df_mock.assert_called_once_with(sql=query, parameters=None, df_type=expected_df_type)
file_obj = mock_s3_hook.return_value.load_file_obj.call_args[1]["file_obj"]
assert isinstance(file_obj, io.BytesIO)
mock_s3_hook.return_value.load_file_obj.assert_called_once_with(
file_obj=file_obj, key=s3_key, bucket_name=s3_bucket, replace=True
)
@pytest.mark.parametrize(
("df_type", "input_df_creator"),
[
pytest.param(
"pandas",
lambda: pd.DataFrame({"category": ["A", "A", "B", "B"], "value": [1, 2, 3, 4]}),
id="with-pandas-dataframe",
),
pytest.param(
"polars",
lambda: pytest.importorskip("polars").DataFrame(
{"category": ["A", "A", "B", "B"], "value": [1, 2, 3, 4]}
),
id="with-polars-dataframe",
),
],
)
def test_partition_dataframe(self, df_type, input_df_creator):
"""Test that _partition_dataframe works with both pandas and polars DataFrames."""
op = SqlToS3Operator(
query="query",
s3_bucket="bucket",
s3_key="key",
sql_conn_id="mysql_conn_id",
task_id="task_id",
df_type=df_type,
groupby_kwargs={"by": "category"},
)
input_df = input_df_creator()
partitions = list(op._partition_dataframe(input_df))
assert len(partitions) == 2
for group_name, df in partitions:
if df_type == "polars":
assert isinstance(df, pl.DataFrame)
else:
assert isinstance(df, pd.DataFrame)
assert group_name in ["A", "B"]
@pytest.mark.parametrize(
("kwargs", "expected_warning", "expected_error", "expected_read_kwargs"),
[
pytest.param(
{"read_pd_kwargs": {"dtype_backend": "pyarrow"}},
"The 'read_pd_kwargs' parameter is deprecated",
None,
{"dtype_backend": "pyarrow"},
id="deprecated-read-pd-kwargs-warning",
),
pytest.param(
{
"read_kwargs": {"dtype_backend": "pyarrow"},
"read_pd_kwargs": {"dtype_backend": "numpy_nullable"},
},
"The 'read_pd_kwargs' parameter is deprecated",
None,
{"dtype_backend": "pyarrow"},
id="read-kwargs-priority-over-deprecated",
),
pytest.param(
{"max_rows_per_file": 2, "groupby_kwargs": {"by": "category"}},
None,
"can not be both specified",
None,
id="max-rows-groupby-conflict-error",
),
pytest.param(
{"pd_kwargs": {"index": False}},
"The 'pd_kwargs' parameter is deprecated",
None,
None,
id="deprecated-pd-kwargs-warning",
),
pytest.param(
{"df_kwargs": {"index": False}, "pd_kwargs": {"header": False}},
"The 'pd_kwargs' parameter is deprecated",
None,
None,
id="df-kwargs-priority-over-deprecated",
),
],
)
def test_parameter_validation(self, kwargs, expected_warning, expected_error, expected_read_kwargs):
"""Test parameter validation and deprecation warnings."""
base_kwargs = {
"query": "query",
"s3_bucket": "bucket",
"s3_key": "key",
"sql_conn_id": "mysql_conn_id",
"task_id": "task_id",
}
base_kwargs.update(kwargs)
if expected_error:
with pytest.raises(AirflowException, match=expected_error):
SqlToS3Operator(**base_kwargs)
elif expected_warning:
with pytest.warns(AirflowProviderDeprecationWarning, match=expected_warning):
op = SqlToS3Operator(**base_kwargs)
if expected_read_kwargs:
assert op.read_kwargs == expected_read_kwargs
else:
op = SqlToS3Operator(**base_kwargs)
if expected_read_kwargs:
assert op.read_kwargs == expected_read_kwargs
@pytest.mark.parametrize(
("df_type", "should_call_fix_dtypes"),
[
pytest.param("pandas", True, id="pandas-calls-fix-dtypes"),
pytest.param("polars", False, id="polars-skips-fix-dtypes"),
],
)
@mock.patch("airflow.providers.amazon.aws.transfers.sql_to_s3.S3Hook")
def test_fix_dtypes_behavior_by_df_type(self, mock_s3_hook, df_type, should_call_fix_dtypes):
"""Test that _fix_dtypes is called/not called based on df_type."""
query = "query"
s3_bucket = "bucket"
s3_key = "key"
mock_dbapi_hook = mock.Mock()
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
get_df_mock = mock_dbapi_hook.return_value.get_df
get_df_mock.return_value = test_df
op = SqlToS3Operator(
query=query,
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="mysql_conn_id",
aws_conn_id="aws_conn_id",
task_id="task_id",
df_type=df_type,
replace=True,
dag=None,
)
op._get_hook = mock_dbapi_hook
with mock.patch.object(SqlToS3Operator, "_fix_dtypes") as mock_fix_dtypes:
op.execute(None)
if should_call_fix_dtypes:
mock_fix_dtypes.assert_called_once()
else:
mock_fix_dtypes.assert_not_called()
@pytest.mark.parametrize(
("kwargs", "expected_warning", "expected_read_kwargs", "expected_df_kwargs"),
[
pytest.param(
{
"read_kwargs": {"dtype_backend": "pyarrow"},
"read_pd_kwargs": {"dtype_backend": "numpy_nullable"},
},
"The 'read_pd_kwargs' parameter is deprecated",
{"dtype_backend": "pyarrow"},
{},
id="read-kwargs-priority-over-deprecated",
),
pytest.param(
{"read_pd_kwargs": {"dtype_backend": "numpy_nullable"}},
"The 'read_pd_kwargs' parameter is deprecated",
{"dtype_backend": "numpy_nullable"},
{},
id="read-pd-kwargs-used-when-read-kwargs-none",
),
pytest.param(
{
"df_kwargs": {"index": False},
"pd_kwargs": {"header": False},
},
"The 'pd_kwargs' parameter is deprecated",
{},
{"index": False},
id="df-kwargs-priority-over-deprecated",
),
pytest.param(
{"pd_kwargs": {"header": False}},
"The 'pd_kwargs' parameter is deprecated",
{},
{"header": False},
id="pd-kwargs-used-when-df-kwargs-none",
),
],
)
def test_deprecated_kwargs_priority_behavior(
self, kwargs, expected_warning, expected_read_kwargs, expected_df_kwargs
):
"""Test priority behavior and deprecation warnings for deprecated parameters."""
base_kwargs = {
"query": "query",
"s3_bucket": "bucket",
"s3_key": "key",
"sql_conn_id": "mysql_conn_id",
"task_id": "task_id",
}
base_kwargs.update(kwargs)
with pytest.warns(AirflowProviderDeprecationWarning, match=expected_warning):
op = SqlToS3Operator(**base_kwargs)
assert op.read_kwargs == expected_read_kwargs
assert op.df_kwargs == expected_df_kwargs
@pytest.mark.parametrize(
("fmt", "df_kwargs", "expected_key"),
[
("csv", {"compression": "gzip", "index": False}, "data.csv.gz"),
("csv", {"index": False}, "data.csv"),
("json", {"compression": "gzip"}, "data.json.gz"),
("json", {}, "data.json"),
("parquet", {"compression": "gzip"}, "data.parquet"),
("parquet", {}, "data.parquet"),
],
)
@mock.patch("airflow.providers.amazon.aws.transfers.sql_to_s3.S3Hook")
@mock.patch("airflow.providers.common.sql.hooks.sql.DbApiHook")
def test_file_format_handling(self, mock_dbapi_hook, mock_s3_hook, fmt, df_kwargs, expected_key):
s3_bucket = "bucket"
s3_key = "data." + fmt
test_df = pd.DataFrame({"x": [1, 2]})
mock_dbapi_hook.return_value.get_df.return_value = test_df
op = SqlToS3Operator(
query="SELECT * FROM test",
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="sqlite_conn",
aws_conn_id="aws_default",
task_id="task_id",
file_format=fmt,
df_kwargs=df_kwargs,
replace=True,
dag=None,
)
op._get_hook = lambda: mock_dbapi_hook.return_value
op.execute(context=None)
uploaded_key = mock_s3_hook.return_value.load_file_obj.call_args[1]["key"]
assert uploaded_key == expected_key
@pytest.mark.parametrize(
("file_format", "df_kwargs", "expected_suffix"),
[
("csv", {"compression": "gzip", "index": False}, ".csv.gz"),
("csv", {"index": False}, ".csv"),
("json", {"compression": "gzip"}, ".json.gz"),
("json", {}, ".json"),
("parquet", {"compression": "gzip"}, ".parquet"),
("parquet", {}, ".parquet"),
],
)
@mock.patch("airflow.providers.amazon.aws.transfers.sql_to_s3.S3Hook")
@mock.patch("airflow.providers.common.sql.hooks.sql.DbApiHook")
def test_file_format_handling_with_groupby(
self, mock_dbapi_hook, mock_s3_hook, file_format, df_kwargs, expected_suffix
):
s3_bucket = "bucket"
s3_key = "data"
# Input DataFrame with groups
test_data = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "group": ["group1", "group1", "group2", "group2", "group3", "group4"]}
)
mock_dbapi_hook.return_value.get_df.return_value = test_data
op = SqlToS3Operator(
query="SELECT * FROM test",
s3_bucket=s3_bucket,
s3_key=s3_key,
sql_conn_id="sqlite_conn",
aws_conn_id="aws_default",
task_id="task_id",
file_format=file_format,
df_kwargs=df_kwargs,
groupby_kwargs={"by": "group"},
replace=True,
dag=None,
)
op._get_hook = lambda: mock_dbapi_hook.return_value
op.execute(context=None)
expected_groups = test_data["group"].unique()
assert mock_s3_hook.return_value.load_file_obj.call_count == len(expected_groups)
called_keys = [call.kwargs["key"] for call in mock_s3_hook.return_value.load_file_obj.call_args_list]
for group in expected_groups:
expected_key = f"{s3_key}_{group}{expected_suffix}"
assert expected_key in called_keys, f"Missing expected key: {expected_key}"
| TestSqlToS3Operator |
python | pallets__jinja | src/jinja2/sandbox.py | {
"start": 13757,
"end": 14258
} | class ____(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
if not super().is_safe_attribute(obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
| ImmutableSandboxedEnvironment |
python | huggingface__transformers | src/transformers/models/roformer/modeling_roformer.py | {
"start": 12605,
"end": 13714
} | class ____(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.self = RoFormerSelfAttention(config, layer_idx=layer_idx)
self.output = RoFormerSelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
sinusoidal_pos=None,
encoder_hidden_states=None,
past_key_values=None,
output_attentions=False,
cache_position=None,
):
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
sinusoidal_pos=sinusoidal_pos,
encoder_hidden_states=encoder_hidden_states,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RoFormer
| RoFormerAttention |
python | doocs__leetcode | solution/0800-0899/0886.Possible Bipartition/Solution.py | {
"start": 0,
"end": 584
} | class ____:
def possibleBipartition(self, n: int, dislikes: List[List[int]]) -> bool:
def dfs(i, c):
color[i] = c
for j in g[i]:
if color[j] == c:
return False
if color[j] == 0 and not dfs(j, 3 - c):
return False
return True
g = defaultdict(list)
color = [0] * n
for a, b in dislikes:
a, b = a - 1, b - 1
g[a].append(b)
g[b].append(a)
return all(c or dfs(i, 1) for i, c in enumerate(color))
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1340666,
"end": 1368185
} | class ____(
sgqlc.types.Type,
Node,
Assignable,
Closable,
Comment,
Updatable,
UpdatableComment,
Labelable,
Lockable,
Reactable,
RepositoryNode,
Subscribable,
UniformResourceLocatable,
ProjectV2Owner,
):
"""A repository pull request."""
__schema__ = github_schema
__field_names__ = (
"additions",
"auto_merge_request",
"base_ref",
"base_ref_name",
"base_ref_oid",
"base_repository",
"can_be_rebased",
"changed_files",
"checks_resource_path",
"checks_url",
"closing_issues_references",
"comments",
"commits",
"deletions",
"files",
"head_ref",
"head_ref_name",
"head_ref_oid",
"head_repository",
"head_repository_owner",
"hovercard",
"is_cross_repository",
"is_draft",
"is_read_by_viewer",
"latest_opinionated_reviews",
"latest_reviews",
"maintainer_can_modify",
"merge_commit",
"merge_state_status",
"merge_queue_entry",
"mergeable",
"merged",
"merged_at",
"merged_by",
"milestone",
"number",
"participants",
"permalink",
"potential_merge_commit",
"project_cards",
"project_items",
"revert_resource_path",
"revert_url",
"review_decision",
"review_requests",
"review_threads",
"reviews",
"state",
"suggested_reviewers",
"timeline_items",
"title",
"title_html",
"total_comments_count",
"viewer_can_apply_suggestion",
"viewer_can_delete_head_ref",
"viewer_can_disable_auto_merge",
"viewer_can_edit_files",
"viewer_can_enable_auto_merge",
"viewer_can_merge_as_admin",
"viewer_can_update_branch",
"viewer_latest_review",
"viewer_latest_review_request",
"viewer_merge_body_text",
"viewer_merge_headline_text",
)
additions = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="additions")
"""The number of additions in this pull request."""
auto_merge_request = sgqlc.types.Field(AutoMergeRequest, graphql_name="autoMergeRequest")
"""Returns the auto-merge request object if one exists for this pull
request.
"""
base_ref = sgqlc.types.Field("Ref", graphql_name="baseRef")
"""Identifies the base Ref associated with the pull request."""
base_ref_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="baseRefName")
"""Identifies the name of the base Ref associated with the pull
request, even if the ref has been deleted.
"""
base_ref_oid = sgqlc.types.Field(sgqlc.types.non_null(GitObjectID), graphql_name="baseRefOid")
"""Identifies the oid of the base ref associated with the pull
request, even if the ref has been deleted.
"""
base_repository = sgqlc.types.Field("Repository", graphql_name="baseRepository")
"""The repository associated with this pull request's base Ref."""
can_be_rebased = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="canBeRebased")
"""Whether or not the pull request is rebaseable."""
changed_files = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="changedFiles")
"""The number of changed files in this pull request."""
checks_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="checksResourcePath")
"""The HTTP path for the checks of this pull request."""
checks_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="checksUrl")
"""The HTTP URL for the checks of this pull request."""
closing_issues_references = sgqlc.types.Field(
IssueConnection,
graphql_name="closingIssuesReferences",
args=sgqlc.types.ArgDict(
(
("user_linked_only", sgqlc.types.Arg(Boolean, graphql_name="userLinkedOnly", default=False)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)),
)
),
)
"""List of issues that were may be closed by this pull request
Arguments:
* `user_linked_only` (`Boolean`): Return only manually linked
Issues (default: `false`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`IssueOrder`): Ordering options for issues returned
from the connection
"""
comments = sgqlc.types.Field(
sgqlc.types.non_null(IssueCommentConnection),
graphql_name="comments",
args=sgqlc.types.ArgDict(
(
("order_by", sgqlc.types.Arg(IssueCommentOrder, graphql_name="orderBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of comments associated with the pull request.
Arguments:
* `order_by` (`IssueCommentOrder`): Ordering options for issue
comments returned from the connection.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
commits = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestCommitConnection),
graphql_name="commits",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of commits present in this pull request's head branch not
present in the base branch.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
deletions = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="deletions")
"""The number of deletions in this pull request."""
files = sgqlc.types.Field(
PullRequestChangedFileConnection,
graphql_name="files",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""Lists the files changed within this pull request.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
head_ref = sgqlc.types.Field("Ref", graphql_name="headRef")
"""Identifies the head Ref associated with the pull request."""
head_ref_name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="headRefName")
"""Identifies the name of the head Ref associated with the pull
request, even if the ref has been deleted.
"""
head_ref_oid = sgqlc.types.Field(sgqlc.types.non_null(GitObjectID), graphql_name="headRefOid")
"""Identifies the oid of the head ref associated with the pull
request, even if the ref has been deleted.
"""
head_repository = sgqlc.types.Field("Repository", graphql_name="headRepository")
"""The repository associated with this pull request's head Ref."""
head_repository_owner = sgqlc.types.Field(RepositoryOwner, graphql_name="headRepositoryOwner")
"""The owner of the repository associated with this pull request's
head Ref.
"""
hovercard = sgqlc.types.Field(
sgqlc.types.non_null(Hovercard),
graphql_name="hovercard",
args=sgqlc.types.ArgDict(
(("include_notification_contexts", sgqlc.types.Arg(Boolean, graphql_name="includeNotificationContexts", default=True)),)
),
)
"""The hovercard information for this issue
Arguments:
* `include_notification_contexts` (`Boolean`): Whether or not to
include notification contexts (default: `true`)
"""
is_cross_repository = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCrossRepository")
"""The head and base repositories are different."""
is_draft = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isDraft")
"""Identifies if the pull request is a draft."""
is_read_by_viewer = sgqlc.types.Field(Boolean, graphql_name="isReadByViewer")
"""Is this pull request read by the viewer"""
latest_opinionated_reviews = sgqlc.types.Field(
PullRequestReviewConnection,
graphql_name="latestOpinionatedReviews",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("writers_only", sgqlc.types.Arg(Boolean, graphql_name="writersOnly", default=False)),
)
),
)
"""A list of latest reviews per user associated with the pull
request.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `writers_only` (`Boolean`): Only return reviews from user who
have write access to the repository (default: `false`)
"""
latest_reviews = sgqlc.types.Field(
PullRequestReviewConnection,
graphql_name="latestReviews",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of latest reviews per user associated with the pull request
that are not also pending review.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
maintainer_can_modify = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="maintainerCanModify")
"""Indicates whether maintainers can modify the pull request."""
merge_commit = sgqlc.types.Field(Commit, graphql_name="mergeCommit")
"""The commit that was created when this pull request was merged."""
merge_state_status = sgqlc.types.Field(sgqlc.types.non_null(MergeStateStatus), graphql_name="mergeStateStatus")
"""Detailed information about the current pull request merge state
status.
"""
merge_queue_entry = sgqlc.types.Field(MergeQueueEntry, graphql_name="mergeQueueEntry")
"""The merge queue entry of the pull request in the base branch's
merge queue
"""
mergeable = sgqlc.types.Field(sgqlc.types.non_null(MergeableState), graphql_name="mergeable")
"""Whether or not the pull request can be merged based on the
existence of merge conflicts.
"""
merged = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="merged")
"""Whether or not the pull request was merged."""
merged_at = sgqlc.types.Field(DateTime, graphql_name="mergedAt")
"""The date and time that the pull request was merged."""
merged_by = sgqlc.types.Field(Actor, graphql_name="mergedBy")
"""The actor who merged the pull request."""
milestone = sgqlc.types.Field(Milestone, graphql_name="milestone")
"""Identifies the milestone associated with the pull request."""
number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number")
"""Identifies the pull request number."""
participants = sgqlc.types.Field(
sgqlc.types.non_null(UserConnection),
graphql_name="participants",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of Users that are participating in the Pull Request
conversation.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
permalink = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="permalink")
"""The permalink to the pull request."""
potential_merge_commit = sgqlc.types.Field(Commit, graphql_name="potentialMergeCommit")
"""The commit that GitHub automatically generated to test if this
pull request could be merged. This field will not return a value
if the pull request is merged, or if the test merge commit is
still being generated. See the `mergeable` field for more details
on the mergeability of the pull request.
"""
project_cards = sgqlc.types.Field(
sgqlc.types.non_null(ProjectCardConnection),
graphql_name="projectCards",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"archived_states",
sgqlc.types.Arg(
sgqlc.types.list_of(ProjectCardArchivedState), graphql_name="archivedStates", default=("ARCHIVED", "NOT_ARCHIVED")
),
),
)
),
)
"""List of project cards associated with this pull request.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `archived_states` (`[ProjectCardArchivedState]`): A list of
archived states to filter the cards by (default: `[ARCHIVED,
NOT_ARCHIVED]`)
"""
project_items = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ItemConnection),
graphql_name="projectItems",
args=sgqlc.types.ArgDict(
(
("include_archived", sgqlc.types.Arg(Boolean, graphql_name="includeArchived", default=True)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""List of project items associated with this pull request.
Arguments:
* `include_archived` (`Boolean`): Include archived items.
(default: `true`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
revert_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="revertResourcePath")
"""The HTTP path for reverting this pull request."""
revert_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="revertUrl")
"""The HTTP URL for reverting this pull request."""
review_decision = sgqlc.types.Field(PullRequestReviewDecision, graphql_name="reviewDecision")
"""The current status of this pull request with respect to code
review.
"""
review_requests = sgqlc.types.Field(
ReviewRequestConnection,
graphql_name="reviewRequests",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of review requests associated with the pull request.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
review_threads = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestReviewThreadConnection),
graphql_name="reviewThreads",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The list of all review threads for this pull request.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
reviews = sgqlc.types.Field(
PullRequestReviewConnection,
graphql_name="reviews",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"states",
sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(PullRequestReviewState)), graphql_name="states", default=None),
),
("author", sgqlc.types.Arg(String, graphql_name="author", default=None)),
)
),
)
"""A list of reviews associated with the pull request.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `states` (`[PullRequestReviewState!]`): A list of states to
filter the reviews.
* `author` (`String`): Filter by author of the review.
"""
state = sgqlc.types.Field(sgqlc.types.non_null(PullRequestState), graphql_name="state")
"""Identifies the state of the pull request."""
suggested_reviewers = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(SuggestedReviewer)), graphql_name="suggestedReviewers")
"""A list of reviewer suggestions based on commit history and past
review comments.
"""
timeline_items = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestTimelineItemsConnection),
graphql_name="timelineItems",
args=sgqlc.types.ArgDict(
(
("since", sgqlc.types.Arg(DateTime, graphql_name="since", default=None)),
("skip", sgqlc.types.Arg(Int, graphql_name="skip", default=None)),
(
"item_types",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(PullRequestTimelineItemsItemType)), graphql_name="itemTypes", default=None
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of events, comments, commits, etc. associated with the pull
request.
Arguments:
* `since` (`DateTime`): Filter timeline items by a `since`
timestamp.
* `skip` (`Int`): Skips the first _n_ elements in the list.
* `item_types` (`[PullRequestTimelineItemsItemType!]`): Filter
timeline items by type.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
"""Identifies the pull request title."""
title_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="titleHTML")
"""Identifies the pull request title rendered to HTML."""
total_comments_count = sgqlc.types.Field(Int, graphql_name="totalCommentsCount")
"""Returns a count of how many comments this pull request has
received.
"""
viewer_can_apply_suggestion = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanApplySuggestion")
"""Whether or not the viewer can apply suggestion."""
viewer_can_delete_head_ref = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanDeleteHeadRef")
"""Check if the viewer can restore the deleted head ref."""
viewer_can_disable_auto_merge = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanDisableAutoMerge")
"""Whether or not the viewer can disable auto-merge"""
viewer_can_edit_files = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanEditFiles")
"""Can the viewer edit files within this pull request."""
viewer_can_enable_auto_merge = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanEnableAutoMerge")
"""Whether or not the viewer can enable auto-merge"""
viewer_can_merge_as_admin = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanMergeAsAdmin")
"""Indicates whether the viewer can bypass branch protections and
merge the pull request immediately
"""
viewer_can_update_branch = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanUpdateBranch")
"""Whether or not the viewer can update the head ref of this PR, by
merging or rebasing the base ref. If the head ref is up to date or
unable to be updated by this user, this will return false.
"""
viewer_latest_review = sgqlc.types.Field("PullRequestReview", graphql_name="viewerLatestReview")
"""The latest review given from the viewer."""
viewer_latest_review_request = sgqlc.types.Field("ReviewRequest", graphql_name="viewerLatestReviewRequest")
"""The person who has requested the viewer for review on this pull
request.
"""
viewer_merge_body_text = sgqlc.types.Field(
sgqlc.types.non_null(String),
graphql_name="viewerMergeBodyText",
args=sgqlc.types.ArgDict((("merge_type", sgqlc.types.Arg(PullRequestMergeMethod, graphql_name="mergeType", default=None)),)),
)
"""The merge body text for the viewer and method.
Arguments:
* `merge_type` (`PullRequestMergeMethod`): The merge method for
the message.
"""
viewer_merge_headline_text = sgqlc.types.Field(
sgqlc.types.non_null(String),
graphql_name="viewerMergeHeadlineText",
args=sgqlc.types.ArgDict((("merge_type", sgqlc.types.Arg(PullRequestMergeMethod, graphql_name="mergeType", default=None)),)),
)
"""The merge headline text for the viewer and method.
Arguments:
* `merge_type` (`PullRequestMergeMethod`): The merge method for
the message.
"""
| PullRequest |
python | pytest-dev__pytest | src/_pytest/raises.py | {
"start": 13864,
"end": 20469
} | class ____(ABC, Generic[BaseExcT_co]):
"""ABC with common functionality shared between RaisesExc and RaisesGroup"""
def __init__(
self,
*,
match: str | Pattern[str] | None,
check: Callable[[BaseExcT_co], bool] | None,
) -> None:
if isinstance(match, str):
# juggle error in order to avoid context to fail (necessary?)
re_error = None
try:
self.match: Pattern[str] | None = re.compile(match)
except re.error as e:
re_error = e
if re_error is not None:
fail(f"Invalid regex pattern provided to 'match': {re_error}")
if match == "":
warnings.warn(
PytestWarning(
"matching against an empty string will *always* pass. If you want "
"to check for an empty message you need to pass '^$'. If you don't "
"want to match you should pass `None` or leave out the parameter."
),
stacklevel=2,
)
else:
self.match = match
# check if this is a fully escaped regex and has ^$ to match fully
# in which case we can do a proper diff on error
self.rawmatch: str | None = None
if isinstance(match, str) or (
isinstance(match, Pattern) and match.flags == _REGEX_NO_FLAGS
):
if isinstance(match, Pattern):
match = match.pattern
if (
match
and match[0] == "^"
and match[-1] == "$"
and is_fully_escaped(match[1:-1])
):
self.rawmatch = unescape(match[1:-1])
self.check = check
self._fail_reason: str | None = None
# used to suppress repeated printing of `repr(self.check)`
self._nested: bool = False
# set in self._parse_exc
self.is_baseexception = False
def _parse_exc(
self, exc: type[BaseExcT_1] | types.GenericAlias, expected: str
) -> type[BaseExcT_1]:
if isinstance(exc, type) and issubclass(exc, BaseException):
if not issubclass(exc, Exception):
self.is_baseexception = True
return exc
# because RaisesGroup does not support variable number of exceptions there's
# still a use for RaisesExc(ExceptionGroup[Exception]).
origin_exc: type[BaseException] | None = get_origin(exc)
if origin_exc and issubclass(origin_exc, BaseExceptionGroup):
exc_type = get_args(exc)[0]
if (
issubclass(origin_exc, ExceptionGroup) and exc_type in (Exception, Any)
) or (
issubclass(origin_exc, BaseExceptionGroup)
and exc_type in (BaseException, Any)
):
if not issubclass(origin_exc, ExceptionGroup):
self.is_baseexception = True
return cast(type[BaseExcT_1], origin_exc)
else:
raise ValueError(
f"Only `ExceptionGroup[Exception]` or `BaseExceptionGroup[BaseException]` "
f"are accepted as generic types but got `{exc}`. "
f"As `raises` will catch all instances of the specified group regardless of the "
f"generic argument specific nested exceptions has to be checked "
f"with `RaisesGroup`."
)
# unclear if the Type/ValueError distinction is even helpful here
msg = f"Expected {expected}, but got "
if isinstance(exc, type): # type: ignore[unreachable]
raise ValueError(msg + f"{exc.__name__!r}")
if isinstance(exc, BaseException): # type: ignore[unreachable]
raise TypeError(msg + f"an exception instance: {type(exc).__name__}")
raise TypeError(msg + repr(type(exc).__name__))
@property
def fail_reason(self) -> str | None:
"""Set after a call to :meth:`matches` to give a human-readable reason for why the match failed.
When used as a context manager the string will be printed as the reason for the
test failing."""
return self._fail_reason
def _check_check(
self: AbstractRaises[BaseExcT_1],
exception: BaseExcT_1,
) -> bool:
if self.check is None:
return True
if self.check(exception):
return True
check_repr = "" if self._nested else " " + repr_callable(self.check)
self._fail_reason = f"check{check_repr} did not return True"
return False
# TODO: harmonize with ExceptionInfo.match
def _check_match(self, e: BaseException) -> bool:
if self.match is None or re.search(
self.match,
stringified_exception := stringify_exception(
e, include_subexception_msg=False
),
):
return True
# if we're matching a group, make sure we're explicit to reduce confusion
# if they're trying to match an exception contained within the group
maybe_specify_type = (
f" the `{_exception_type_name(type(e))}()`"
if isinstance(e, BaseExceptionGroup)
else ""
)
if isinstance(self.rawmatch, str):
# TODO: it instructs to use `-v` to print leading text, but that doesn't work
# I also don't know if this is the proper entry point, or tool to use at all
from _pytest.assertion.util import _diff_text
from _pytest.assertion.util import dummy_highlighter
diff = _diff_text(self.rawmatch, stringified_exception, dummy_highlighter)
self._fail_reason = ("\n" if diff[0][0] == "-" else "") + "\n".join(diff)
return False
self._fail_reason = (
f"Regex pattern did not match{maybe_specify_type}.\n"
f" Expected regex: {_match_pattern(self.match)!r}\n"
f" Actual message: {stringified_exception!r}"
)
if _match_pattern(self.match) == stringified_exception:
self._fail_reason += "\n Did you mean to `re.escape()` the regex?"
return False
@abstractmethod
def matches(
self: AbstractRaises[BaseExcT_1], exception: BaseException
) -> TypeGuard[BaseExcT_1]:
"""Check if an exception matches the requirements of this AbstractRaises.
If it fails, :meth:`AbstractRaises.fail_reason` should be set.
"""
@final
| AbstractRaises |
python | networkx__networkx | networkx/drawing/tests/test_pydot.py | {
"start": 189,
"end": 4973
} | class ____:
@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
@pytest.mark.parametrize("prog", ("neato", "dot"))
def test_pydot(self, G, prog, tmp_path):
"""
Validate :mod:`pydot`-based usage of the passed NetworkX graph with the
passed basename of an external GraphViz command (e.g., `dot`, `neato`).
"""
# Set the name of this graph to... "G". Failing to do so will
# subsequently trip an assertion expecting this name.
G.graph["name"] = "G"
# Add arbitrary nodes and edges to the passed empty graph.
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "C"), ("A", "D")])
G.add_node("E")
# Validate layout of this graph with the passed GraphViz command.
graph_layout = nx.nx_pydot.pydot_layout(G, prog=prog)
assert isinstance(graph_layout, dict)
# Convert this graph into a "pydot.Dot" instance.
P = nx.nx_pydot.to_pydot(G)
# Convert this "pydot.Dot" instance back into a graph of the same type.
G2 = G.__class__(nx.nx_pydot.from_pydot(P))
# Validate the original and resulting graphs to be the same.
assert graphs_equal(G, G2)
fname = tmp_path / "out.dot"
# Serialize this "pydot.Dot" instance to a temporary file in dot format
P.write_raw(fname)
# Deserialize a list of new "pydot.Dot" instances back from this file.
Pin_list = pydot.graph_from_dot_file(path=fname, encoding="utf-8")
# Validate this file to contain only one graph.
assert len(Pin_list) == 1
# The single "pydot.Dot" instance deserialized from this file.
Pin = Pin_list[0]
# Sorted list of all nodes in the original "pydot.Dot" instance.
n1 = sorted(p.get_name() for p in P.get_node_list())
# Sorted list of all nodes in the deserialized "pydot.Dot" instance.
n2 = sorted(p.get_name() for p in Pin.get_node_list())
# Validate these instances to contain the same nodes.
assert n1 == n2
# Sorted list of all edges in the original "pydot.Dot" instance.
e1 = sorted((e.get_source(), e.get_destination()) for e in P.get_edge_list())
# Sorted list of all edges in the original "pydot.Dot" instance.
e2 = sorted((e.get_source(), e.get_destination()) for e in Pin.get_edge_list())
# Validate these instances to contain the same edges.
assert e1 == e2
# Deserialize a new graph of the same type back from this file.
Hin = nx.nx_pydot.read_dot(fname)
Hin = G.__class__(Hin)
# Validate the original and resulting graphs to be the same.
assert graphs_equal(G, Hin)
def test_read_write(self):
G = nx.MultiGraph()
G.graph["name"] = "G"
G.add_edge("1", "2", key="0") # read assumes strings
fh = StringIO()
nx.nx_pydot.write_dot(G, fh)
fh.seek(0)
H = nx.nx_pydot.read_dot(fh)
assert graphs_equal(G, H)
def test_pydot_issue_7581(tmp_path):
"""Validate that `nx_pydot.pydot_layout` handles nodes
with characters like "\n", " ".
Those characters cause `pydot` to escape and quote them on output,
which caused #7581.
"""
G = nx.Graph()
G.add_edges_from([("A\nbig test", "B"), ("A\nbig test", "C"), ("B", "C")])
graph_layout = nx.nx_pydot.pydot_layout(G, prog="dot")
assert isinstance(graph_layout, dict)
# Convert the graph to pydot and back into a graph. There should be no difference.
P = nx.nx_pydot.to_pydot(G)
G2 = nx.Graph(nx.nx_pydot.from_pydot(P))
assert graphs_equal(G, G2)
@pytest.mark.parametrize(
"graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph]
)
def test_hashable_pydot(graph_type):
# gh-5790
G = graph_type()
G.add_edge("5", frozenset([1]), t='"Example:A"', l=False)
G.add_edge("1", 2, w=True, t=("node1",), l=frozenset(["node1"]))
G.add_edge("node", (3, 3), w="string")
assert [
{"t": '"Example:A"', "l": "False"},
{"w": "True", "t": "('node1',)", "l": "frozenset({'node1'})"},
{"w": "string"},
] == [
attr
for _, _, attr in nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).edges.data()
]
assert {str(i) for i in G.nodes()} == set(
nx.nx_pydot.from_pydot(nx.nx_pydot.to_pydot(G)).nodes
)
def test_pydot_numerical_name():
G = nx.Graph()
G.add_edges_from([("A", "B"), (0, 1)])
graph_layout = nx.nx_pydot.pydot_layout(G, prog="dot")
assert isinstance(graph_layout, dict)
assert "0" not in graph_layout
assert 0 in graph_layout
assert "1" not in graph_layout
assert 1 in graph_layout
assert "A" in graph_layout
assert "B" in graph_layout
| TestPydot |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 118315,
"end": 118528
} | class ____(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
| BjoernServer |
python | getsentry__sentry | src/sentry/notifications/serializers.py | {
"start": 1173,
"end": 1545
} | class ____(NotificationSettingsBaseSerializer):
def serialize(
self,
obj: NotificationSettingProvider,
*args: Any,
**kwargs: Any,
) -> Mapping[str, str | None]:
output = super().serialize(obj, **kwargs)
return {
**output,
"provider": obj.provider,
}
| NotificationSettingsProviderSerializer |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 8164,
"end": 8597
} | class ____(GeoFunc):
output_field = TextField()
def __init__(self, expression, relative=False, precision=8, **extra):
relative = (
relative if hasattr(relative, "resolve_expression") else int(relative)
)
expressions = [
expression,
relative,
self._handle_param(precision, "precision", int),
]
super().__init__(*expressions, **extra)
| AsSVG |
python | huggingface__transformers | src/transformers/models/lightglue/modular_lightglue.py | {
"start": 24895,
"end": 46048
} | class ____(LightGluePreTrainedModel):
"""
LightGlue is a model matching keypoints in images by leveraging detections from a keypoint detector such as
SuperPoint. It is based on the SuperGlue architecture and is designed to be lightweight and efficient.
It consists of :
1. Keypoint Encoder
2. A Graph Neural Network with self and cross attention layers
3. Matching Assignment layers
The correspondence ids use -1 to indicate non-matching points.
Philipp Lindenberger, Paul-Edouard Sarlin and Marc Pollefeys. LightGlue: Local Feature Matching at Light Speed.
In ICCV 2023. https://huggingface.co/papers/2306.13643
"""
def __init__(self, config: LightGlueConfig):
super().__init__(config)
self.keypoint_detector = AutoModelForKeypointDetection.from_config(
config.keypoint_detector_config, trust_remote_code=config.trust_remote_code
)
self.keypoint_detector_descriptor_dim = config.keypoint_detector_config.descriptor_decoder_dim
self.descriptor_dim = config.descriptor_dim
self.num_layers = config.num_hidden_layers
self.filter_threshold = config.filter_threshold
self.depth_confidence = config.depth_confidence
self.width_confidence = config.width_confidence
if self.descriptor_dim != self.keypoint_detector_descriptor_dim:
self.input_projection = nn.Linear(self.keypoint_detector_descriptor_dim, self.descriptor_dim, bias=True)
else:
self.input_projection = nn.Identity()
self.positional_encoder = LightGluePositionalEncoder(config)
self.transformer_layers = nn.ModuleList(
[LightGlueTransformerLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]
)
self.match_assignment_layers = nn.ModuleList(
[LightGlueMatchAssignmentLayer(config) for _ in range(config.num_hidden_layers)]
)
self.token_confidence = nn.ModuleList(
[LightGlueTokenConfidenceLayer(config) for _ in range(config.num_hidden_layers - 1)]
)
self.post_init()
def _get_confidence_threshold(self, layer_index: int) -> float:
"""scaled confidence threshold for a given layer"""
threshold = 0.8 + 0.1 * np.exp(-4.0 * layer_index / self.num_layers)
return np.clip(threshold, 0, 1)
def _keypoint_processing(
self, descriptors: torch.Tensor, keypoints: torch.Tensor, output_hidden_states: Optional[bool] = False
) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
descriptors = descriptors.detach().contiguous()
projected_descriptors = self.input_projection(descriptors)
keypoint_encoding_output = self.positional_encoder(keypoints, output_hidden_states=output_hidden_states)
return projected_descriptors, keypoint_encoding_output
def _get_early_stopped_image_pairs(
self, keypoint_confidences: torch.Tensor, layer_index: int, mask: torch.Tensor, num_points: torch.Tensor
) -> torch.Tensor:
"""evaluate whether we should stop inference based on the confidence of the keypoints"""
batch_size, _ = mask.shape
if layer_index < self.num_layers - 1:
# If the current layer is not the last layer, we compute the confidence of the keypoints and check
# if we should stop the forward pass through the transformer layers for each pair of images.
keypoint_confidences = keypoint_confidences.masked_fill(mask == 0, 1)
keypoint_confidences = keypoint_confidences.reshape(batch_size // 2, -1)
threshold = self._get_confidence_threshold(layer_index)
ratio_confident = 1.0 - (keypoint_confidences < threshold).float().sum(dim=1) / num_points
early_stopped_pairs = ratio_confident > self.depth_confidence
else:
# If the current layer is the last layer, we stop the forward pass through the transformer layers for
# all pairs of images.
early_stopped_pairs = torch.ones(batch_size, dtype=torch.bool)
return early_stopped_pairs
def _get_keypoint_matching(self, descriptors, mask, layer_index, early_stops=None):
if early_stops is not None:
descriptors = descriptors[early_stops]
mask = mask[early_stops]
scores = self.match_assignment_layers[layer_index](descriptors, mask)
matches, matching_scores = get_matches_from_scores(scores, self.filter_threshold)
return matches, matching_scores
def _get_pruning_mask(self, confidences: torch.Tensor, scores: torch.Tensor, layer_index: int) -> torch.Tensor:
"""mask points which should be removed"""
keep = scores > (1 - self.width_confidence)
if confidences is not None: # Low-confidence points are never pruned.
keep |= confidences <= self._get_confidence_threshold(layer_index)
return keep
def _do_layer_keypoint_pruning(
self,
descriptors: torch.Tensor,
keypoints: torch.Tensor,
mask: torch.Tensor,
indices: torch.Tensor,
prune_output: torch.Tensor,
keypoint_confidences: torch.Tensor,
layer_index: int,
):
"""
For a given layer, prune keypoints based on the confidence of the keypoints and the matchability of the
descriptors.
"""
batch_size, _, _ = descriptors.shape
descriptors_matchability = self.match_assignment_layers[layer_index].get_matchability(descriptors)
pruned_keypoints_mask = self._get_pruning_mask(keypoint_confidences, descriptors_matchability, layer_index)
pruned_keypoints_mask = pruned_keypoints_mask.masked_fill(mask == 0, torch.tensor(False))
# For each image, we extract the pruned indices and the corresponding descriptors and keypoints.
pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask, pruned_indices = (
[t[mask] for t, mask in zip(tensor, pruned_keypoints_mask)]
for tensor in [descriptors, keypoints[0], keypoints[1], pruned_keypoints_mask, indices]
)
for i in range(batch_size):
prune_output[i, pruned_indices[i]] += 1
# Pad the pruned descriptors, keypoints, indices and mask to have the same shape across the batch.
pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask = (
pad_sequence(pruned_tensor, batch_first=True)
for pruned_tensor in [pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask]
)
pruned_keypoints = (pruned_keypoints_0, pruned_keypoints_1)
pruned_indices = pad_sequence(pruned_indices, batch_first=True, padding_value=-1)
return pruned_descriptors, pruned_keypoints, pruned_indices, pruned_mask, prune_output
def _concat_early_stopped_outputs(
self,
early_stops_indices,
final_pruned_keypoints_indices,
final_pruned_keypoints_iterations,
matches,
matching_scores,
):
early_stops_indices = torch.stack(early_stops_indices)
# Rearrange tensors to have the same order as the input batch
ids = torch.arange(early_stops_indices.shape[0])
order_indices = early_stops_indices[ids]
early_stops_indices = early_stops_indices[order_indices]
matches, final_pruned_keypoints_indices = (
pad_sequence(tensor, batch_first=True, padding_value=-1)
for tensor in [matches, final_pruned_keypoints_indices]
)
matching_scores, final_pruned_keypoints_iterations = (
pad_sequence(tensor, batch_first=True, padding_value=0)
for tensor in [matching_scores, final_pruned_keypoints_iterations]
)
matches, matching_scores, final_pruned_keypoints_indices, final_pruned_keypoints_iterations = (
tensor[early_stops_indices]
for tensor in [
matches,
matching_scores,
final_pruned_keypoints_indices,
final_pruned_keypoints_iterations,
]
)
return final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores
def _do_final_keypoint_pruning(
self,
indices: torch.Tensor,
matches: torch.Tensor,
matching_scores: torch.Tensor,
num_keypoints: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
# (batch_size, num_keypoints) -> (batch_size // 2, 2, num_keypoints) -> 2 * (batch_size // 2, num_keypoints) to
# have tensors from
batch_size, _ = indices.shape
indices, matches, matching_scores = (
tensor.reshape(batch_size // 2, 2, -1) for tensor in [indices, matches, matching_scores]
)
indices0 = indices[:, 0]
indices1 = indices[:, 1]
matches0 = matches[:, 0]
matches1 = matches[:, 1]
matching_scores0 = matching_scores[:, 0]
matching_scores1 = matching_scores[:, 1]
# Prepare final matches and matching scores
_matches = torch.full((batch_size // 2, 2, num_keypoints), -1, device=indices.device, dtype=matches.dtype)
_matching_scores = torch.zeros(
(batch_size // 2, 2, num_keypoints), device=indices.device, dtype=matching_scores.dtype
)
# Fill the matches and matching scores for each image pair
for i in range(batch_size // 2):
_matches[i, 0, indices0[i]] = torch.where(
matches0[i] == -1, -1, indices1[i].gather(0, matches0[i].clamp(min=0))
)
_matches[i, 1, indices1[i]] = torch.where(
matches1[i] == -1, -1, indices0[i].gather(0, matches1[i].clamp(min=0))
)
_matching_scores[i, 0, indices0[i]] = matching_scores0[i]
_matching_scores[i, 1, indices1[i]] = matching_scores1[i]
return _matches, _matching_scores
def _match_image_pair(
self,
keypoints: torch.Tensor,
descriptors: torch.Tensor,
height: int,
width: int,
mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, tuple, tuple]:
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if keypoints.shape[2] == 0: # no keypoints
shape = keypoints.shape[:-1]
return (
keypoints.new_full(shape, -1, dtype=torch.int),
keypoints.new_zeros(shape),
keypoints.new_zeros(shape),
all_hidden_states,
all_attentions,
)
device = keypoints.device
batch_size, _, initial_num_keypoints, _ = keypoints.shape
num_points_per_pair = torch.sum(mask.reshape(batch_size, -1), dim=1)
# (batch_size, 2, num_keypoints, 2) -> (batch_size * 2, num_keypoints, 2)
keypoints = keypoints.reshape(batch_size * 2, initial_num_keypoints, 2)
mask = mask.reshape(batch_size * 2, initial_num_keypoints) if mask is not None else None
descriptors = descriptors.reshape(batch_size * 2, initial_num_keypoints, self.keypoint_detector_descriptor_dim)
image_indices = torch.arange(batch_size * 2, device=device)
# Keypoint normalization
keypoints = normalize_keypoints(keypoints, height, width)
descriptors, keypoint_encoding_output = self._keypoint_processing(
descriptors, keypoints, output_hidden_states=output_hidden_states
)
keypoints = keypoint_encoding_output[0]
# Early stop consists of stopping the forward pass through the transformer layers when the confidence of the
# keypoints is above a certain threshold.
do_early_stop = self.depth_confidence > 0
# Keypoint pruning consists of removing keypoints from the input of the transformer layers when the confidence of
# the keypoints is below a certain threshold.
do_keypoint_pruning = self.width_confidence > 0
early_stops_indices = []
matches = []
matching_scores = []
final_pruned_keypoints_indices = []
final_pruned_keypoints_iterations = []
pruned_keypoints_indices = torch.arange(0, initial_num_keypoints, device=device).expand(batch_size * 2, -1)
pruned_keypoints_iterations = torch.ones_like(pruned_keypoints_indices)
for layer_index in range(self.num_layers):
input_shape = descriptors.size()
if mask is not None:
extended_attention_mask = self.get_extended_attention_mask(mask, input_shape)
else:
extended_attention_mask = torch.ones((batch_size, input_shape[-2]), device=keypoints.device)
layer_output = self.transformer_layers[layer_index](
descriptors,
keypoints,
attention_mask=extended_attention_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
)
descriptors, hidden_states, attention = layer_output
if output_hidden_states:
all_hidden_states = all_hidden_states + hidden_states
if output_attentions:
all_attentions = all_attentions + attention
if do_early_stop:
if layer_index < self.num_layers - 1:
# Get the confidence of the keypoints for the current layer
keypoint_confidences = self.token_confidence[layer_index](descriptors)
# Determine which pairs of images should be early stopped based on the confidence of the keypoints for
# the current layer.
early_stopped_pairs = self._get_early_stopped_image_pairs(
keypoint_confidences, layer_index, mask, num_points=num_points_per_pair
)
else:
# Early stopping always occurs at the last layer
early_stopped_pairs = torch.ones(batch_size, dtype=torch.bool)
if torch.any(early_stopped_pairs):
# If a pair of images is considered early stopped, we compute the matches for the remaining
# keypoints and stop the forward pass through the transformer layers for this pair of images.
early_stops = early_stopped_pairs.repeat_interleave(2)
early_stopped_image_indices = image_indices[early_stops]
early_stopped_matches, early_stopped_matching_scores = self._get_keypoint_matching(
descriptors, mask, layer_index, early_stops=early_stops
)
early_stops_indices.extend(list(early_stopped_image_indices))
matches.extend(list(early_stopped_matches))
matching_scores.extend(list(early_stopped_matching_scores))
if do_keypoint_pruning:
final_pruned_keypoints_indices.extend(list(pruned_keypoints_indices[early_stops]))
final_pruned_keypoints_iterations.extend(list(pruned_keypoints_iterations[early_stops]))
# Remove image pairs that have been early stopped from the forward pass
num_points_per_pair = num_points_per_pair[~early_stopped_pairs]
descriptors, keypoints_0, keypoint_1, mask, image_indices = tuple(
tensor[~early_stops]
for tensor in [descriptors, keypoints[0], keypoints[1], mask, image_indices]
)
keypoints = (keypoints_0, keypoint_1)
if do_keypoint_pruning:
pruned_keypoints_indices, pruned_keypoints_iterations, keypoint_confidences = tuple(
tensor[~early_stops]
for tensor in [
pruned_keypoints_indices,
pruned_keypoints_iterations,
keypoint_confidences,
]
)
# If all pairs of images are early stopped, we stop the forward pass through the transformer
# layers for all pairs of images.
if torch.all(early_stopped_pairs):
break
if do_keypoint_pruning:
# Prune keypoints from the input of the transformer layers for the next iterations if the confidence of
# the keypoints is below a certain threshold.
descriptors, keypoints, pruned_keypoints_indices, mask, pruned_keypoints_iterations = (
self._do_layer_keypoint_pruning(
descriptors,
keypoints,
mask,
pruned_keypoints_indices,
pruned_keypoints_iterations,
keypoint_confidences,
layer_index,
)
)
if do_early_stop and do_keypoint_pruning:
# Concatenate early stopped outputs together and perform final keypoint pruning
final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores = (
self._concat_early_stopped_outputs(
early_stops_indices,
final_pruned_keypoints_indices,
final_pruned_keypoints_iterations,
matches,
matching_scores,
)
)
matches, matching_scores = self._do_final_keypoint_pruning(
final_pruned_keypoints_indices,
matches,
matching_scores,
initial_num_keypoints,
)
else:
matches, matching_scores = self._get_keypoint_matching(descriptors, mask, self.num_layers - 1)
final_pruned_keypoints_iterations = torch.ones_like(matching_scores) * self.num_layers
final_pruned_keypoints_iterations = final_pruned_keypoints_iterations.reshape(
batch_size, 2, initial_num_keypoints
)
return (
matches,
matching_scores,
final_pruned_keypoints_iterations,
all_hidden_states,
all_attentions,
)
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> Union[tuple, "LightGlueKeypointMatchingOutput"]:
loss = None
if labels is not None:
raise ValueError("LightGlue is not trainable, no labels should be provided.")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if pixel_values.ndim != 5 or pixel_values.size(1) != 2:
raise ValueError("Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)")
batch_size, _, channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * 2, channels, height, width)
keypoint_detections = self.keypoint_detector(pixel_values)
keypoints, _, descriptors, mask = keypoint_detections[:4]
keypoints = keypoints.reshape(batch_size, 2, -1, 2).to(pixel_values)
descriptors = descriptors.reshape(batch_size, 2, -1, self.keypoint_detector_descriptor_dim).to(pixel_values)
mask = mask.reshape(batch_size, 2, -1)
absolute_keypoints = keypoints.clone()
absolute_keypoints[:, :, :, 0] = absolute_keypoints[:, :, :, 0] * width
absolute_keypoints[:, :, :, 1] = absolute_keypoints[:, :, :, 1] * height
matches, matching_scores, prune, hidden_states, attentions = self._match_image_pair(
absolute_keypoints,
descriptors,
height,
width,
mask=mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
return LightGlueKeypointMatchingOutput(
loss=loss,
matches=matches,
matching_scores=matching_scores,
keypoints=keypoints,
prune=prune,
mask=mask,
hidden_states=hidden_states,
attentions=attentions,
)
__all__ = [
"LightGluePreTrainedModel",
"LightGlueForKeypointMatching",
"LightGlueConfig",
"LightGlueImageProcessor",
"LightGlueImageProcessorFast",
]
| LightGlueForKeypointMatching |
python | etianen__django-reversion | tests/test_app/models.py | {
"start": 2849,
"end": 2981
} | class ____(models.Model):
name = models.CharField(
max_length=191,
unique=True,
)
| TestModelWithUniqueConstraint |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_display_units03.py | {
"start": 315,
"end": 1206
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_display_units03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56159232, 61364096]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.set_y_axis({"display_units": "thousands", "display_units_visible": 0})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Pylons__pyramid | tests/test_tweens.py | {
"start": 47,
"end": 3163
} | class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _makeOne(self, handler, registry=None):
from pyramid.tweens import excview_tween_factory
if registry is None:
registry = self.config.registry
return excview_tween_factory(handler, registry)
def test_it_passthrough_no_exception(self):
dummy_response = DummyResponse()
def handler(request):
return dummy_response
tween = self._makeOne(handler)
request = DummyRequest()
result = tween(request)
self.assertTrue(result is dummy_response)
self.assertIsNone(request.exception)
self.assertIsNone(request.exc_info)
def test_it_catches_notfound(self):
from pyramid.httpexceptions import HTTPNotFound
from pyramid.request import Request
self.config.add_notfound_view(lambda exc, request: exc)
def handler(request):
raise HTTPNotFound
tween = self._makeOne(handler)
request = Request.blank('/')
request.registry = self.config.registry
result = tween(request)
self.assertEqual(result.status, '404 Not Found')
self.assertIsInstance(request.exception, HTTPNotFound)
self.assertEqual(request.exception, request.exc_info[1])
def test_it_catches_with_predicate(self):
from pyramid.request import Request
from pyramid.response import Response
def excview(request):
return Response('foo')
self.config.add_view(excview, context=ValueError, request_method='GET')
def handler(request):
raise ValueError
tween = self._makeOne(handler)
request = Request.blank('/')
request.registry = self.config.registry
result = tween(request)
self.assertTrue(b'foo' in result.body)
self.assertIsInstance(request.exception, ValueError)
self.assertEqual(request.exception, request.exc_info[1])
def test_it_reraises_on_mismatch(self):
from pyramid.request import Request
def excview(request): # pragma: no cover
pass
self.config.add_view(excview, context=ValueError, request_method='GET')
def handler(request):
raise ValueError
tween = self._makeOne(handler)
request = Request.blank('/')
request.registry = self.config.registry
request.method = 'POST'
self.assertRaises(ValueError, lambda: tween(request))
self.assertIsNone(request.exception)
self.assertIsNone(request.exc_info)
def test_it_reraises_on_no_match(self):
from pyramid.request import Request
def handler(request):
raise ValueError
tween = self._makeOne(handler)
request = Request.blank('/')
request.registry = self.config.registry
self.assertRaises(ValueError, lambda: tween(request))
self.assertIsNone(request.exception)
self.assertIsNone(request.exc_info)
| Test_excview_tween_factory |
python | doocs__leetcode | solution/1500-1599/1566.Detect Pattern of Length M Repeated K or More Times/Solution.py | {
"start": 0,
"end": 406
} | class ____:
def containsPattern(self, arr: List[int], m: int, k: int) -> bool:
if len(arr) < m * k:
return False
cnt, target = 0, (k - 1) * m
for i in range(m, len(arr)):
if arr[i] == arr[i - m]:
cnt += 1
if cnt == target:
return True
else:
cnt = 0
return False
| Solution |
python | huggingface__transformers | src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py | {
"start": 2085,
"end": 8080
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
) -> torch.Tensor:
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
# NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RobertaPreLayerNorm
| RobertaPreLayerNormEmbeddings |
python | apache__thrift | test/py/TestRenderedDoubleConstants.py | {
"start": 999,
"end": 10576
} | class ____(unittest.TestCase):
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST = \
"failed to verify a double constant generated by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_LIST_TEST =\
"failed to verify a list item by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_TYPE_CHECKS = "the rendered variable with name %s is not of double type"
# to make sure the variables inside Thrift files are generated correctly
def test_rendered_double_constants(self):
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1.0
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100.0
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS,
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST")
# to make sure the variables inside Thrift files are generated correctly
def test_rendered_double_list(self):
EXPECTED_DOUBLE_LIST = [1.0, -100.0, 100.0, 9223372036854775807.0, -9223372036854775807.0, 3.14159265359,
1000000.1, -1000000.1, 1.7e+308, -1.7e+308, 9223372036854775816.43,
-9223372036854775816.43]
self.assertEqual(len(constants.DOUBLE_LIST_TEST), len(EXPECTED_DOUBLE_LIST))
for i, expectedValue in enumerate(EXPECTED_DOUBLE_LIST):
self.assertAlmostEqual(constants.DOUBLE_LIST_TEST[i], expectedValue, places=7)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestRenderedDoubleConstants))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| TestRenderedDoubleConstants |
python | kamyu104__LeetCode-Solutions | Python/falling-squares.py | {
"start": 106,
"end": 652
} | class ____(object):
def fallingSquares(self, positions):
result = []
pos = [-1]
heights = [0]
maxH = 0
for left, side in positions:
l = bisect.bisect_right(pos, left)
r = bisect.bisect_left(pos, left+side)
high = max(heights[l-1:r] or [0]) + side
pos[l:r] = [left, left+side] # Time: O(n)
heights[l:r] = [high, heights[r-1]] # Time: O(n)
maxH = max(maxH, high)
result.append(maxH)
return result
| Solution |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_object.py | {
"start": 12031,
"end": 13218
} | class ____(pd.Index):
# Simple index subclass that tracks ops calls.
_calls: int
@classmethod
def _simple_new(cls, values, name=None, dtype=None):
result = object.__new__(cls)
result._data = values
result._name = name
result._calls = 0
result._reset_identity()
return result
def __add__(self, other):
self._calls += 1
return self._simple_new(self._data)
def __radd__(self, other):
return self.__add__(other)
@pytest.mark.parametrize(
"other",
[
[datetime.timedelta(1), datetime.timedelta(2)],
[datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)],
[pd.Period("2000"), pd.Period("2001")],
["a", "b"],
],
ids=["timedelta", "datetime", "period", "object"],
)
def test_index_ops_defer_to_unknown_subclasses(other):
# https://github.com/pandas-dev/pandas/issues/31109
values = np.array(
[datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)], dtype=object
)
a = MyIndex._simple_new(values)
other = pd.Index(other)
result = other + a
assert isinstance(result, MyIndex)
assert a._calls == 1
| MyIndex |
python | readthedocs__readthedocs.org | readthedocs/core/filters.py | {
"start": 267,
"end": 699
} | class ____(FilterSet):
"""
Filterset that supports empty querysets.
By default, unbound filter forms result in none of the filters functioning.
Instead, we want filters to always work, even when there is no filter data
passed in from the view/request.
"""
def __init__(self, data=None, **kwargs):
if data is None:
data = {}
super().__init__(data=data, **kwargs)
| ModelFilterSet |
python | neetcode-gh__leetcode | python/0073-set-matrix-zeroes.py | {
"start": 0,
"end": 843
} | class ____:
def setZeroes(self, matrix: List[List[int]]) -> None:
# O(1)
ROWS, COLS = len(matrix), len(matrix[0])
rowZero = False
# determine which rows/cols need to be zero
for r in range(ROWS):
for c in range(COLS):
if matrix[r][c] == 0:
matrix[0][c] = 0
if r > 0:
matrix[r][0] = 0
else:
rowZero = True
for r in range(1, ROWS):
for c in range(1, COLS):
if matrix[0][c] == 0 or matrix[r][0] == 0:
matrix[r][c] = 0
if matrix[0][0] == 0:
for r in range(ROWS):
matrix[r][0] = 0
if rowZero:
for c in range(COLS):
matrix[0][c] = 0
| Solution |
python | huggingface__transformers | src/transformers/models/ibert/modeling_ibert.py | {
"start": 43938,
"end": 48027
} | class ____(IBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.ibert = IBertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[QuestionAnsweringModelOutput, tuple[torch.FloatTensor]]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ibert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's *utils.make_positions*.
Args:
input_ids (`torch.LongTensor`):
Indices of input sequence tokens in the vocabulary.
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
__all__ = [
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
| IBertForQuestionAnswering |
python | Lightning-AI__lightning | examples/pytorch/basics/autoencoder.py | {
"start": 5659,
"end": 7196
} | class ____(LightningDataModule):
def __init__(self, batch_size: int = 32):
super().__init__()
dataset = MNIST(DATASETS_PATH, train=True, download=True, transform=transforms.ToTensor())
self.mnist_test = MNIST(DATASETS_PATH, train=False, download=True, transform=transforms.ToTensor())
self.mnist_train, self.mnist_val = random_split(
dataset, [55000, 5000], generator=torch.Generator().manual_seed(42)
)
self.batch_size = batch_size
def train_dataloader(self):
return DataLoader(self.mnist_train, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.mnist_test, batch_size=self.batch_size)
def predict_dataloader(self):
return DataLoader(self.mnist_test, batch_size=self.batch_size)
def cli_main():
cli = LightningCLI(
LitAutoEncoder,
MyDataModule,
seed_everything_default=1234,
run=False, # used to de-activate automatic fitting.
trainer_defaults={"callbacks": ImageSampler(), "max_epochs": 10},
save_config_kwargs={"overwrite": True},
)
cli.trainer.fit(cli.model, datamodule=cli.datamodule)
cli.trainer.test(ckpt_path="best", datamodule=cli.datamodule)
predictions = cli.trainer.predict(ckpt_path="best", datamodule=cli.datamodule)
print(predictions[0])
if __name__ == "__main__":
cli_lightning_logo()
cli_main()
| MyDataModule |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-deepseek/llama_index/llms/deepseek/base.py | {
"start": 186,
"end": 1599
} | class ____(OpenAILike):
"""
DeepSeek LLM.
Examples:
`pip install llama-index-llms-deepseek`
```python
from llama_index.llms.deepseek import DeepSeek
# Set up the DeepSeek class with the required model and API key
llm = DeepSeek(model="deepseek-chat", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Explain the importance of low latency LLMs")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.deepseek.com",
**openai_llm_kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("DEEPSEEK_API_KEY", None)
context_window = openai_llm_kwargs.pop(
"context_window", get_context_window(model)
)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=openai_llm_kwargs.pop("is_chat_model", True),
is_function_calling_model=openai_llm_kwargs.pop(
"is_function_calling_model", model in FUNCTION_CALLING_MODELS
),
context_window=context_window,
**openai_llm_kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "DeepSeek"
| DeepSeek |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/hooks/databricks_base.py | {
"start": 36490,
"end": 36864
} | class ____(aiohttp.BasicAuth):
"""aiohttp only ships BasicAuth, for Bearer auth we need a subclass of BasicAuth."""
def __new__(cls, token: str) -> BearerAuth:
return super().__new__(cls, token) # type: ignore
def __init__(self, token: str) -> None:
self.token = token
def encode(self) -> str:
return f"Bearer {self.token}"
| BearerAuth |
python | ray-project__ray | release/ray_release/exception.py | {
"start": 3944,
"end": 4050
} | class ____(ReleaseTestError):
exit_code = ExitCode.CLUSTER_STARTUP_TIMEOUT
| JobTerminatedBeforeStartError |
python | ray-project__ray | rllib/algorithms/dreamerv3/torch/models/components/conv_transpose_atari.py | {
"start": 506,
"end": 3884
} | class ____(nn.Module):
"""A Conv2DTranspose decoder to generate Atari images from a latent space.
Wraps an initial single linear layer with a stack of 4 Conv2DTranspose layers (with
layer normalization) and a diag Gaussian, from which we then sample the final image.
"""
def __init__(
self,
*,
input_size: int,
model_size: str = "XS",
cnn_multiplier: Optional[int] = None,
gray_scaled: bool,
):
"""Initializes a ConvTransposeAtari instance.
Args:
input_size: The input size of the ConvTransposeAtari network.
model_size: The "Model Size" used according to [1] Appendinx B.
Use None for manually setting the `cnn_multiplier`.
cnn_multiplier: Optional override for the additional factor used to multiply
the number of filters with each CNN transpose layer. Starting with
8 * `cnn_multiplier` filters in the first CNN transpose layer, the
number of filters then decreases via `4*cnn_multiplier`,
`2*cnn_multiplier`, till `1*cnn_multiplier`.
gray_scaled: Whether the last Conv2DTranspose layer's output has only 1
color channel (gray_scaled=True) or 3 RGB channels (gray_scaled=False).
"""
super().__init__()
cnn_multiplier = get_cnn_multiplier(model_size, override=cnn_multiplier)
self.gray_scaled = gray_scaled
config = CNNTransposeHeadConfig(
input_dims=[input_size],
initial_image_dims=(4, 4, 8 * cnn_multiplier),
initial_dense_weights_initializer=dreamerv3_normal_initializer,
cnn_transpose_filter_specifiers=[
[4 * cnn_multiplier, 4, 2],
[2 * cnn_multiplier, 4, 2],
[1 * cnn_multiplier, 4, 2],
[1 if self.gray_scaled else 3, 4, 2],
],
cnn_transpose_use_bias=False,
cnn_transpose_use_layernorm=True,
cnn_transpose_activation="silu",
cnn_transpose_kernel_initializer=dreamerv3_normal_initializer,
)
# Make sure the output dims match Atari.
# assert config.output_dims == (64, 64, 1 if self.gray_scaled else 3)
self._transpose_2d_head = config.build(framework="torch")
def forward(self, h, z):
"""Performs a forward pass through the Conv2D transpose decoder.
Args:
h: The deterministic hidden state of the sequence model.
z: The sequence of stochastic discrete representations of the original
observation input. Note: `z` is not used for the dynamics predictor
model (which predicts z from h).
"""
z_shape = z.size()
z = z.view(z_shape[0], -1)
input_ = torch.cat([h, z], dim=-1)
out = self._transpose_2d_head(input_)
# Interpret output as means of a diag-Gaussian with std=1.0:
# From [2]:
# "Distributions: The image predictor outputs the mean of a diagonal Gaussian
# likelihood with unit variance, ..."
# Reshape `out` for the diagonal multi-variate Gaussian (each pixel is its own
# independent (b/c diagonal co-variance matrix) variable).
loc = torch.reshape(out, (z_shape[0], -1))
return loc
| ConvTransposeAtari |
python | django__django | tests/prefetch_related/models.py | {
"start": 8147,
"end": 8362
} | class ____(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=20)
people = models.ManyToManyField(Person, related_name="pets")
| Pet |
python | great-expectations__great_expectations | great_expectations/types/fonts.py | {
"start": 60,
"end": 188
} | class ____(Enum):
MONTSERRAT = "Montserrat"
ROBOTO_MONO = "Roboto Mono"
SOURCE_SANS_PRO = "Source Sans Pro"
| FontFamily |
python | davidhalter__parso | parso/python/errors.py | {
"start": 25093,
"end": 25415
} | class ____(SyntaxRule):
# e.g. from foo import a,
message = "trailing comma not allowed without surrounding parentheses"
def is_issue(self, node):
if node.children[-1] == ',' and node.parent.children[-1] != ')':
return True
@ErrorFinder.register_rule(type='import_from')
| _TrailingImportComma |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/savedsearch.py | {
"start": 1154,
"end": 1485
} | class ____(BaseOrganizationSearchSerializer):
"""
Organization members may only set visibility to Visibility.OWNER
"""
visibility = serializers.ChoiceField(
choices=select_visibility_choices([Visibility.OWNER]),
default=Visibility.OWNER,
required=False,
)
| OrganizationSearchMemberSerializer |
python | django__django | tests/admin_docs/tests.py | {
"start": 643,
"end": 687
} | class ____(TestCase):
pass
| AdminDocsTestCase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.