language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | airflow-core/src/airflow/utils/types.py | {
"start": 1664,
"end": 2442
} | class ____(enum.Enum):
"""Class with TriggeredBy types for DagRun."""
CLI = "cli" # for the trigger subcommand of the CLI: airflow dags trigger
OPERATOR = "operator" # for the TriggerDagRunOperator
REST_API = "rest_api" # for triggering the DAG via RESTful API
UI = "ui" # for clicking the `Trigger DAG` button
TEST = "test" # for dag.test()
TIMETABLE = "timetable" # for timetable based triggering
ASSET = "asset" # for asset_triggered run type
BACKFILL = "backfill"
add_deprecated_classes(
{
__name__: {
"ArgNotSet": "airflow.serialization.definitions.notset.ArgNotSet",
"NOTSET": "airflow.serialization.definitions.notset.ArgNotSet",
},
},
package=__name__,
)
| DagRunTriggeredByType |
python | huggingface__transformers | src/transformers/models/phimoe/modeling_phimoe.py | {
"start": 21782,
"end": 23475
} | class ____(nn.Module):
"""
This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drop tokens at the cost of reduced performance or (2) set
capacity factor to number of experts and thus waste computation
and memory on padding.
"""
def __init__(self, config):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size
self.num_experts = config.num_local_experts
self.top_k = config.num_experts_per_tok
self.router = PhimoeTopKRouter(config)
self.experts = PhimoeExperts(config)
self.input_jitter_noise = config.input_jitter_noise
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, sequence_length, hidden_dim = hidden_states.shape
if self.training and self.input_jitter_noise > 0:
hidden_states *= torch.empty_like(hidden_states).uniform_(
1.0 - self.input_jitter_noise, 1.0 + self.input_jitter_noise
)
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.reshape(-1, hidden_dim)
routing_weights, selected_experts = self.router(hidden_states)
final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights)
return final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
@use_kernel_forward_from_hub("RMSNorm")
| PhimoeSparseMoeBlock |
python | wntrblm__nox | nox/manifest.py | {
"start": 1305,
"end": 16153
} | class ____:
"""Session manifest.
The session manifest provides the source of truth for the sequence of
sessions that should be run by Nox.
It is possible for this to be mutated during execution. This allows for
useful use cases, such as for one session to "notify" another or
"chain" to another.
Args:
session_functions (Mapping[str, function]): The registry of discovered
session functions.
global_config (.nox.main.GlobalConfig): The global configuration.
module_docstring (Optional[str]): The user noxfile.py docstring.
Defaults to `None`.
"""
def __init__(
self,
session_functions: Mapping[str, Func],
global_config: argparse.Namespace,
module_docstring: str | None = None,
) -> None:
self._all_sessions: list[SessionRunner] = []
self._queue: list[SessionRunner] = []
self._consumed: list[SessionRunner] = []
self._config: argparse.Namespace = global_config
self.module_docstring: str | None = module_docstring
# Create the sessions based on the provided session functions.
for name, func in session_functions.items():
for session in self.make_session(name, func):
self.add_session(session)
def __contains__(self, needle: str | SessionRunner) -> bool:
if needle in self._queue or needle in self._consumed:
return True
for session in self._queue + self._consumed:
if session.name == needle or needle in session.signatures:
return True
return False
def __iter__(self) -> Manifest:
return self
def __getitem__(self, key: str) -> SessionRunner:
for session in self._queue + self._consumed:
if session.name == key or key in session.signatures:
return session
raise KeyError(key)
def __next__(self) -> SessionRunner:
"""Return the next item in the queue.
Raises:
StopIteration: If the queue has been entirely consumed.
"""
if not len(self._queue):
raise StopIteration
session = self._queue.pop(0)
self._consumed.append(session)
return session
def __len__(self) -> int:
return len(self._queue) + len(self._consumed)
def list_all_sessions(self) -> Iterator[tuple[SessionRunner, bool]]:
"""Yields all sessions and whether or not they're selected."""
for session in self._all_sessions:
yield session, session in self._queue
@property
def all_sessions_by_signature(self) -> dict[str, SessionRunner]:
return {
signature: session
for session in self._all_sessions
for signature in session.signatures
}
@property
def parametrized_sessions_by_name(self) -> dict[str, list[SessionRunner]]:
"""Returns a mapping from names to all sessions that are parameterizations of
the ``@session`` with each name.
The sessions in each returned list will occur in the same order as they occur in
``self._all_sessions``.
"""
parametrized_sessions = filter(operator.attrgetter("multi"), self._all_sessions)
key = operator.attrgetter("name")
# Note that ``sorted`` uses a stable sorting algorithm.
return {
name: list(sessions_parametrizing_name)
for name, sessions_parametrizing_name in itertools.groupby(
sorted(parametrized_sessions, key=key), key
)
}
def add_session(self, session: SessionRunner) -> None:
"""Add the given session to the manifest.
Args:
session (~nox.sessions.Session): A session object, such as
one returned from ``make_session``.
"""
if session not in self._all_sessions:
self._all_sessions.append(session)
if session not in self._queue:
self._queue.append(session)
def filter_by_name(self, specified_sessions: Iterable[str]) -> None:
"""Filter sessions in the queue based on the user-specified names.
Args:
specified_sessions (Sequence[str]): A list of specified
session names.
Raises:
KeyError: If any explicitly listed sessions are not found.
"""
# Filter the sessions remaining in the queue based on
# whether they are individually specified.
self._queue = [
session
for session_name in specified_sessions
for session in self._queue
if _normalized_session_match(session_name, session)
]
# If a session was requested and was not found, complain loudly.
all_sessions = set(
map(
_normalize_arg,
(
itertools.chain(
[x.name for x in self._all_sessions if x.name],
*[x.signatures for x in self._all_sessions],
)
),
)
)
missing_sessions = [
session_name
for session_name in specified_sessions
if _normalize_arg(session_name) not in all_sessions
]
if missing_sessions:
msg = f"Sessions not found: {', '.join(missing_sessions)}"
raise KeyError(msg)
def filter_by_default(self) -> None:
"""Filter sessions in the queue based on the default flag."""
self._queue = [x for x in self._queue if x.func.default]
def filter_by_python_interpreter(self, specified_pythons: Sequence[str]) -> None:
"""Filter sessions in the queue based on the user-specified
python interpreter versions.
Args:
specified_pythons (Sequence[str]): A list of specified
python interpreter versions.
"""
self._queue = [x for x in self._queue if x.func.python in specified_pythons]
def filter_by_keywords(self, keywords: str) -> None:
"""Filter sessions using pytest-like keyword expressions.
Args:
keywords (str): A Python expression of keywords which
session names are checked against.
"""
self._queue = [
x
for x in self._queue
if keyword_match(keywords, [*x.signatures, *x.tags, x.name])
]
def filter_by_tags(self, tags: Iterable[str]) -> None:
"""Filter sessions by their tags.
Args:
tags (list[str]): A list of tags which session names
are checked against.
"""
self._queue = [x for x in self._queue if set(x.tags).intersection(tags)]
def add_dependencies(self) -> None:
"""Add direct and recursive dependencies to the queue.
Raises:
KeyError: If any depended-on sessions are not found.
~nox._resolver.CycleError: If a dependency cycle is encountered.
"""
sessions_by_id = self.all_sessions_by_signature
# For each session that was parametrized from a list of Pythons, create a fake
# parent session that depends on it.
parent_sessions: set[SessionRunner] = set()
for (
parent_name,
parametrized_sessions,
) in self.parametrized_sessions_by_name.items():
parent_func = _null_session_func.copy()
parent_func.requires = [
session.signatures[0] for session in parametrized_sessions
]
parent_session = SessionRunner(
parent_name, [], parent_func, self._config, self, multi=False
)
parent_sessions.add(parent_session)
sessions_by_id[parent_name] = parent_session
# Construct the dependency graph. Note that this is done lazily with iterators
# so that we won't raise if a session that doesn't actually need to run declares
# missing/improper dependencies.
dependency_graph = {
session: session.get_direct_dependencies(sessions_by_id)
for session in sessions_by_id.values()
}
# Resolve the dependency graph.
root = cast("SessionRunner", object()) # sentinel
try:
resolved_graph = list(
lazy_stable_topo_sort({**dependency_graph, root: self._queue}, root)
)
except CycleError as exc:
raise CycleError(
"Sessions are in a dependency cycle: "
+ " -> ".join(session.name for session in exc.args[1])
) from exc
# Remove fake parent sessions from the resolved graph.
self._queue = [
session for session in resolved_graph if session not in parent_sessions
]
def make_session(
self, name: str, func: Func, *, multi: bool = False
) -> list[SessionRunner]:
"""Create a session object from the session function.
Args:
name (str): The name of the session.
func (function): The session function.
multi (bool): Whether the function is a member of a set of sessions
with different interpreters.
Returns:
Sequence[~nox.session.Session]: A sequence of Session objects
bound to this manifest and configuration.
"""
sessions = []
# If the backend is "none", we won't parametrize `python`.
backend = (
self._config.force_venv_backend
or func.venv_backend
or self._config.default_venv_backend
)
if backend == "none" and isinstance(func.python, (list, tuple, set)):
# we can not log a warning here since the session is maybe deselected.
# instead let's set a flag, to warn later when session is actually run.
func.should_warn[WARN_PYTHONS_IGNORED] = func.python
func.python = False
if self._config.extra_pythons:
# If extra python is provided, expand the func.python list to
# include additional python interpreters
extra_pythons: list[str] = self._config.extra_pythons
if isinstance(func.python, (list, tuple, set)):
func.python = _unique_list(*func.python, *extra_pythons)
elif not multi and func.python:
# If this is multi, but there is only a single interpreter, it
# is the reentrant case. The extra_python interpreter shouldn't
# be added in that case. If func.python is False, the session
# has no backend; if None, it uses the same interpreter as Nox.
# Otherwise, add the extra specified python.
assert isinstance(func.python, str)
func.python = _unique_list(func.python, *extra_pythons)
elif not func.python and self._config.force_pythons:
# If a python is forced by the user, but the underlying function
# has no version parametrised, add it as sole occupant to func.python
func.python = _unique_list(*extra_pythons)
# If the func has the python attribute set to a list, we'll need
# to expand them.
if isinstance(func.python, (list, tuple, set)):
for python in func.python:
single_func = func.copy()
single_func.python = python
sessions.extend(self.make_session(name, single_func, multi=True))
return sessions
# Simple case: If this function is not parametrized, then make
# a simple session.
if not hasattr(func, "parametrize"):
long_names = []
if not multi:
long_names.append(name)
if func.python:
long_names.append(f"{name}-{func.python}")
return [
SessionRunner(name, long_names, func, self._config, self, multi=multi)
]
# Since this function is parametrized, we need to add a distinct
# session for each permutation.
parametrize = func.parametrize
calls = Call.generate_calls(func, parametrize)
for call in calls:
long_names = []
if not multi or (
self._config.force_pythons and call.python in self._config.extra_pythons
):
long_names.append(f"{name}{call.session_signature}")
if func.python:
long_names.append(f"{name}-{func.python}{call.session_signature}")
# Ensure that specifying session-python will run all parameterizations.
long_names.append(f"{name}-{func.python}")
sessions.append(
SessionRunner(name, long_names, call, self._config, self, multi=multi)
)
# Edge case: If the parameters made it such that there were no valid
# calls, add an empty, do-nothing session.
if not calls:
sessions.append(
SessionRunner(
name, [], _null_session_func, self._config, self, multi=multi
)
)
# Return the list of sessions.
return sessions
def next(self) -> SessionRunner:
return next(self)
def notify(
self, session: str | SessionRunner, posargs: Iterable[str] | None = None
) -> bool:
"""Enqueue the specified session in the queue.
If the session is already in the queue, or has been run already,
then this is a no-op.
Args:
session (Union[str, ~nox.session.Session]): The session to be
enqueued.
posargs (Optional[List[str]]): If given, sets the positional
arguments *only* for the queued session. Otherwise, the
standard globally available positional arguments will be
used instead.
Returns:
bool: Whether the session was added to the queue.
Raises:
ValueError: If the session was not found.
"""
# Sanity check: If this session is already in the queue, this is
# a no-op.
if session in self:
return False
# Locate the session in the list of all sessions, and place it at
# the end of the queue.
for s in self._all_sessions:
if s == session or s.name == session or session in s.signatures: # noqa: PLR1714
if posargs is not None:
s.posargs = list(posargs)
self._queue.append(s)
return True
# The session was not found in the list of sessions.
msg = f"Session {session} not found."
raise ValueError(msg)
| Manifest |
python | pytorch__pytorch | torch/autograd/profiler.py | {
"start": 44056,
"end": 47051
} | class ____:
"""Raises an error if a key is seen more than once."""
def __init__(self):
self.seen = set()
def see(self, *key):
r"""
Observe a key and raise an error if it is seen multiple times.
"""
if key in self.seen:
raise RuntimeError("duplicate key: " + str(key))
self.seen.add(key)
def parse_nvprof_trace(path):
import sqlite3
conn = sqlite3.connect(path)
conn.row_factory = sqlite3.Row
# Parse strings table
strings = {}
for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
strings[r["id"]] = torch._C._demangle(r["value"])
# First, find all functions and create FunctionEvents for them
marker_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
WHERE
start.name != 0 AND end.name = 0
"""
functions = []
functions_map = {}
unique = EnforceUnique()
for row in conn.execute(marker_query):
unique.see(row["marker_id"])
evt = FunctionEvent(
id=row["marker_id"],
node_id=0, # missing a node_id when calling FunctionEvent. This is just to ensure
# that pytorch doesn't crash when creating a FunctionEvent() object
name=strings[row["name"]],
start_us=row["start_time"],
end_us=row["end_time"],
thread=0,
) # TODO: find in sqlite database
functions.append(evt)
functions_map[evt.id] = evt
# Now, correlate all kernels with FunctionEvents
kernel_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp, end.timestamp,
runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,
kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start
INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime
ON (start.timestamp < runtime.start AND runtime.end < end.timestamp)
INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel
ON kernel.correlationId = runtime.correlationId
"""
unique = EnforceUnique()
for row in conn.execute(kernel_query):
unique.see(row["marker_id"], row["runtime_id"])
# 211 is cudaKernelLaunch for cuda >= 9.2
if row["cbid"] != 211:
raise AssertionError(f"Expected cbid to be 211, but got {row['cbid']}")
evt = functions_map[row["marker_id"]]
evt.append_kernel(
row["kernel_name"], 0, row["kernel_end"] - row["kernel_start"]
)
functions.sort(key=lambda evt: evt.time_range.start)
return functions
| EnforceUnique |
python | realpython__materials | python-313/repl/power_factory.py | {
"start": 0,
"end": 255
} | class ____:
"""Create instances that can calculate powers."""
def __init__(self, exponent):
self.exponent = exponent
def __call__(self, number):
return number**self.exponent
cubed = PowerFactory(3)
print(cubed(13))
| PowerFactory |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 2773,
"end": 3607
} | class ____(unittest.TestCase):
def test_retrying_repr(self):
class ConcreteRetrying(tenacity.BaseRetrying):
def __call__(self, fn, *args, **kwargs):
pass
repr(ConcreteRetrying())
def test_callstate_repr(self):
rs = RetryCallState(None, None, (), {})
rs.idle_for = 1.1111111
assert repr(rs).endswith("attempt #1; slept for 1.11; last result: none yet>")
rs = make_retry_state(2, 5)
assert repr(rs).endswith(
"attempt #2; slept for 0.0; last result: returned None>"
)
rs = make_retry_state(
0, 0, last_result=tenacity.Future.construct(1, ValueError("aaa"), True)
)
assert repr(rs).endswith(
"attempt #0; slept for 0.0; last result: failed (ValueError aaa)>"
)
| TestBase |
python | spyder-ide__spyder | spyder/widgets/elementstable.py | {
"start": 8204,
"end": 24680
} | class ____(HoverRowsTableView):
def __init__(
self,
parent: Optional[QWidget],
highlight_hovered_row: bool = True,
add_padding_around_widgets: bool = False,
):
HoverRowsTableView.__init__(self, parent, custom_delegate=True)
# To highlight the hovered row
self._highlight_hovered_row = highlight_hovered_row
# To add padding around widgets. This is necessary in case widgets are
# too small, e.g. when they are checkboxes or radiobuttons.
self._add_padding_around_widgets = add_padding_around_widgets
# To keep a reference to the table's elements
self.elements: List[Element] | None = None
# To keep track of the current row widget (e.g. a checkbox) in order to
# change its background color when its row is hovered.
self._current_row = -1
self._current_row_widget = None
# To make adjustments when the widget is shown
self._is_shown = False
# To use these widths where necessary
self._info_column_width = 0
self._widgets_column_width = 0
# ---- Public API
# -------------------------------------------------------------------------
def setup_elements(
self, elements: List[Element], set_layout: bool = False
):
"""Setup a list of Elements in the table."""
self.elements = elements
# Check for additional features
self._with_description = self._with_feature('description')
self._with_icons = self._with_feature('icon')
self._with_additional_info = self._with_feature('additional_info')
self._with_widgets = self._with_feature('widget')
# This is used to paint the entire row's background color when its
# hovered.
if self._highlight_hovered_row:
self.sig_hover_index_changed.connect(self._on_hover_index_changed)
# Set models
self.model = ElementsModel(
self,
self.elements,
self._with_description,
self._with_icons,
self._with_additional_info,
self._with_widgets
)
self.proxy_model = SortElementsFilterProxy(self)
self.proxy_model.setSourceModel(self.model)
self.proxy_model.setDynamicSortFilter(True)
self.proxy_model.setFilterKeyColumn(0)
self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.proxy_model.setSortRole(Qt.UserRole)
self.setModel(self.proxy_model)
# Adjustments for the title column
title_delegate = HTMLDelegate(
self,
margin=(
3 * AppStyle.MarginSize
if self._with_description
else 2 * AppStyle.MarginSize
),
wrap_text=True
)
self.setItemDelegateForColumn(
self.model.columns['title'], title_delegate
)
if self._highlight_hovered_row:
self.sig_hover_index_changed.connect(
title_delegate.on_hover_index_changed
)
# Adjustments for the additional info column
if self._with_additional_info:
info_delegate = HTMLDelegate(
self,
margin=(
3 * AppStyle.MarginSize
if self._with_description
else 2 * AppStyle.MarginSize
),
align_vcenter=True
)
self.setItemDelegateForColumn(
self.model.columns['additional_info'], info_delegate
)
if self._highlight_hovered_row:
self.sig_hover_index_changed.connect(
info_delegate.on_hover_index_changed
)
self._compute_info_column_width()
# Adjustments for the widgets column
if self._with_widgets:
widgets_delegate = HTMLDelegate(self, margin=0)
self.setItemDelegateForColumn(
self.model.columns['widgets'], widgets_delegate
)
if self._highlight_hovered_row:
self.sig_hover_index_changed.connect(
widgets_delegate.on_hover_index_changed
)
self._add_widgets()
# Make last column take the available space to the right, if necessary
stretch_last_column = True
if self._with_widgets and not self._with_additional_info:
stretch_last_column = False
self.horizontalHeader().setStretchLastSection(stretch_last_column)
# Hide headers
self.horizontalHeader().hide()
self.verticalHeader().hide()
# Set icons size
if self._with_icons:
self.setIconSize(QSize(32, 32))
# Hide grid to only paint horizontal lines with css
self.setShowGrid(False)
# Set selection behavior
self.setSelectionMode(QAbstractItemView.NoSelection)
# Set stylesheet
self._set_stylesheet()
if set_layout:
self._set_layout()
def replace_elements(
self, elements: List[Element], clear_first: bool = True
):
"""
Replace current elements by new ones.
Parameters
----------
elements: List[Element]
Elements that will be replaced.
clear_first: bool
Whether the table should be cleared before adding the new elements
"""
if clear_first:
self.clear_elements()
self.elements = elements
self.model.replace_elements(elements)
if self._with_widgets:
self._add_widgets()
self._set_layout()
def clear_elements(self):
"""Clear all elements to leave the table empty."""
self.model.clear_elements()
self._current_row_widget = None
@qdebounced(timeout=200)
def do_find(self, text: str):
"""
Filter rows that match `text` in their title, description or additional
info.
Parameters
----------
text: str
Text to filter rows with.
"""
if self._with_widgets:
# We need to do this when the table has widgets because it seems Qt
# deletes all filtered rows, which deletes their widgets too. So,
# they are unavailable to be displayed again when the filter is
# reset.
for i in range(len(self.elements)):
filter_row = self.proxy_model.filter_row(i, text)
self.setRowHidden(i, not filter_row)
else:
# This is probably more efficient, so we use it if there are no
# widgets
self.proxy_model.set_filter(text)
self._set_layout()
# ---- Private API
# -------------------------------------------------------------------------
def _on_hover_index_changed(self, index):
"""Actions to take when the index that is hovered has changed."""
row = self.proxy_model.mapToSource(index).row()
if row != self._current_row:
self._current_row = row
if self._with_widgets:
# Remove background color of previous row widget
if self._current_row_widget is not None:
self._current_row_widget.setStyleSheet("")
# Set background for the new row widget
new_row_widget = self.elements[row]["row_widget"]
new_row_widget.setStyleSheet(
f"background-color: {SpyderPalette.COLOR_BACKGROUND_3}"
)
# Set new current row widget
self._current_row_widget = new_row_widget
def _set_stylesheet(self, leave=False):
"""Set stylesheet when entering or leaving the widget."""
css = qstylizer.style.StyleSheet()
bgcolor = SpyderPalette.COLOR_BACKGROUND_1 if leave else "transparent"
css["QTableView::item"].setValues(
borderBottom=f"1px solid {SpyderPalette.COLOR_BACKGROUND_4}",
paddingLeft=f"{2 * AppStyle.MarginSize}px",
paddingRight=(
f"{AppStyle.MarginSize + 1}px"
if not self._add_padding_around_widgets
else "0px"
),
backgroundColor=bgcolor
)
self.setStyleSheet(css.toString())
def _set_layout(self):
"""
Set rows and columns layout.
This is necessary to make the table look good at different sizes.
"""
# We need to make these extra adjustments for Mac so that the last
# column is not too close to the right border
extra_width = 0
if sys.platform == 'darwin':
if self.verticalScrollBar().isVisible():
extra_width = (
AppStyle.MacScrollBarWidth +
(15 if self._with_widgets else 5)
)
else:
extra_width = 10 if self._with_widgets else 5
# Resize title column so that the table fits into the available
# horizontal space.
self._compute_info_column_width()
self._compute_widgets_column_width()
if self._info_column_width > 0 or self._widgets_column_width > 0:
title_column_width = (
self.horizontalHeader().size().width() -
(self._info_column_width + self._widgets_column_width +
extra_width)
)
self.horizontalHeader().resizeSection(
self.model.columns['title'], title_column_width
)
# Resize rows. This is done because wrapping text in HTMLDelegate's
# changes row heights in unpredictable ways.
self.resizeRowsToContents()
_set_layout_debounced = qdebounced(_set_layout, timeout=40)
"""
Debounced version of _set_layout.
Notes
-----
* We need a different version of _set_layout so that we can use the regular
one in showEvent. That way users won't experience a visual glitch when
the widget is rendered for the first time.
* We use this version in resizeEvent, where that is not a problem.
"""
def _with_feature(self, feature_name: str) -> bool:
"""Check if it's necessary to build the table with `feature_name`."""
return len([e for e in self.elements if e.get(feature_name)]) > 0
def _compute_info_column_width(self):
if self._with_additional_info:
# This is necessary to get the right width
self.resizeColumnsToContents()
self._info_column_width = self.horizontalHeader().sectionSize(
self.model.columns['additional_info']
)
def _compute_widgets_column_width(self):
if self._with_widgets:
# This is necessary to get the right width
self.resizeColumnsToContents()
# We add 10 pixels to the width computed by Qt so that the widgets
# are not so close to the right border of the table, which doesn't
# look good.
extra_width = 10
if self._with_widgets and not self._with_additional_info:
# In this case the extra width is added by the widget's
# container layout to prevent the row separator not to end in
# the table's right border.
extra_width = 0
self._widgets_column_width = (
self.horizontalHeader().sectionSize(
self.model.columns["widgets"]
)
+ extra_width
)
def _add_widgets(self):
"""Add element widgets to the table."""
for i in range(len(self.elements)):
layout = QHBoxLayout()
if self._add_padding_around_widgets:
layout.setContentsMargins(
3 * AppStyle.MarginSize,
3 * AppStyle.MarginSize,
# We add 10 pixels to the right when there's no additional
# info, so that the widgets are not so close to the border
# of the table.
3 * AppStyle.MarginSize
+ (10 if not self._with_additional_info else 0),
3 * AppStyle.MarginSize,
)
layout.addWidget(self.elements[i]['widget'])
layout.setAlignment(Qt.AlignHCenter)
else:
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.elements[i]['widget'])
# Widgets are the last column, so we prefer widgets to be
# aligned to the right border of the table.
layout.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
container_widget = QWidget(self)
container_widget.setLayout(layout)
# This key is not accounted for in Element because it's only
# used internally, so it doesn't need to provided in a list of
# Element's.
self.elements[i]['row_widget'] = container_widget
self.setIndexWidget(
self.proxy_model.index(i, self.model.columns['widgets']),
container_widget
)
# ---- Qt methods
# -------------------------------------------------------------------------
def showEvent(self, event):
if not self._is_shown:
if self.elements is not None:
self._compute_widgets_column_width()
self._set_layout()
# To not run the adjustments above every time the widget is shown
self._is_shown = True
super().showEvent(event)
def leaveEvent(self, event):
super().leaveEvent(event)
# Clear background color painted on hovered row widget
if self._current_row_widget is not None:
self._current_row_widget.setStyleSheet('')
self._set_stylesheet(leave=True)
def enterEvent(self, event):
super().enterEvent(event)
# Restore background color that's going to be painted on hovered row
if self._current_row_widget is not None:
self._current_row_widget.setStyleSheet(
f"background-color: {SpyderPalette.COLOR_BACKGROUND_3}"
)
self._set_stylesheet()
def resizeEvent(self, event):
# Notes
# -----
# * This is necessary to readjust the layout when the parent widget is
# resized.
# * We skip this when there's a single element because the table is not
# rendered as expected. In that case it's necessary to call
# set_layout directly.
if self.elements is not None and len(self.elements) > 1:
self._set_layout_debounced()
super().resizeEvent(event)
def test_elements_table():
from spyder.utils.qthelpers import qapplication
app = qapplication() # noqa
elements_with_title = [
{'title': 'IPython console', 'description': 'Execute code'},
{'title': 'Help', 'description': 'Look for help'}
]
table = ElementsTable(None, elements_with_title)
table.show()
elements_with_icons = [
{'title': 'IPython console', 'description': 'Execute code',
'icon': ima.icon('ipython_console')},
{'title': 'Help', 'description': 'Look for help',
'icon': ima.icon('help')}
]
table_with_icons = ElementsTable(None, elements_with_icons)
table_with_icons.show()
elements_with_widgets = [
{'title': 'IPython console', 'description': 'Execute code',
'icon': ima.icon('ipython_console'), 'widget': QCheckBox()},
{'title': 'Help', 'description': 'Look for help',
'icon': ima.icon('help'), 'widget': QCheckBox()}
]
table_with_widgets = ElementsTable(None, elements_with_widgets)
table_with_widgets.show()
elements_with_info = [
{'title': 'IPython console', 'description': 'Execute code',
'icon': ima.icon('ipython_console'), 'widget': QCheckBox(),
'additional_info': 'Core plugin'},
{'title': 'Help', 'description': 'Look for help',
'icon': ima.icon('help'), 'widget': QCheckBox()}
]
table_with_widgets_and_icons = ElementsTable(None, elements_with_info)
table_with_widgets_and_icons.show()
app.exec_()
if __name__ == '__main__':
test_elements_table()
| ElementsTable |
python | walkccc__LeetCode | solutions/2914. Minimum Number of Changes to Make Binary String Beautiful/2914.py | {
"start": 0,
"end": 111
} | class ____:
def minChanges(self, s: str) -> int:
return sum(a != b for a, b in zip(s[::2], s[1::2]))
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_text_editor_code_execution_tool_result_error_param.py | {
"start": 275,
"end": 616
} | class ____(TypedDict, total=False):
error_code: Required[
Literal["invalid_tool_input", "unavailable", "too_many_requests", "execution_time_exceeded", "file_not_found"]
]
type: Required[Literal["text_editor_code_execution_tool_result_error"]]
error_message: Optional[str]
| BetaTextEditorCodeExecutionToolResultErrorParam |
python | oauthlib__oauthlib | examples/device_code_flow.py | {
"start": 7892,
"end": 9830
} | class ____(ServerSetupForTokenEndpoint):
def default_flow_token_response(self, request):
url, headers, body, status = self.server.create_token_response(request)
access_token = json.loads(body).get("access_token")
# return access_token in a http response
return access_token
@rate_limit # this will raise the SlowDownError
def device_flow_token_response(self, request, device_code):
"""
Following the rfc, this will route the device request accordingly and raise
required errors.
Remember that unlike other auth flows, the device if polling this endpoint once
every "interval" amount of seconds.
"""
# using device_code arg to retrieve the correct device object instance
device = Device
if device.status == device.DeviceFlowStatus.AUTHORIZATION_PENDING:
raise AuthorizationPendingError()
# If user clicked "deny" in the /approve-deny page endpoint.
# the device gets set to 'authorized' in /approve-deny and /device checks
# if someone tries to input a code for a user code that's already been authorized
if device.status == device.DeviceFlowStatus.DENIED:
raise AccessDenied()
url, headers, body, status = self.server.create_token_response(request)
access_token = json.loads(body).get("access_token")
device.status = device.EXPIRED
# return access_token in a http response
return access_token
# Example of how token endpoint could handle the token creation depending on
# the grant type during a POST to /token.
def post(self, request):
params = request.POST
if params.get("grant_type") == "urn:ietf:params:oauth:grant-type:device_code":
return self.device_flow_token_response(request, params["device_code"])
return self.default_flow_token_response(request)
| TokenEndpoint |
python | PyCQA__bandit | tests/unit/core/test_docs_util.py | {
"start": 147,
"end": 996
} | class ____(testtools.TestCase):
"""This set of tests exercises bandit.core.docs_util functions."""
BASE_URL = f"https://bandit.readthedocs.io/en/{bandit.__version__}/"
def test_overwrite_bib_info(self):
expected_url = self.BASE_URL + (
"blacklists/blacklist_calls.html" "#b304-b305-ciphers-and-modes"
)
self.assertEqual(get_url("B304"), get_url("B305"))
self.assertEqual(expected_url, get_url("B304"))
def test_plugin_call_bib(self):
expected_url = self.BASE_URL + "plugins/b101_assert_used.html"
self.assertEqual(expected_url, get_url("B101"))
def test_import_call_bib(self):
expected_url = self.BASE_URL + (
"blacklists/blacklist_imports.html" "#b413-import-pycrypto"
)
self.assertEqual(expected_url, get_url("B413"))
| DocsUtilTests |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ROI.py | {
"start": 89654,
"end": 93389
} | class ____(ROI):
r"""
ROI subclass with two freely-moving handles defining a line.
============== =============================================================
**Arguments**
positions (list of two length-2 sequences) The endpoints of the line
segment. Note that, unlike the handle positions specified in
other ROIs, these positions must be expressed in the normal
coordinate system of the ROI, rather than (0 to 1) relative
to the size of the ROI.
\**args All extra keyword arguments are passed to ROI()
============== =============================================================
"""
def __init__(self, positions=(None, None), pos=None, handles=(None,None), **args):
if pos is None:
pos = [0,0]
ROI.__init__(self, pos, [1,1], **args)
if len(positions) > 2:
raise Exception("LineSegmentROI must be defined by exactly 2 positions. For more points, use PolyLineROI.")
for i, p in enumerate(positions):
self.addFreeHandle(p, item=handles[i])
@property
def endpoints(self):
# must not be cached because self.handles may change.
return [h['item'] for h in self.handles]
def listPoints(self):
return [p['item'].pos() for p in self.handles]
def getState(self):
state = ROI.getState(self)
state['points'] = [Point(h.pos()) for h in self.getHandles()]
return state
def saveState(self):
state = ROI.saveState(self)
state['points'] = [tuple(h.pos()) for h in self.getHandles()]
return state
def setState(self, state):
ROI.setState(self, state)
p1 = [state['points'][0][0]+state['pos'][0], state['points'][0][1]+state['pos'][1]]
p2 = [state['points'][1][0]+state['pos'][0], state['points'][1][1]+state['pos'][1]]
self.movePoint(self.getHandles()[0], p1, finish=False)
self.movePoint(self.getHandles()[1], p2)
def paint(self, p, *args):
p.setRenderHint(
QtGui.QPainter.RenderHint.Antialiasing,
self._antialias
)
p.setPen(self.currentPen)
h1 = self.endpoints[0].pos()
h2 = self.endpoints[1].pos()
p.drawLine(h1, h2)
def boundingRect(self):
return self.shape().boundingRect()
def shape(self):
p = QtGui.QPainterPath()
h1 = self.endpoints[0].pos()
h2 = self.endpoints[1].pos()
dh = h2-h1
if dh.length() == 0:
return p
pxv = self.pixelVectors(dh)[1]
if pxv is None:
return p
pxv *= 4
p.moveTo(h1+pxv)
p.lineTo(h2+pxv)
p.lineTo(h2-pxv)
p.lineTo(h1-pxv)
p.lineTo(h1+pxv)
return p
def getArrayRegion(self, data, img, axes=(0,1), order=1, returnMappedCoords=False, **kwds):
"""
Use the position of this ROI relative to an imageItem to pull a slice
from an array.
Since this pulls 1D data from a 2D coordinate system, the return value
will have ndim = data.ndim-1
See :meth:`~pyqtgraph.ROI.getArrayRegion` for a description of the
arguments.
"""
imgPts = [self.mapToItem(img, h.pos()) for h in self.endpoints]
d = Point(imgPts[1] - imgPts[0])
o = Point(imgPts[0])
rgn = fn.affineSlice(data, shape=(int(d.length()),), vectors=[Point(d.norm())], origin=o, axes=axes, order=order, returnCoords=returnMappedCoords, **kwds)
return rgn
| LineSegmentROI |
python | django__django | tests/migrations/migrations_test_apps/unspecified_app_with_conflict/migrations/0002_conflicting_second.py | {
"start": 43,
"end": 335
} | class ____(migrations.Migration):
dependencies = [("unspecified_app_with_conflict", "0001_initial")]
operations = [
migrations.CreateModel(
"Something",
[
("id", models.AutoField(primary_key=True)),
],
)
]
| Migration |
python | google__jax | jax/_src/config.py | {
"start": 35309,
"end": 45321
} | class ____:
__slots__ = ["_config", "_new_value", "_prev_value"]
def __init__(self, config, new_value):
self._config = config
self._new_value = new_value
def __enter__(self):
self._prev_value = self._config.swap_local(self._new_value)
def __exit__(self, exc_type, exc_val, exc_tb):
self._config.set_local(self._prev_value)
def make_user_context(default_value=None):
"""Creates a `jax.jit` cache sensitive context.
If the value of the context changes, JAX's tracing, lowering and compilation
cache won't get a hit and the jitted function will be re-traced, re-lowered
and re-compiled.
This function is not thread-safe. Do not call it concurrently with other JAX
APIs.
Example:
```
@jax.jit
def f(x):
return x * 2
my_context = jax.make_user_context(default_value=None)
with my_context(1):
f(1.)
with my_context(2):
f(1.) # tracing cache miss
```
"""
obj = UserConfig(default_value)
return obj
# TODO(b/214340779): remove flag when XLA:CPU is improved.
jax2tf_associative_scan_reductions = bool_state(
name='jax2tf_associative_scan_reductions',
default=False,
help=(
'JAX has two separate lowering rules for the cumulative reduction '
'primitives (cumsum, cumprod, cummax, cummin). On CPUs and GPUs it uses '
'a lax.associative_scan, while for TPUs it uses the HLO ReduceWindow. '
'The latter has a slow implementation on CPUs and GPUs. '
'By default, jax2tf uses the TPU lowering. Set this flag to True to '
'use the associative scan lowering usage, and only if it makes a difference '
'for your application. '
'See the jax2tf README.md for more details.'
)
)
jax2tf_default_native_serialization = bool_state(
name='jax2tf_default_native_serialization',
default=bool_env('JAX2TF_DEFAULT_NATIVE_SERIALIZATION', True),
help=(
'Sets the default value of the native_serialization parameter to '
'jax2tf.convert. Prefer using the parameter instead of the flag, '
'the flag may be removed in the future. '
'Starting with JAX 0.4.31 non-native serialization is deprecated.'
)
)
jax_serialization_version = int_state(
name='jax_serialization_version',
default=int_env('JAX_SERIALIZATION_VERSION', 0), # We use 0 to detect default.
help=(
'DEPRECATED: use jax_export_calling_convention_version.'
)
)
jax_export_calling_convention_version = int_state(
name='jax_export_calling_convention_version',
# Note: bump the default calling convention version at least one month after
# we update XlaCallModule to support the new version, so that serialized
# modules are forward compatible with deployed versions of XlaCallModule.
# Version 10 of XlaCallModule is supported since May 20th, 2025.
default=int_env('JAX_EXPORT_CALLING_CONVENTION_VERSION', 10),
help=(
'The calling convention version number to use for exporting. This must be '
'within the range of versions supported by the tf.XlaCallModule '
'used in your deployment environment. '
'See https://docs.jax.dev/en/latest/export/shape_poly.html#calling-convention-versions.'
)
)
export_ignore_forward_compatibility = bool_state(
name='jax_export_ignore_forward_compatibility',
default=bool_env('JAX_EXPORT_IGNORE_FORWARD_COMPATIBILIY', False),
help=(
'Whether to ignore the forward compatibility lowering rules. '
'See https://docs.jax.dev/en/latest/export/export.html#compatibility-guarantees-for-custom-calls.'
)
)
jax_platforms = optional_string_state(
name='jax_platforms',
default=None,
help=(
'Comma-separated list of platform names specifying which platforms jax '
'should initialize. If any of the platforms in this list are not successfully '
'initialized, an exception will be raised and the program will be aborted. '
'The first platform in the list will be the default platform. '
'For example, config.jax_platforms=cpu,tpu means that CPU and TPU backends '
'will be initialized, and the CPU backend will be used unless otherwise '
'specified. If TPU initialization fails, it will raise an exception. '
'By default, jax will try to initialize all available '
'platforms and will default to GPU or TPU if available, and fallback to CPU '
'otherwise.'
))
def _validate_jax_pjrt_client_create_options(new_val):
if new_val is not None and not isinstance(new_val, (str, dict)):
raise ValueError('new string config value must be None or of type dict'
f' | str, got {new_val} of type {type(new_val)}.')
jax_pjrt_client_create_options = string_or_object_state(
name='jax_pjrt_client_create_options',
default=None,
help=('A set of key-value pairs in the format of "k1:v1;k2:v2" strings '
'provided to a device platform pjrt client as extra arguments.'),
validator=_validate_jax_pjrt_client_create_options)
enable_checks = bool_state(
name='jax_enable_checks',
default=False,
help='Turn on invariant checking for JAX internals. Makes things slower.')
debug_key_reuse = bool_state(
name='jax_debug_key_reuse',
default=False,
help=('Turn on experimental key reuse checking. With this configuration enabled,'
' typed PRNG keys (i.e. keys created with jax.random.key()) will have their'
' usage tracked, and incorrect reuse of a previously-used key will lead to'
' an error. Currently enabling this leads to a small Python overhead on'
' every call to a JIT-compiled function with keys as inputs or outputs.'),
include_in_trace_context=True)
check_tracer_leaks = bool_state(
name='jax_check_tracer_leaks',
default=False,
help=('Turn on checking for leaked tracers as soon as a trace completes. '
'Enabling leak checking may have performance impacts: some caching '
'is disabled, and other overheads may be added. Additionally, be aware '
'that some Python debuggers can cause false positives, so it is recommended '
'to disable any debuggers while leak checking is enabled.'))
checking_leaks = functools.partial(check_tracer_leaks, True)
captured_constants_warn_bytes = int_state(
name='jax_captured_constants_warn_bytes',
default=2 * 10 ** 9,
help=('The number of bytes of parameters that may be captured as constants '
'before a warning is issued. Defaults to approximately 2GB. '
'Set to -1 to disable issuing a warning.'
)
)
captured_constants_report_frames = int_state(
name='jax_captured_constants_report_frames',
default=0,
help=('The number of stack frames reported for each captured constant '
'indicating the file and operation where the constant was captured. '
'Set to -1 to print the complete set of frames, or 0 to disable. '
'N.b. the report is only generated if the total amount of captured '
'constants exceeds `jax_captured_constants_warn_bytes`, as it is expensive'
'to generate the report.'
)
)
debug_nans = bool_state(
name='jax_debug_nans',
default=False,
help=('Add nan checks to every operation. When a nan is detected on the '
'output of a jit-compiled computation, call into the un-compiled '
'version in an attempt to more precisely identify the operation '
'which produced the nan.'))
debug_infs = bool_state(
name='jax_debug_infs',
default=False,
help=('Add inf checks to every operation. When an inf is detected on the '
'output of a jit-compiled computation, call into the un-compiled '
'version in an attempt to more precisely identify the operation '
'which produced the inf.'))
log_compiles = bool_state(
name='jax_log_compiles',
default=False,
help=('Log a message each time `jit` or `pmap` compiles an XLA '
'computation. Logging is performed with `logging`. When this '
'option is set, the log level is WARNING; otherwise the level is '
'DEBUG.'))
explain_cache_misses = bool_state(
name='jax_explain_cache_misses',
default=False,
help=('Each time there is a miss on one of the main caches (e.g. the '
'tracing cache), log an explanation. Logging is performed with '
'`logging`. When this option is set, the log level is WARNING; '
'otherwise the level is DEBUG.'))
log_checkpoint_residuals = bool_state(
name='jax_log_checkpoint_residuals',
default=False,
help=('Log a message every time jax.checkpoint (aka jax.remat) is '
'partially evaluated (e.g. for autodiff), printing what residuals '
'are saved.'))
pmap_shmap_merge = bool_state(
name='jax_pmap_shmap_merge',
default=True,
upgrade=True,
help='If True, pmap and shard_map API will be merged.')
distributed_debug = bool_state(
name='jax_distributed_debug',
default=False,
help=('Enable logging useful for debugging multi-process distributed '
'computations. Logging is performed with `logging` at WARNING '
'level.'))
random_seed_offset = int_state(
name='jax_random_seed_offset',
default=0,
help=('Offset to all random seeds (e.g. argument to jax.random.key()).'),
include_in_jit_key=True,
include_in_trace_context=True,
)
def _safer_randint_deprecation(new_val):
if not new_val:
deprecations.warn(
'safer-randint-config',
(
'The jax_safer_randint configuration is deprecated in JAX v0.7.2'
' and will be removed in JAX v0.9.0.'
),
stacklevel=4
)
# TODO(jakevdp): remove this flag.
safer_randint = bool_state(
name='jax_safer_randint',
default=True,
help='Use a safer randint algorithm for 8-bit and 16-bit dtypes.',
include_in_jit_key=True,
upgrade=True,
validator=_safer_randint_deprecation
)
| UserContext |
python | psf__black | src/black/trans.py | {
"start": 6828,
"end": 11019
} | class ____(ABC):
"""
An implementation of the Transformer protocol that relies on its
subclasses overriding the template methods `do_match(...)` and
`do_transform(...)`.
This Transformer works exclusively on strings (for example, by merging
or splitting them).
The following sections can be found among the docstrings of each concrete
StringTransformer subclass.
Requirements:
Which requirements must be met of the given Line for this
StringTransformer to be applied?
Transformations:
If the given Line meets all of the above requirements, which string
transformations can you expect to be applied to it by this
StringTransformer?
Collaborations:
What contractual agreements does this StringTransformer have with other
StringTransfomers? Such collaborations should be eliminated/minimized
as much as possible.
"""
__name__: Final = "StringTransformer"
# Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
# `abc.ABC`.
def __init__(self, line_length: int, normalize_strings: bool) -> None:
self.line_length = line_length
self.normalize_strings = normalize_strings
@abstractmethod
def do_match(self, line: Line) -> TMatchResult:
"""
Returns:
* Ok(string_indices) such that for each index, `line.leaves[index]`
is our target string if a match was able to be made. For
transformers that don't result in more lines (e.g. StringMerger,
StringParenStripper), multiple matches and transforms are done at
once to reduce the complexity.
OR
* Err(CannotTransform), if no match could be made.
"""
@abstractmethod
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
"""
Yields:
* Ok(new_line) where new_line is the new transformed line.
OR
* Err(CannotTransform) if the transformation failed for some reason. The
`do_match(...)` template method should usually be used to reject
the form of the given Line, but in some cases it is difficult to
know whether or not a Line meets the StringTransformer's
requirements until the transformation is already midway.
Side Effects:
This method should NOT mutate @line directly, but it MAY mutate the
Line's underlying Node structure. (WARNING: If the underlying Node
structure IS altered, then this method should NOT be allowed to
yield an CannotTransform after that point.)
"""
def __call__(
self, line: Line, _features: Collection[Feature], _mode: Mode
) -> Iterator[Line]:
"""
StringTransformer instances have a call signature that mirrors that of
the Transformer type.
Raises:
CannotTransform(...) if the concrete StringTransformer class is unable
to transform @line.
"""
# Optimization to avoid calling `self.do_match(...)` when the line does
# not contain any string.
if not any(leaf.type == token.STRING for leaf in line.leaves):
raise CannotTransform("There are no strings in this line.")
match_result = self.do_match(line)
if isinstance(match_result, Err):
cant_transform = match_result.err()
raise CannotTransform(
f"The string transformer {self.__class__.__name__} does not recognize"
" this line as one that it can transform."
) from cant_transform
string_indices = match_result.ok()
for line_result in self.do_transform(line, string_indices):
if isinstance(line_result, Err):
cant_transform = line_result.err()
raise CannotTransform(
"StringTransformer failed while attempting to transform string."
) from cant_transform
line = line_result.ok()
yield line
@dataclass
| StringTransformer |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-chatgpt-plugin/llama_index/readers/chatgpt_plugin/base.py | {
"start": 238,
"end": 2107
} | class ____(BaseReader):
"""ChatGPT Retrieval Plugin reader."""
def __init__(
self,
endpoint_url: str,
bearer_token: Optional[str] = None,
retries: Optional[Retry] = None,
batch_size: int = 100,
) -> None:
"""Chatgpt Retrieval Plugin."""
self._endpoint_url = endpoint_url
self._bearer_token = bearer_token or os.getenv("BEARER_TOKEN")
self._retries = retries
self._batch_size = batch_size
self._s = requests.Session()
self._s.mount("http://", HTTPAdapter(max_retries=self._retries))
def load_data(
self,
query: str,
top_k: int = 10,
separate_documents: bool = True,
**kwargs: Any,
) -> List[Document]:
"""Load data from ChatGPT Retrieval Plugin."""
headers = {"Authorization": f"Bearer {self._bearer_token}"}
queries = [{"query": query, "top_k": top_k}]
res = requests.post(
f"{self._endpoint_url}/query", headers=headers, json={"queries": queries}
)
documents: List[Document] = []
for query_result in res.json()["results"]:
for result in query_result["results"]:
result_id = result["id"]
result_txt = result["text"]
result_embedding = result["embedding"]
document = Document(
text=result_txt,
id_=result_id,
embedding=result_embedding,
)
documents.append(document)
# NOTE: there should only be one query
break
if not separate_documents:
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
| ChatGPTRetrievalPluginReader |
python | huggingface__transformers | src/transformers/pipelines/deprecated/text2text_generation.py | {
"start": 430,
"end": 556
} | class ____(enum.Enum):
TENSORS = 0
TEXT = 1
@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
| ReturnType |
python | pyca__cryptography | tests/hazmat/asn1/test_serialization.py | {
"start": 2753,
"end": 3217
} | class ____:
def test_string(self) -> None:
assert_roundtrips(
[
("", b"\x0c\x00"),
("hello", b"\x0c\x05hello"),
("Test User 1", b"\x0c\x0bTest User 1"),
(
"café",
b"\x0c\x05caf\xc3\xa9",
), # UTF-8 string with non-ASCII
("🚀", b"\x0c\x04\xf0\x9f\x9a\x80"), # UTF-8 emoji
]
)
| TestString |
python | has2k1__plotnine | plotnine/labels.py | {
"start": 366,
"end": 2125
} | class ____:
"""
Add labels for any aesthetics with a scale or title, subtitle & caption
"""
# Names of Scaled Aesthetics
x: str | None = None
"""
Name of the x-axis.
"""
y: str | None = None
"""
Name of the y-axis.
"""
alpha: str | None = None
"""
Name of the alpha legend.
"""
color: str | None = None
"""
Name of the color legend or colorbar.
"""
colour: str | None = None
"""
Name of the colour legend or colourbar.
This is an alias of the `color` parameter. Only use one of
the spellings.
"""
fill: str | None = None
"""
Name of the fill legend/colourbar.
"""
linetype: str | None = None
"""
Name of the linetype legend.
"""
shape: str | None = None
"""
Name of the shape legend.
"""
size: str | None = None
"""
Name of the size legend.
"""
stroke: str | None = None
"""
Name of the stroke legend.
"""
# Other texts
title: str | None = None
"""
The title of the plot.
"""
subtitle: str | None = None
"""
The subtitle of the plot.
"""
caption: str | None = None
"""
The caption at the bottom of the plot.
"""
tag: str | None = None
"""
A plot tag
"""
def __post_init__(self):
kwargs: dict[str, str] = {
f.name: value
for f in fields(self)
if (value := getattr(self, f.name)) is not None
}
self.labels = labels_view(**rename_aesthetics(kwargs))
def __radd__(self, other: p9.ggplot) -> p9.ggplot:
"""
Add labels to ggplot object
"""
other.labels.update(self.labels)
return other
| labs |
python | py-pdf__pypdf | pypdf/errors.py | {
"start": 1823,
"end": 1947
} | class ____(PyPdfError, RuntimeError):
"""Raised when the XMP XML document context is invalid or missing."""
| XmpDocumentError |
python | scipy__scipy | scipy/integrate/tests/test_integrate.py | {
"start": 16413,
"end": 18631
} | class ____:
"""Call an ode-class solver with several cases of parameter use."""
# solver_name must be set before tests can be run with this class.
# Set these in subclasses.
solver_name = ''
solver_uses_jac = False
def _get_solver(self, f, jac):
solver = ode(f, jac)
if self.solver_uses_jac:
solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
with_jacobian=self.solver_uses_jac)
else:
# XXX Shouldn't set_integrator *always* accept the keyword arg
# 'with_jacobian', and perhaps raise an exception if it is set
# to True if the solver can't actually use it?
solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
return solver
def _check_solver(self, solver):
ic = [1.0, 0.0]
solver.set_initial_value(ic, 0.0)
solver.integrate(pi)
assert_array_almost_equal(solver.y, [-1.0, 0.0])
def test_no_params(self):
solver = self._get_solver(f, jac)
self._check_solver(solver)
def test_one_scalar_param(self):
solver = self._get_solver(f1, jac1)
omega = 1.0
solver.set_f_params(omega)
if self.solver_uses_jac:
solver.set_jac_params(omega)
self._check_solver(solver)
def test_two_scalar_params(self):
solver = self._get_solver(f2, jac2)
omega1 = 1.0
omega2 = 1.0
solver.set_f_params(omega1, omega2)
if self.solver_uses_jac:
solver.set_jac_params(omega1, omega2)
self._check_solver(solver)
def test_vector_param(self):
solver = self._get_solver(fv, jacv)
omega = [1.0, 1.0]
solver.set_f_params(omega)
if self.solver_uses_jac:
solver.set_jac_params(omega)
self._check_solver(solver)
def test_warns_on_failure(self):
# Set nsteps small to ensure failure
solver = self._get_solver(f, jac)
solver.set_integrator(self.solver_name, nsteps=1)
ic = [1.0, 0.0]
solver.set_initial_value(ic, 0.0)
with pytest.warns(UserWarning):
solver.integrate(pi)
| ODECheckParameterUse |
python | skorch-dev__skorch | skorch/helper.py | {
"start": 4359,
"end": 9153
} | class ____(Sequence):
# pylint: disable=anomalous-backslash-in-string
"""Helper class that wraps a torch dataset to make it work with
sklearn.
Sometimes, sklearn will touch the input data, e.g. when splitting
the data for a grid search. This will fail when the input data is
a torch dataset. To prevent this, use this wrapper class for your
dataset.
Note: This class will only return the X value by default (i.e. the
first value returned by indexing the original dataset). Sklearn,
and hence skorch, always require 2 values, X and y. Therefore, you
still need to provide the y data separately.
Note: This class behaves similarly to a PyTorch
:class:`~torch.utils.data.Subset` when it is indexed by a slice or
numpy array: It will return another ``SliceDataset`` that
references the subset instead of the actual values. Only when it
is indexed by an int does it return the actual values. The reason
for this is to avoid loading all data into memory when sklearn,
for instance, creates a train/validation split on the
dataset. Data will only be loaded in batches during the fit loop.
Examples
--------
>>> X = MyCustomDataset()
>>> search = GridSearchCV(net, params, ...)
>>> search.fit(X, y) # raises error
>>> ds = SliceDataset(X)
>>> search.fit(ds, y) # works
Parameters
----------
dataset : torch.utils.data.Dataset
A valid torch dataset.
idx : int (default=0)
Indicates which element of the dataset should be
returned. Typically, the dataset returns both X and y
values. SliceDataset can only return 1 value. If you want to
get X, choose idx=0 (default), if you want y, choose idx=1.
indices : list, np.ndarray, or None (default=None)
If you only want to return a subset of the dataset, indicate
which subset that is by passing this argument. Typically, this
can be left to be None, which returns all the data. See also
:class:`~torch.utils.data.Subset`.
"""
def __init__(self, dataset, idx=0, indices=None):
self.dataset = dataset
self.idx = idx
self.indices = indices
self.indices_ = (self.indices if self.indices is not None
else np.arange(len(self.dataset)))
self.ndim = 1
def __len__(self):
return len(self.indices_)
@property
def shape(self):
return (len(self),)
def transform(self, data):
"""Additional transformations on ``data``.
Note: If you use this in conjuction with PyTorch
:class:`~torch.utils.data.DataLoader`, the latter will call
the dataset for each row separately, which means that the
incoming ``data`` is a single rows.
"""
return data
def _select_item(self, Xn):
# Raise a custom error message when accessing out of
# bounds. However, this will only trigger as soon as this is
# indexed by an integer.
try:
return Xn[self.idx]
except IndexError:
name = self.__class__.__name__
msg = ("{} is trying to access element {} but there are only "
"{} elements.".format(name, self.idx, len(Xn)))
raise IndexError(msg)
def __getitem__(self, i):
if isinstance(i, (int, np.integer)):
Xn = self.dataset[self.indices_[i]]
Xi = self._select_item(Xn)
return self.transform(Xi)
cls = type(self)
if isinstance(i, slice):
return cls(self.dataset, idx=self.idx, indices=self.indices_[i])
if isinstance(i, np.ndarray):
if i.ndim != 1:
raise IndexError("SliceDataset only supports slicing with 1 "
"dimensional arrays, got {} dimensions instead."
"".format(i.ndim))
if i.dtype == bool:
i = np.flatnonzero(i)
return cls(self.dataset, idx=self.idx, indices=self.indices_[i])
def __array__(self, dtype=None):
# This method is invoked when calling np.asarray(X)
# https://numpy.org/devdocs/user/basics.dispatch.html
X = [self[i] for i in range(len(self))]
if np.isscalar(X[0]):
return np.asarray(X)
return np.asarray([to_numpy(x) for x in X], dtype=dtype)
def predefined_split(dataset):
"""Uses ``dataset`` for validiation in :class:`.NeuralNet`.
Examples
--------
>>> valid_ds = skorch.dataset.Dataset(X, y)
>>> net = NeuralNet(..., train_split=predefined_split(valid_ds))
Parameters
----------
dataset: torch Dataset
Validiation dataset
"""
return partial(_make_split, valid_ds=dataset)
| SliceDataset |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 1665,
"end": 1787
} | class ____:
def __get__(self, instance: "type[ClassE] | None", owner: "MetaclassE"):
return None
| MetaDescriptorE |
python | numpy__numpy | numpy/polynomial/tests/test_hermite.py | {
"start": 11307,
"end": 12852
} | class ____:
# some random values in [-1, 1)
x = np.random.random((3, 5)) * 2 - 1
def test_hermvander(self):
# check for 1d x
x = np.arange(3)
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0] * i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0] * i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
def test_hermvander2d(self):
# also tests hermval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herm.hermvander2d(x1, x2, [1, 2])
tgt = herm.hermval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermvander3d(self):
# also tests hermval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])
tgt = herm.hermval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
| TestVander |
python | tensorflow__tensorflow | third_party/xla/build_tools/configure/configure.py | {
"start": 5631,
"end": 6565
} | class ____(enum.Enum):
"""Enum base class with helper methods for working with argparse.
Example usage:
```
class Fruit(ArgparseableEnum):
APPLE = enum.auto()
# argparse setup
parser.add_argument("--fruit", type=Fruit.from_str, choices=list(Fruit))
```
Users can pass strings like `--fruit=apple` with nice error messages and the
parser will get the corresponding enum value.
NOTE: PyType gets confused when this class is used to create Enums in the
functional style like `ArgparseableEnum("Fruit", ["APPLE", "BANANA"])`.
"""
def __str__(self):
return self.name
@classmethod
def from_str(cls, s):
s = s.upper()
try:
return cls[s]
except KeyError:
# Sloppy looking exception handling, but argparse will catch ValueError
# and give a pleasant error message. KeyError would not work here.
raise ValueError # pylint: disable=raise-missing-from
| ArgparseableEnum |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/exc.py | {
"start": 939,
"end": 1910
} | class ____(sa_exc.SQLAlchemyError):
"""An operation encountered database state that is unaccounted for.
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
* A object is detached from its parent object, however
the object was previously attached to a different parent
identity which was garbage collected, and a decision
cannot be made if the new parent was really the most
recent "parent".
"""
ConcurrentModificationError = StaleDataError
| StaleDataError |
python | pytorch__pytorch | torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py | {
"start": 10665,
"end": 17267
} | class ____(RpcAgentTestFixture):
@property
def world_size(self) -> int:
return WORLD_SIZE
def remote_worker_name(self) -> str:
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{REMOTE_WORKER_RANK}"
def trainer_name(self, rank):
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{rank}"
def _remote_worker_process(self, ddp_mode):
gLogger.info("The remote worker is running.")
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
# new_group needs to be called on ranks.
dist.new_group(TRAINER_RANKS)
global shutdown_signal
with shutdown_signal:
shutdown_signal.wait()
gLogger.info("Exiting remote worker.")
dist.destroy_process_group()
def _trainer_process(self, rank: int):
gLogger.info("Running the trainer #%s...", rank)
gLogger.info(
"Initing trainer process group by trainer #%s with ranks %s",
rank,
TRAINER_RANKS,
)
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
gLogger.info("Waiting for shutdown signal on trainer #%s...", rank)
global shutdown_signal
with shutdown_signal:
shutdown_signal.wait()
gLogger.info("Exiting the trainer #%s...", rank)
dist.destroy_process_group()
def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool):
gLogger.info("Running the master process...")
dist.init_process_group(
backend="gloo",
init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
world_size=self.world_size,
rank=self.rank,
)
remote_em_rref = rpc.remote(
self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE)
)
remote_net_rref = rpc.remote(
self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID)
)
gLogger.info("Created remote rrefs on master")
self.do_test_on_master(
ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref
)
def do_test_on_master(
self,
ddp_mode: DdpMode,
simulate_uneven_inputs: bool,
remote_em_rref: rpc.RRef,
remote_net_rref: rpc.RRef,
):
if simulate_uneven_inputs:
gLogger.info(
"Running DDP + RPC test with simulating uneven inputs across trainers."
)
trainer_rrefs = []
for rank in TRAINER_RANKS:
trainer = self.trainer_name(rank)
trainer_rrefs.append(
rpc.remote(
trainer,
Trainer,
args=(remote_em_rref, remote_net_rref, ddp_mode, rank),
)
)
if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
# new_group needs to be called on ranks.
dist.new_group(TRAINER_RANKS)
training_examples = get_training_examples()
for _ in range(3):
futures = []
num_trainers = len(trainer_rrefs)
for idx, trainer_rref in enumerate(trainer_rrefs):
# Half the trainers will deplete inputs earlier than the rest.
trainer_has_less_inputs = (
simulate_uneven_inputs and idx < num_trainers // 2
)
futures.append(
_remote_method_async(
Trainer.train_batch,
trainer_rref,
training_examples[idx],
trainer_has_less_inputs,
simulate_uneven_inputs,
)
)
for future in futures:
ddp_grads, non_ddp_grads = future.wait()
# When there are uneven inputs, it is not necessary that grads
# cancel each other out, since some trainers contribute 0 grad.
if not simulate_uneven_inputs:
for grad in ddp_grads:
self.assertEqual(
grad,
torch.zeros_like(grad),
msg=f"The grad for any ddp parameter should be zeros, because "
"the training examples' grads cancel each other. Received "
f"gradient {grad}",
)
for grad in non_ddp_grads:
self.assertNotEqual(
grad,
torch.zeros_like(grad),
msg="The grad for any non-ddp parameter shouldn't be zeros",
)
# Destroy process groups
for trainer_rref in trainer_rrefs:
_remote_method_async(Trainer.destroy_pg, trainer_rref).wait()
# Send shutdown signals.
for rank in TRAINER_RANKS:
trainer = self.trainer_name(rank)
rpc.rpc_sync(trainer, set_shutdown_signal, args=())
rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=())
def _do_test(self, ddp_mode, simulate_uneven_inputs=False):
if self.rank == MASTER_RANK:
self._master_process(ddp_mode, simulate_uneven_inputs)
elif self.rank == REMOTE_WORKER_RANK:
self._remote_worker_process(ddp_mode)
elif self.rank in TRAINER_RANKS:
self._trainer_process(self.rank)
else:
raise RuntimeError(f"Unknown process rank: {self.rank}")
@requires_gloo()
@dist_init
def test_backward_no_ddp(self):
self._do_test(DdpMode.NONE)
@requires_gloo()
@dist_init
def test_backward_ddp_outside(self):
self._do_test(DdpMode.OUTSIDE)
@requires_gloo()
@dist_init
def test_backward_ddp_outside_uneven_inputs(self):
self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True)
@requires_gloo()
@dist_init
def test_backward_ddp_inside(self):
self._do_test(DdpMode.INSIDE)
# Common utils for both CPU and CUDA test suites
| DdpUnderDistAutogradTest |
python | buildout__buildout | zc.recipe.egg_/src/zc/recipe/egg/custom.py | {
"start": 777,
"end": 1069
} | class ____:
def __init__(self, buildout, name, options):
self.name, self.options = name, options
options['_d'] = buildout['buildout']['develop-eggs-directory']
self.build_ext = build_ext(buildout, options)
def update(self):
return self.install()
| Base |
python | eventlet__eventlet | eventlet/green/http/cookiejar.py | {
"start": 31542,
"end": 46022
} | class ____(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies."""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""Constructor arguments should be passed as keyword arguments only."""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override .set_ok(), be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
_debug(" Set-Cookie2 without version attribute (%s=%s)",
cookie.name, cookie.value)
return False
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
_debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
_debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
_debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
_debug(" domain %s is not in user allow-list", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if self.strict_domain and (domain.count(".") >= 2):
# XXX This should probably be compared with the Konqueror
# (kcookiejar.cpp) and Mozilla implementations, but it's a
# losing battle.
i = domain.rfind(".")
j = domain.rfind(".", 0, i)
if j == 0: # domain like .foo.bar
tld = domain[i+1:]
sld = domain[j+1:i]
if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
"gov", "mil", "int", "aero", "biz", "cat", "coop",
"info", "jobs", "mobi", "museum", "name", "pro",
"travel", "eu") and len(tld) == 2:
# domain like .co.uk
_debug(" country-code second level domain %s", domain)
return False
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
_debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
_debug(" effective request-host %s (even with added "
"initial dot) does not end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
_debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
_debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
_debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
_debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override .return_ok(), be sure to call this method. If it
returns false, so should your subclass (assuming your subclass wants to
be more strict about which cookies to return).
"""
# Path has already been checked by .path_return_ok(), and domain
# blocking done by .domain_return_ok().
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
for n in "version", "verifiability", "secure", "expires", "port", "domain":
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.type != "https":
_debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
_debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
_debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
_debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
req_host, erhn = eff_request_host(request)
if not req_host.startswith("."):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
if not (req_host.endswith(domain) or erhn.endswith(domain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
_debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
_debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
_debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
_debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = sorted(adict.keys())
return map(adict.get, keys)
def deepvalues(mapping):
"""Iterates over nested mapping, depth-first, in sorted order by key."""
values = vals_sorted_by_key(mapping)
for obj in values:
mapping = False
try:
obj.items
except AttributeError:
pass
else:
mapping = True
yield from deepvalues(obj)
if not mapping:
yield obj
# Used as second parameter to dict.get() method, to distinguish absent
# dict key from one with a None value.
| DefaultCookiePolicy |
python | facebook__pyre-check | client/commands/tests/profile_test.py | {
"start": 238,
"end": 16574
} | class ____(testslide.TestCase):
def test_parse_event(self) -> None:
self.assertEqual(
profile.parse_event(
"""
{
"name": "Kara",
"worker_id": 579102694,
"pid": 400,
"event_type": [ "Duration", 11 ],
"timestamp": 42,
"tags": [["actor", "Valorie Curry"]]
}
"""
),
profile.DurationEvent(
duration=11,
metadata=profile.EventMetadata(
name="Kara",
worker_id=579102694,
pid=400,
timestamp=42,
tags={"actor": "Valorie Curry"},
),
),
)
self.assertEqual(
profile.parse_event(
"""
{
"name": "Conor",
"worker_id": 313248317,
"pid": 800,
"event_type": [ "Counter" ],
"timestamp": 43,
"tags": [["actor", "Bryan Dechart"]]
}
"""
),
profile.CounterEvent(
description=None,
metadata=profile.EventMetadata(
name="Conor",
worker_id=313248317,
pid=800,
timestamp=43,
tags={"actor": "Bryan Dechart"},
),
),
)
self.assertEqual(
profile.parse_event(
"""
{
"name": "Marcus",
"worker_id": 684842971,
"pid": 200,
"event_type": [ "Counter", "ra9" ],
"timestamp": 44
}
"""
),
profile.CounterEvent(
description="ra9",
metadata=profile.EventMetadata(
name="Marcus", worker_id=684842971, pid=200, timestamp=44, tags={}
),
),
)
with self.assertRaises(Exception):
profile.parse_event("{}")
with self.assertRaises(Exception):
profile.parse_event('{ name: "foo" }')
with self.assertRaises(Exception):
profile.parse_event('{ "name": "foo", "pid": 42, "timestamp": 100}')
with self.assertRaises(Exception):
profile.parse_event(
'{ "name": "foo", "pid": 42, "event_type": "wat", "timestamp": 100}'
)
with self.assertRaises(Exception):
profile.parse_event(
'{ "name": "foo", "pid": 42, "event_type": [ "Duration", "10" ]}'
)
def test_to_incremental_updates(self) -> None:
self.assertEqual(
profile.to_incremental_updates(
[
profile.DurationEvent(
duration=11,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=0,
pid=400,
timestamp=42,
tags={"phase_name": "phase1"},
),
),
profile.DurationEvent(
duration=11,
metadata=profile.EventMetadata(
name="initialization",
worker_id=1,
pid=400,
timestamp=42,
tags={},
),
),
profile.DurationEvent(
duration=11,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=2,
pid=400,
timestamp=42,
tags={"phase_name": "phase1"},
),
),
profile.DurationEvent(
duration=12,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=3,
pid=400,
timestamp=42,
tags={"phase_name": "phase2"},
),
),
profile.DurationEvent(
duration=13,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=0,
pid=400,
timestamp=42,
tags={"phase_name": "phase3"},
),
),
profile.DurationEvent(
duration=1,
metadata=profile.EventMetadata(
name="incremental check",
worker_id=1,
pid=400,
timestamp=42,
tags={},
),
),
profile.DurationEvent(
duration=21,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=2,
pid=400,
timestamp=42,
tags={"phase_name": "phase1"},
),
),
profile.DurationEvent(
duration=22,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=3,
pid=400,
timestamp=42,
tags={"phase_name": "phase2"},
),
),
profile.DurationEvent(
duration=2,
metadata=profile.EventMetadata(
name="incremental check",
worker_id=0,
pid=400,
timestamp=42,
tags={},
),
),
profile.DurationEvent(
duration=31,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=1,
pid=400,
timestamp=42,
tags={"phase_name": "phase1"},
),
),
]
),
[
{"phase1": 11, "phase2": 12, "phase3": 13, "total": 1},
{"phase1": 21, "phase2": 22, "total": 2},
],
)
def test_to_cold_start_phases(self) -> None:
self.assertEqual(
profile.to_cold_start_phases(
[
profile.DurationEvent(
duration=11,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=0,
pid=400,
timestamp=42,
tags={"phase_name": "phase1"},
),
),
profile.DurationEvent(
duration=14,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=0,
pid=400,
timestamp=42,
tags={"phase_name": "phase2"},
),
),
profile.DurationEvent(
duration=12,
metadata=profile.EventMetadata(
name="initialization",
worker_id=1,
pid=400,
timestamp=42,
tags={},
),
),
profile.DurationEvent(
duration=40,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=0,
pid=400,
timestamp=42,
tags={"phase_name": "phase1"},
),
),
profile.DurationEvent(
duration=50,
metadata=profile.EventMetadata(
name="SomeUpdate",
worker_id=0,
pid=400,
timestamp=42,
tags={"phase_name": "phase2"},
),
),
profile.DurationEvent(
duration=1,
metadata=profile.EventMetadata(
name="incremental check",
worker_id=1,
pid=400,
timestamp=42,
tags={},
),
),
]
),
{"phase1": 11, "phase2": 14, "total": 12},
)
def test_table_statistics(self) -> None:
statistics = profile.TableStatistics()
lines = [
"(ALL cache hit rate) stats -- samples: 183.378K, total: 143.256K, "
"avg: 0.781206, stddev: 0.413429, max: 1, min: 0)",
"ALL bytes deserialized from shared heap stats -- samples: 80.124K, "
"total: 3.501M, avg: 43.692277, stddev: 338.921118, max: 22.434K, min: 0)",
"ALL bytes saved in shared heap due to compression stats -- samples: "
"51.672K, total: 6.504M, avg: 125.869813, stddev: 1.211K, "
"max: 78.471K, min: 0)",
"ALL bytes serialized into shared heap stats -- samples: 51.672K, "
"total: 11.721M, avg: 226.833449, stddev: 1.331K, max: 89.739K, "
"min: 3)",
"ALL bytes shared heap compression ratio stats -- samples: 51.672K, "
"total: 46.811K, avg: 0.905918, stddev: 0.132285, max: 1, min: 0.200000)",
"AST (bytes deserialized from shared heap) stats -- samples: "
"1.690K, total: 1.635M, avg: 967.172781, stddev: 1.721K, max: "
"22.434K, min: 18)",
"AST (bytes saved in shared heap due to compression) stats -- "
"samples: 845, total: 2.571M, avg: 3.043K, stddev: 6.361K, max: "
"78.471K, min: 0)",
"AST (bytes serialized into shared heap) stats -- samples: 845, "
"total: 3.270M, avg: 3.870K, stddev: 6.883K, max: 89.739K, "
"min: 73)",
"AST (shared heap compression ratio) stats -- samples: 845, "
"total: 562.823112, avg: 0.666063, stddev: 0.124657, max: 1, "
"min: 0.337358)",
"Alias (bytes deserialized from shared heap) stats -- samples: "
"46.897K, total: 255.028K, avg: 5.438045, stddev: 1.665136, "
"max: 18, min: 5)",
"Alias (bytes saved in shared heap due to compression) stats -- "
"samples: 1.158K, total: 85.000000, avg: 0.073402, stddev: "
"1.005495, max: 21, min: 0)",
"Alias (bytes serialized into shared heap) stats -- samples: "
"1.158K, total: 27.826K, avg: 24.029361, stddev: 8.903096, "
"max: 73, min: 21)",
"Alias (shared heap compression ratio) stats -- samples: 1.158K, "
"total: 1.157K, avg: 0.999019, stddev: 0.012015, max: 1, min: "
"0.769231)",
"Class (bytes deserialized from shared heap) stats -- samples: "
"3.430K, total: 677.011K, avg: 197.379300, stddev: 343.207031, "
"max: 5.507K, min: 12)",
"Class (bytes saved in shared heap due to compression) stats -- "
"samples: 3.305K, total: 1.300M, avg: 393.340091, stddev: 1.101K, "
"max: 19.893K, min: 0)",
"Class (bytes serialized into shared heap) stats -- samples: "
"3.305K, total: 2.556M, avg: 773.294100, stddev: 1.331K, max: "
"22.030K, min: 51)",
"Class (cache hit rate) stats -- samples: 19.701K, total: 16.100K, "
"avg: 0.817217, stddev: 0.386488, max: 1, min: 0)",
"Class (shared heap compression ratio) stats -- samples: 3.305K, "
"total: 2.679K, avg: 0.810528, stddev: 0.129101, max: 1, min: "
"0.437840)",
]
for line in lines:
statistics.add(line + "\n")
self.assertEqual(
statistics.get_totals(),
[
("ALL", "11.721M"),
("AST", "3.270M"),
("Class", "2.556M"),
("Alias", "27.826K"),
],
)
self.assertEqual(
statistics.get_counts(),
[
("ALL", "51.672K"),
("Class", "3.305K"),
("Alias", "1.158K"),
("AST", "845"),
],
)
def test_statistics_over_time(self) -> None:
statistics = profile.StatisticsOverTime()
lines = [
"2020-04-27 20:08:35 MEMORY Shared memory size post-typecheck (size: 42)",
"2020-02-19 10:35:57 PERFORMANCE Check_TypeCheck: 1.767435s",
"2020-02-19 10:35:57 PROGRESS Postprocessing 51 sources...",
"2020-02-19 10:35:57 PROGRESS Postprocessed 51 of 51 sources",
"2020-02-19 10:35:57 MEMORY Shared memory size (size: 2105)",
"2020-02-19 10:35:57 INFO Number of new errors = 0",
"2020-02-19 10:35:57 PERFORMANCE Incremental check: 2.456214s",
"2020-02-19 10:35:57 PERFORMANCE Server request: 2.456249s",
"2020-02-19 10:35:57 PERFORMANCE Server request: 2.456372s",
"2020-02-19 10:36:06 PERFORMANCE Module tracker updated: 0.000838s",
"2020-02-19 10:36:06 INFO Parsing 9 updated modules...",
"2020-02-19 10:36:07 INFO Repopulating the environment for 9 " "modules.",
"2020-02-19 10:36:07 INFO Updating is from empty stub result "
"Environment",
"2020-02-19 10:36:07 INFO Updating Alias Environment",
"2020-02-19 10:36:07 INFO Updating Edges Environment",
"2020-02-19 10:36:07 INFO Updating Undecorated functions " "Environment",
"2020-02-19 10:36:07 INFO Updating Class metadata Environment",
"2020-02-19 10:36:07 INFO Updating parse annotation Environment",
"2020-02-19 10:36:07 INFO Updating attributes Environment",
"2020-02-19 10:36:07 INFO Updating Global Environment",
"2020-02-19 10:36:07 INFO Updating Global Locations Environment",
"2020-02-19 10:36:07 INFO Checking 295 functions...",
"2020-02-19 10:36:09 PROGRESS Processed 295 of 295 functions",
"2020-02-19 10:36:09 PERFORMANCE Check_TypeCheck: 2.156352s",
"2020-02-19 10:36:09 PROGRESS Postprocessing 23 sources...",
"2020-02-19 10:36:09 PROGRESS Postprocessed 23 of 23 sources",
"2020-02-19 10:36:09 MEMORY Shared memory size (size: 2106)",
"2020-02-19 10:36:09 INFO Number of new errors = 0",
]
for line in lines:
statistics.add(line + "\n")
self.assertEqual(
statistics._data,
[
("2020-04-27 20:08:35", 42000000),
("2020-02-19 10:35:57", 2105000000),
("2020-02-19 10:36:09", 2106000000),
],
)
| ProfileTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-area-rectangle-with-point-constraints-ii.py | {
"start": 66,
"end": 1611
} | class ____(object):
def maxRectangleArea(self, xCoord, yCoord):
"""
:type xCoord: List[int]
:type yCoord: List[int]
:rtype: int
"""
class BIT(object): # 0-indexed.
def __init__(self, n):
self.__bit = [0]*(n+1) # Extra one for dummy node.
def add(self, i, val):
i += 1 # Extra one for dummy node.
while i < len(self.__bit):
self.__bit[i] += val
i += (i & -i)
def query(self, i):
i += 1 # Extra one for dummy node.
ret = 0
while i > 0:
ret += self.__bit[i]
i -= (i & -i)
return ret
points = sorted((xCoord[i], yCoord[i]) for i in xrange(len(xCoord)))
y_to_idx = {y:idx for idx, y in enumerate(sorted(set(yCoord)))}
bit = BIT(len(y_to_idx))
lookup = {}
result = -1
for i, (x, y) in enumerate(points):
y_idx = y_to_idx[y]
bit.add(y_idx, +1)
if not (i-1 >= 0 and points[i-1][0] == x):
continue
prev_y_idx = y_to_idx[points[i-1][1]]
curr = bit.query(y_idx)-bit.query(prev_y_idx-1)
if (prev_y_idx, y_idx) in lookup and lookup[prev_y_idx, y_idx][0] == curr-2:
result = max(result, (x-lookup[prev_y_idx, y_idx][1])*(y-points[i-1][1]))
lookup[prev_y_idx, y_idx] = (curr, x)
return result
| Solution |
python | scrapy__scrapy | tests/test_utils_asyncgen.py | {
"start": 127,
"end": 552
} | class ____:
@deferred_f_from_coro_f
async def test_as_async_generator(self):
ag = as_async_generator(range(42))
results = [i async for i in ag]
assert results == list(range(42))
@deferred_f_from_coro_f
async def test_collect_asyncgen(self):
ag = as_async_generator(range(42))
results = await collect_asyncgen(ag)
assert results == list(range(42))
| TestAsyncgenUtils |
python | cython__cython | Cython/Compiler/Tests/TestCode.py | {
"start": 2290,
"end": 4273
} | class ____(TestCase):
def _process(self, code):
utility_code = UtilityCode()
formatted_code, is_module_specific = process_utility_ccode(utility_code, None, code)
self.assertFalse(is_module_specific) # cannot currently test this case
return formatted_code
def assert_formatted_code(self, code: str, expected: str, dedent=False):
if dedent:
expected = textwrap.dedent(expected)
expected = expected.strip() + '\n\n'
formatted = self._process(code)
self.assertEqual(formatted, expected)
def test_format_cstring(self):
self.assert_formatted_code('''
Some Text and a CSTRING("""
spanning "multiple" 'lines'.
Really.
"""); # end of C string
''',
expected=r'''
Some Text and a "\n"
" spanning \042multiple\042 'lines'.\n"
" Really.\n"
" \n"
; # end of C string
''',
dedent=True)
def test_cglobal(self):
self.assert_formatted_code("""
CGLOBAL(name)
NAMED_CGLOBAL(empty_tuple)
""",
expected=f"""
{Naming.modulestateglobal_cname}->name
{Naming.modulestateglobal_cname}->{Naming.empty_tuple}
""")
def test_empty_builtin(self):
self.assert_formatted_code("""
EMPTY(tuple)EMPTY(bytes)
EMPTY(tuple);EMPTY(bytes)
EMPTY(unicode)
EMPTY(bytes)
EMPTY(tuple)
""",
expected=f"""
{Naming.modulestateglobal_cname}->{Naming.empty_tuple}{Naming.modulestateglobal_cname}->{Naming.empty_bytes}
{Naming.modulestateglobal_cname}->{Naming.empty_tuple};{Naming.modulestateglobal_cname}->{Naming.empty_bytes}
{Naming.modulestateglobal_cname}->{Naming.empty_unicode}
{Naming.modulestateglobal_cname}->{Naming.empty_bytes}
{Naming.modulestateglobal_cname}->{Naming.empty_tuple}
""")
| TestUtilityCodeProcessing |
python | coleifer__peewee | tests/keys.py | {
"start": 1312,
"end": 1486
} | class ____(TestModel):
thing = CharField()
user = ForeignKeyField(User, backref='things')
class Meta:
primary_key = CompositeKey('thing', 'user')
| UserThing |
python | pytorch__pytorch | torch/_library/opaque_object.py | {
"start": 173,
"end": 6542
} | class ____:
def __init__(self) -> None:
pass
@classmethod
def __obj_unflatten__(cls, flattened_ctx: dict[str, Any]) -> None:
raise RuntimeError(
"FakeOpaqueObject should not be created through __obj_unflatten__ "
"and should be special handled. Please file an issue to Github."
)
OpaqueTypeStr = "__torch__.torch.classes.aten.OpaqueObject"
OpaqueType = NewType("OpaqueType", torch._C.ScriptObject)
def make_opaque(payload: Any = None) -> torch._C.ScriptObject:
"""
Creates an opaque object which stores the given Python object.
This opaque object can be passed to any custom operator as an argument.
The Python object can then be accessed from the opaque object using the `get_payload()` API.
The opaque object has `._type()`
"__torch__.torch.classes.aten.OpaqueObject", which should be the type used
when creating custom operator schemas.
Args:
payload (Any): The Python object to store in the opaque object. This can
be empty, and can be set with `set_payload()` later.
Returns:
torch._C.ScriptObject: The opaque object that stores the given Python object.
Example:
>>> import random
>>> import torch
>>> from torch._library.opaque_object import (
... make_opaque,
... get_payload,
... set_payload,
... )
>>>
>>> class RNGState:
>>> def __init__(self, seed):
>>> self.rng = random.Random(seed)
>>>
>>> rng = RNGState(0)
>>> obj = make_opaque()
>>> set_payload(obj, rng)
>>>
>>> assert get_payload(obj) == rng
>>>
>>> lib = torch.library.Library("mylib", "FRAGMENT")
>>>
>>> torch.library.define(
>>> "mylib::noisy_inject",
>>> "(Tensor x, __torch__.torch.classes.aten.OpaqueObject obj) -> Tensor",
>>> tags=torch.Tag.pt2_compliant_tag,
>>> lib=lib,
>>> )
>>>
>>> @torch.library.impl(
>>> "mylib::noisy_inject", "CompositeExplicitAutograd", lib=lib
>>> )
>>> def noisy_inject(x: torch.Tensor, obj: torch._C.ScriptObject) -> torch.Tensor:
>>> rng_state = get_payload(obj)
>>> assert isinstance(rng_state, RNGState)
>>> out = x.clone()
>>> for i in range(out.numel()):
>>> out.view(-1)[i] += rng_state.rng.random()
>>> return out
>>>
>>> print(torch.ops.mylib.noisy_inject(torch.ones(3), obj))
"""
return torch._C._make_opaque_object(payload)
def get_payload(opaque_object: torch._C.ScriptObject) -> Any:
"""
Retrieves the Python object stored in the given opaque object.
Args:
torch._C.ScriptObject: The opaque object that stores the given Python object.
Returns:
payload (Any): The Python object stored in the opaque object. This can
be set with `set_payload()`.
"""
if isinstance(opaque_object, FakeScriptObject):
raise ValueError(
"get_payload: this function was called with a FakeScriptObject "
"implying that you are calling get_payload inside of a fake kernel."
"The fake kernel should not depend on the contents of the "
"OpaqueObject at all, so we're erroring out. If you need this"
"functionality, consider creating a custom TorchBind Object instead"
"(but note that this is more difficult)."
)
if not (
isinstance(opaque_object, torch._C.ScriptObject)
and opaque_object._type().qualified_name() == OpaqueTypeStr
):
type_ = (
opaque_object._type().qualified_name()
if isinstance(opaque_object, torch._C.ScriptObject)
else type(opaque_object)
)
raise ValueError(
f"Tried to get the payload from a non-OpaqueObject of type `{type_}`"
)
return torch._C._get_opaque_object_payload(opaque_object)
def set_payload(opaque_object: torch._C.ScriptObject, payload: Any) -> None:
"""
Sets the Python object stored in the given opaque object.
Args:
torch._C.ScriptObject: The opaque object that stores the given Python object.
payload (Any): The Python object to store in the opaque object.
"""
if isinstance(opaque_object, FakeScriptObject):
raise ValueError(
"set_payload: this function was called with a FakeScriptObject "
"implying that you are calling get_payload inside of a fake kernel."
"The fake kernel should not depend on the contents of the "
"OpaqueObject at all, so we're erroring out. If you need this"
"functionality, consider creating a custom TorchBind Object instead"
"(but note that this is more difficult)."
)
if not (
isinstance(opaque_object, torch._C.ScriptObject)
and opaque_object._type().qualified_name() == OpaqueTypeStr
):
type_ = (
opaque_object._type().qualified_name()
if isinstance(opaque_object, torch._C.ScriptObject)
else type(opaque_object)
)
raise ValueError(
f"Tried to get the payload from a non-OpaqueObject of type `{type_}`"
)
torch._C._set_opaque_object_payload(opaque_object, payload)
_OPAQUE_TYPES: dict[Any, str] = {}
def register_opaque_type(cls: Any, name: Optional[str] = None) -> None:
"""
Registers the given type as an opaque type which allows this to be consumed
by a custom operator.
Args:
cls (type): The class to register as an opaque type.
name (str): A unique qualified name of the type.
"""
if name is None:
name = cls.__name__
if "." in name:
# The schema_type_parser will break up types with periods
raise ValueError(
f"Unable to accept name, {name}, for this opaque type as it contains a '.'"
)
_OPAQUE_TYPES[cls] = name
torch._C._register_opaque_type(name)
def is_opaque_type(cls: Any) -> bool:
"""
Checks if the given type is an opaque type.
"""
if cls not in _OPAQUE_TYPES:
return False
return torch._C._is_opaque_type_registered(_OPAQUE_TYPES[cls])
| FakeOpaqueObject |
python | doocs__leetcode | solution/1100-1199/1184.Distance Between Bus Stops/Solution.py | {
"start": 0,
"end": 319
} | class ____:
def distanceBetweenBusStops(
self, distance: List[int], start: int, destination: int
) -> int:
s = sum(distance)
t, n = 0, len(distance)
while start != destination:
t += distance[start]
start = (start + 1) % n
return min(t, s - t)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructorCallable2.py | {
"start": 696,
"end": 935
} | class ____:
def __new__(cls, *args, **kwargs) -> Self: ...
def __init__(self, x: int) -> None: ...
r3 = accepts_callable(Class3)
reveal_type(r3, expected_text="(x: int) -> Class3")
reveal_type(r3(3), expected_text="Class3")
| Class3 |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/custom_param_types.py | {
"start": 4638,
"end": 4730
} | class ____:
value: Any
def __repr__(self):
return self.value
| CacheableDefault |
python | django-guardian__django-guardian | example_project/articles/views.py | {
"start": 1259,
"end": 1450
} | class ____(PermissionRequiredMixin, DeleteView):
model = Article
success_url = reverse_lazy("articles:list")
permission_required = ["view_article", "delete_article"]
| ArticleDeleteView |
python | scikit-learn__scikit-learn | sklearn/neighbors/_base.py | {
"start": 38763,
"end": 52382
} | class ____:
"""Mixin for radius-based neighbors searches."""
def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):
"""Reduce a chunk of distances to the nearest neighbors.
Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
Parameters
----------
dist : ndarray of shape (n_samples_chunk, n_samples)
The distance matrix.
start : int
The index in X which the first row of dist corresponds to.
radius : float
The radius considered when making the nearest neighbors search.
return_distance : bool
Whether or not to return the distances.
Returns
-------
dist : list of ndarray of shape (n_samples_chunk,)
Returned only if `return_distance=True`.
neigh : list of ndarray of shape (n_samples_chunk,)
The neighbors indices.
"""
neigh_ind = [np.where(d <= radius)[0] for d in dist]
if return_distance:
if self.effective_metric_ == "euclidean":
dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]
else:
dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]
results = dist, neigh_ind
else:
results = neigh_ind
return results
def radius_neighbors(
self, X=None, radius=None, return_distance=True, sort_results=False
):
"""Find the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : {array-like, sparse matrix} of (n_samples, n_features), default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float, default=None
Limiting distance of neighbors to return. The default is the value
passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
sort_results : bool, default=False
If True, the distances and indices will be sorted by increasing
distances before being returned. If False, the results may not
be sorted. If `return_distance=False`, setting `sort_results=True`
will result in an error.
.. versionadded:: 0.22
Returns
-------
neigh_dist : ndarray of shape (n_samples,) of arrays
Array representing the distances to each point, only present if
`return_distance=True`. The distance values are computed according
to the ``metric`` constructor parameter.
neigh_ind : ndarray of shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples)
NearestNeighbors(radius=1.6)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0]))
[1.5 0.5]
>>> print(np.asarray(rng[1][0]))
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
"""
check_is_fitted(self)
if sort_results and not return_distance:
raise ValueError("return_distance must be True if sort_results is True.")
ensure_all_finite = "allow-nan" if get_tags(self).input_tags.allow_nan else True
query_is_train = X is None
if query_is_train:
X = self._fit_X
else:
if self.metric == "precomputed":
X = _check_precomputed(X)
else:
X = validate_data(
self,
X,
ensure_all_finite=ensure_all_finite,
accept_sparse="csr",
reset=False,
order="C",
)
if radius is None:
radius = self.radius
use_pairwise_distances_reductions = (
self._fit_method == "brute"
and RadiusNeighbors.is_usable_for(
X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
)
)
if use_pairwise_distances_reductions:
results = RadiusNeighbors.compute(
X=X,
Y=self._fit_X,
radius=radius,
metric=self.effective_metric_,
metric_kwargs=self.effective_metric_params_,
strategy="auto",
return_distance=return_distance,
sort_results=sort_results,
)
elif (
self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
):
results = _radius_neighbors_from_graph(
X, radius=radius, return_distance=return_distance
)
elif self._fit_method == "brute":
# Joblib-based backend, which is used when user-defined callable
# are passed for metric.
# This won't be used in the future once PairwiseDistancesReductions
# support:
# - DistanceMetrics which work on supposedly binary data
# - CSR-dense and dense-CSR case if 'euclidean' in metric.
# for efficiency, use squared euclidean distances
if self.effective_metric_ == "euclidean":
radius *= radius
kwds = {"squared": True}
else:
kwds = self.effective_metric_params_
reduce_func = partial(
self._radius_neighbors_reduce_func,
radius=radius,
return_distance=return_distance,
)
chunked_results = pairwise_distances_chunked(
X,
self._fit_X,
reduce_func=reduce_func,
metric=self.effective_metric_,
n_jobs=self.n_jobs,
**kwds,
)
if return_distance:
neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results)
neigh_dist_list = list(itertools.chain.from_iterable(neigh_dist_chunks))
neigh_ind_list = list(itertools.chain.from_iterable(neigh_ind_chunks))
neigh_dist = _to_object_array(neigh_dist_list)
neigh_ind = _to_object_array(neigh_ind_list)
results = neigh_dist, neigh_ind
else:
neigh_ind_list = list(itertools.chain.from_iterable(chunked_results))
results = _to_object_array(neigh_ind_list)
if sort_results:
for ii in range(len(neigh_dist)):
order = np.argsort(neigh_dist[ii], kind="mergesort")
neigh_ind[ii] = neigh_ind[ii][order]
neigh_dist[ii] = neigh_dist[ii][order]
results = neigh_dist, neigh_ind
elif self._fit_method in ["ball_tree", "kd_tree"]:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method
)
n_jobs = effective_n_jobs(self.n_jobs)
delayed_query = delayed(self._tree.query_radius)
chunked_results = Parallel(n_jobs, prefer="threads")(
delayed_query(X[s], radius, return_distance, sort_results=sort_results)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
neigh_ind, neigh_dist = tuple(zip(*chunked_results))
results = np.hstack(neigh_dist), np.hstack(neigh_ind)
else:
results = np.hstack(chunked_results)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
neigh_dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
neigh_dist[ind] = neigh_dist[ind][mask]
if return_distance:
return neigh_dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(
self, X=None, radius=None, mode="connectivity", sort_results=False
):
"""Compute the (weighted) graph of Neighbors for points in X.
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float, default=None
Radius of neighborhoods. The default is the value passed to the
constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
sort_results : bool, default=False
If True, in each row of the result, the non-zero entries will be
sorted by increasing distances. If False, the non-zero entries may
not be sorted. Only used with mode='distance'.
.. versionadded:: 0.22
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
kneighbors_graph : Compute the (weighted) graph of k-Neighbors for
points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X)
NearestNeighbors(radius=1.5)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
"""
check_is_fitted(self)
# check the input only in self.radius_neighbors
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == "connectivity":
A_ind = self.radius_neighbors(X, radius, return_distance=False)
A_data = None
elif mode == "distance":
dist, A_ind = self.radius_neighbors(
X, radius, return_distance=True, sort_results=sort_results
)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
f'or "distance" but got "{mode}" instead'
)
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = self.metric == "nan_euclidean"
return tags
| RadiusNeighborsMixin |
python | walkccc__LeetCode | solutions/3317. Find the Number of Possible Ways for an Event/3317.py | {
"start": 0,
"end": 1178
} | class ____:
def numberOfWays(self, n: int, x: int, y: int) -> int:
MOD = 1_000_000_007
@functools.lru_cache(None)
def fact(i: int) -> int:
return 1 if i <= 1 else i * fact(i - 1) % MOD
@functools.lru_cache(None)
def inv(i: int) -> int:
return pow(i, MOD - 2, MOD)
@functools.lru_cache(None)
def nCk(n: int, k: int) -> int:
return fact(n) * inv(fact(k)) * inv(fact(n - k)) % MOD
@functools.lru_cache(None)
def stirling(n: int, k: int) -> int:
"""
Returns the number of ways to partition a set of n objects into k
non-empty subsets.
https://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
"""
if k == 0 or n < k:
return 0
if k == 1 or n == k:
return 1
return (k * stirling(n - 1, k) + stirling(n - 1, k - 1)) % MOD
# 1. Choose `k` stages from `x` stages.
# 2. Partition `n` performers into `k` stages.
# 3. Permute `k` stages.
# 4. Score `k` stages with score in the range [1, y], so y^k ways.
return sum(nCk(x, k) * stirling(n, k) * fact(k) * pow(y, k, MOD) % MOD
for k in range(1, min(n, x) + 1)) % MOD
| Solution |
python | redis__redis-py | redis/asyncio/connection.py | {
"start": 34809,
"end": 36884
} | class ____(AbstractConnection):
"Manages UDS communication to and from a Redis server"
def __init__(self, *, path: str = "", **kwargs):
self.path = path
super().__init__(**kwargs)
def repr_pieces(self) -> Iterable[Tuple[str, Union[str, int]]]:
pieces = [("path", self.path), ("db", self.db)]
if self.client_name:
pieces.append(("client_name", self.client_name))
return pieces
async def _connect(self):
async with async_timeout(self.socket_connect_timeout):
reader, writer = await asyncio.open_unix_connection(path=self.path)
self._reader = reader
self._writer = writer
await self.on_connect()
def _host_error(self) -> str:
return self.path
FALSE_STRINGS = ("0", "F", "FALSE", "N", "NO")
def to_bool(value) -> Optional[bool]:
if value is None or value == "":
return None
if isinstance(value, str) and value.upper() in FALSE_STRINGS:
return False
return bool(value)
def parse_ssl_verify_flags(value):
# flags are passed in as a string representation of a list,
# e.g. VERIFY_X509_STRICT, VERIFY_X509_PARTIAL_CHAIN
verify_flags_str = value.replace("[", "").replace("]", "")
verify_flags = []
for flag in verify_flags_str.split(","):
flag = flag.strip()
if not hasattr(VerifyFlags, flag):
raise ValueError(f"Invalid ssl verify flag: {flag}")
verify_flags.append(getattr(VerifyFlags, flag))
return verify_flags
URL_QUERY_ARGUMENT_PARSERS: Mapping[str, Callable[..., object]] = MappingProxyType(
{
"db": int,
"socket_timeout": float,
"socket_connect_timeout": float,
"socket_keepalive": to_bool,
"retry_on_timeout": to_bool,
"max_connections": int,
"health_check_interval": int,
"ssl_check_hostname": to_bool,
"ssl_include_verify_flags": parse_ssl_verify_flags,
"ssl_exclude_verify_flags": parse_ssl_verify_flags,
"timeout": float,
}
)
| UnixDomainSocketConnection |
python | ray-project__ray | rllib/core/rl_module/torch/tests/test_torch_rl_module.py | {
"start": 3732,
"end": 5445
} | class ____(unittest.TestCase):
@unittest.skipIf(not _dynamo_is_available(), "torch._dynamo not available")
def test_torch_compile_no_memory_leak_gpu(self):
assert torch.cuda.is_available()
def get_memory_usage_cuda():
torch.cuda.empty_cache()
return torch.cuda.memory_allocated()
compile_cfg = TorchCompileConfig()
env = gym.make("CartPole-v1")
memory_before_create = get_memory_usage_cuda()
torch_rl_module = VPGTorchRLModule(
observation_space=env.observation_space,
action_space=env.action_space,
model_config={"hidden_dim": 32},
)
torch_rl_module.cuda()
torch_rl_module.compile(compile_cfg)
memory_after_create = get_memory_usage_cuda()
memory_diff_create = memory_after_create - memory_before_create
print("memory_diff_create: ", memory_diff_create)
# Sanity check that we actually allocated memory.
assert memory_diff_create > 0
del torch_rl_module
gc.collect()
memory_after_delete = get_memory_usage_cuda()
memory_diff_delete = memory_after_delete - memory_after_create
print("memory_diff_delete: ", memory_diff_delete)
# Memory should be released after deleting the module.
check(memory_before_create, memory_after_delete)
if __name__ == "__main__":
import sys
import pytest
# One can specify the specific TestCase class to run.
# None for all unittest.TestCase classes in this file.
class_ = sys.argv[1] if len(sys.argv) > 1 else None
sys.exit(pytest.main(["-v", __file__ + ("" if class_ is None else "::" + class_)]))
| TestRLModuleGPU |
python | pallets__werkzeug | examples/simplewiki/database.py | {
"start": 2909,
"end": 3846
} | class ____(Page, Revision):
"""
Represents a wiki page with a revision. Thanks to multiple inheritance
and the ability of SQLAlchemy to map to joins we can combine `Page` and
`Revision` into one class here.
"""
query = session.query_property()
def __init__(self):
raise TypeError(
"cannot create WikiPage instances, use the Page and "
"Revision classes for data manipulation."
)
def __repr__(self):
return f"<{type(self).__name__} {self.name!r}:{self.revision_id!r}>"
# setup mappers
mapper(Revision, revision_table)
mapper(
Page,
page_table,
properties=dict(
revisions=relation(
Revision, backref="page", order_by=Revision.revision_id.desc()
)
),
)
mapper(
RevisionedPage,
join(page_table, revision_table),
properties=dict(page_id=[page_table.c.page_id, revision_table.c.page_id]),
)
| RevisionedPage |
python | gevent__gevent | src/gevent/tests/test__queue.py | {
"start": 15948,
"end": 16035
} | class ____(TestGetInterrupt):
kind = queue.PriorityQueue
| TestGetInterruptPriorityQueue |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 99089,
"end": 100144
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
norm_layer = nn.BatchNorm2d
inplanes = 3
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.bn1 = norm_layer(inplanes)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.skip_add = nn.quantized.FloatFunctional()
self.cat = nn.quantized.FloatFunctional()
self.maxpool = nn.MaxPool2d((4, 4))
self.fc = nn.Linear(12, 6)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
skip = self.conv2(x)
out = self.skip_add.add(out, skip)
out = self.relu2(out)
out = self.maxpool(out)
out = self.conv2(out)
out = torch.nn.functional.max_pool2d(out, 2, 2)
out = self.cat.cat([out, out])
out = out.reshape(-1, 3 * 2 * 2)
out = self.fc(out)
return out
| ModelMultipleOpsNoAvgPool |
python | kamyu104__LeetCode-Solutions | Python/choose-numbers-from-two-arrays-in-range.py | {
"start": 116,
"end": 741
} | class ____(object):
def countSubranges(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
MOD = 10**9+7
result = 0
dp = collections.Counter()
for x, y in itertools.izip(nums1, nums2):
new_dp = collections.Counter()
new_dp[x] += 1
new_dp[-y] += 1
for v, c in dp.iteritems():
new_dp[v+x] = (new_dp[v+x]+c)%MOD
new_dp[v-y] = (new_dp[v-y]+c)%MOD
dp = new_dp
result = (result+dp[0])%MOD
return result
| Solution |
python | pytorch__pytorch | test/distributed/elastic/utils/logging_test.py | {
"start": 416,
"end": 1071
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.clazz_log = logging.get_logger()
def test_logger_name(self):
local_log = logging.get_logger()
name_override_log = logging.get_logger("foobar")
self.assertEqual(__name__, log.name)
self.assertEqual(__name__, self.clazz_log.name)
self.assertEqual(__name__, local_log.name)
self.assertEqual("foobar", name_override_log.name)
def test_derive_module_name(self):
module_name = logging._derive_module_name(depth=1)
self.assertEqual(__name__, module_name)
if __name__ == "__main__":
run_tests()
| LoggingTest |
python | tox-dev__tox | src/tox/session/env_select.py | {
"start": 4958,
"end": 5435
} | class ____:
"""tox environment information."""
env: PackageToxEnv | RunToxEnv #: the tox environment
is_active: bool #: a flag indicating if the environment is marked as active in the current run
package_skip: tuple[str, Skip] | None = None #: if set the creation of the packaging environment failed
_DYNAMIC_ENV_FACTORS = re.compile(r"(pypy|py|cython|)(((\d(\.\d+(\.\d+)?)?)|\d+)t?)?")
_PY_PRE_RELEASE_FACTOR = re.compile(r"alpha|beta|rc\.\d+")
| _ToxEnvInfo |
python | facebook__pyre-check | tools/upgrade/commands/command.py | {
"start": 1553,
"end": 1817
} | class ____:
def __init__(self, repository: Repository) -> None:
self._repository: Repository = repository
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
pass
def run(self) -> None:
pass
| Command |
python | mwaskom__seaborn | seaborn/palettes.py | {
"start": 2246,
"end": 27857
} | class ____(list):
"""Set the color palette in a with statement, otherwise be a list."""
def __enter__(self):
"""Open the context."""
from .rcmod import set_palette
self._orig_palette = color_palette()
set_palette(self)
return self
def __exit__(self, *args):
"""Close the context."""
from .rcmod import set_palette
set_palette(self._orig_palette)
def as_hex(self):
"""Return a color palette with hex codes instead of RGB values."""
hex = [mpl.colors.rgb2hex(rgb) for rgb in self]
return _ColorPalette(hex)
def _repr_html_(self):
"""Rich display of the color palette in an HTML frontend."""
s = 55
n = len(self)
html = f'<svg width="{n * s}" height="{s}">'
for i, c in enumerate(self.as_hex()):
html += (
f'<rect x="{i * s}" y="0" width="{s}" height="{s}" style="fill:{c};'
'stroke-width:2;stroke:rgb(255,255,255)"/>'
)
html += '</svg>'
return html
def _patch_colormap_display():
"""Simplify the rich display of matplotlib color maps in a notebook."""
def _repr_png_(self):
"""Generate a PNG representation of the Colormap."""
import io
from PIL import Image
import numpy as np
IMAGE_SIZE = (400, 50)
X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))
pixels = self(X, bytes=True)
png_bytes = io.BytesIO()
Image.fromarray(pixels).save(png_bytes, format='png')
return png_bytes.getvalue()
def _repr_html_(self):
"""Generate an HTML representation of the Colormap."""
import base64
png_bytes = self._repr_png_()
png_base64 = base64.b64encode(png_bytes).decode('ascii')
return ('<img '
+ 'alt="' + self.name + ' color map" '
+ 'title="' + self.name + '"'
+ 'src="data:image/png;base64,' + png_base64 + '">')
mpl.colors.Colormap._repr_png_ = _repr_png_
mpl.colors.Colormap._repr_html_ = _repr_html_
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
"""Return a list of colors or continuous colormap defining a palette.
Possible ``palette`` values include:
- Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
- Name of matplotlib colormap
- 'husl' or 'hls'
- 'ch:<cubehelix arguments>'
- 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
- A sequence of colors in any format matplotlib accepts
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
See the :ref:`tutorial <palette_tutorial>` for more information.
Parameters
----------
palette : None, string, or sequence, optional
Name of palette or None to return current palette. If a sequence, input
colors are used but possibly cycled and desaturated.
n_colors : int, optional
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified. Named palettes default to 6 colors,
but grabbing the current palette or passing in a list of colors will
not change the number of colors unless this is specified. Asking for
more colors than exist in the palette will cause it to cycle. Ignored
when ``as_cmap`` is True.
desat : float, optional
Proportion to desaturate each color by.
as_cmap : bool
If True, return a :class:`matplotlib.colors.ListedColormap`.
Returns
-------
list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
See Also
--------
set_palette : Set the default color cycle for all plots.
set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the seaborn palettes.
Examples
--------
.. include:: ../docstrings/color_palette.rst
"""
if palette is None:
palette = get_color_cycle()
if n_colors is None:
n_colors = len(palette)
elif not isinstance(palette, str):
palette = palette
if n_colors is None:
n_colors = len(palette)
else:
if n_colors is None:
# Use all colors in a qualitative palette or 6 of another kind
n_colors = QUAL_PALETTE_SIZES.get(palette, 6)
if palette in SEABORN_PALETTES:
# Named "seaborn variant" of matplotlib default color cycle
palette = SEABORN_PALETTES[palette]
elif palette == "hls":
# Evenly spaced colors in cylindrical RGB space
palette = hls_palette(n_colors, as_cmap=as_cmap)
elif palette == "husl":
# Evenly spaced colors in cylindrical Lab space
palette = husl_palette(n_colors, as_cmap=as_cmap)
elif palette.lower() == "jet":
# Paternalism
raise ValueError("No.")
elif palette.startswith("ch:"):
# Cubehelix palette with params specified in string
args, kwargs = _parse_cubehelix_args(palette)
palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)
elif palette.startswith("light:"):
# light palette to color specified in string
_, color = palette.split(":")
reverse = color.endswith("_r")
if reverse:
color = color[:-2]
palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
elif palette.startswith("dark:"):
# light palette to color specified in string
_, color = palette.split(":")
reverse = color.endswith("_r")
if reverse:
color = color[:-2]
palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
elif palette.startswith("blend:"):
# blend palette between colors specified in string
_, colors = palette.split(":")
colors = colors.split(",")
palette = blend_palette(colors, n_colors, as_cmap=as_cmap)
else:
try:
# Perhaps a named matplotlib colormap?
palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
except (ValueError, KeyError): # Error class changed in mpl36
raise ValueError(f"{palette!r} is not a valid palette name")
if desat is not None:
palette = [desaturate(c, desat) for c in palette]
if not as_cmap:
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in r, g, b tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette)
except ValueError:
raise ValueError(f"Could not generate a palette for {palette}")
return palette
def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): # noqa
"""
Return hues with constant lightness and saturation in the HLS system.
The hues are evenly sampled along a circular path. The resulting palette will be
appropriate for categorical or cyclical data.
The `h`, `l`, and `s` values should be between 0 and 1.
.. note::
While the separation of the resulting colors will be mathematically
constant, the HLS system does not construct a perceptually-uniform space,
so their apparent intensity will vary.
Parameters
----------
n_colors : int
Number of colors in the palette.
h : float
The value of the first hue.
l : float
The lightness value.
s : float
The saturation intensity.
as_cmap : bool
If True, return a matplotlib colormap object.
Returns
-------
palette
list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
See Also
--------
husl_palette : Make a palette using evenly spaced hues in the HUSL system.
Examples
--------
.. include:: ../docstrings/hls_palette.rst
"""
if as_cmap:
n_colors = 256
hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]
hues += h
hues %= 1
hues -= hues.astype(int)
palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]
if as_cmap:
return mpl.colors.ListedColormap(palette, "hls")
else:
return _ColorPalette(palette)
def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa
"""
Return hues with constant lightness and saturation in the HUSL system.
The hues are evenly sampled along a circular path. The resulting palette will be
appropriate for categorical or cyclical data.
The `h`, `l`, and `s` values should be between 0 and 1.
This function is similar to :func:`hls_palette`, but it uses a nonlinear color
space that is more perceptually uniform.
Parameters
----------
n_colors : int
Number of colors in the palette.
h : float
The value of the first hue.
l : float
The lightness value.
s : float
The saturation intensity.
as_cmap : bool
If True, return a matplotlib colormap object.
Returns
-------
palette
list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
See Also
--------
hls_palette : Make a palette using evenly spaced hues in the HSL system.
Examples
--------
.. include:: ../docstrings/husl_palette.rst
"""
if as_cmap:
n_colors = 256
hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]
hues += h
hues %= 1
hues *= 359
s *= 99
l *= 99 # noqa
palette = [_color_to_rgb((h_i, s, l), input="husl") for h_i in hues]
if as_cmap:
return mpl.colors.ListedColormap(palette, "hsl")
else:
return _ColorPalette(palette)
def mpl_palette(name, n_colors=6, as_cmap=False):
"""
Return a palette or colormap from the matplotlib registry.
For continuous palettes, evenly-spaced discrete samples are chosen while
excluding the minimum and maximum value in the colormap to provide better
contrast at the extremes.
For qualitative palettes (e.g. those from colorbrewer), exact values are
indexed (rather than interpolated), but fewer than `n_colors` can be returned
if the palette does not define that many.
Parameters
----------
name : string
Name of the palette. This should be a named matplotlib colormap.
n_colors : int
Number of discrete colors in the palette.
Returns
-------
list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
Examples
--------
.. include:: ../docstrings/mpl_palette.rst
"""
if name.endswith("_d"):
sub_name = name[:-2]
if sub_name.endswith("_r"):
reverse = True
sub_name = sub_name[:-2]
else:
reverse = False
pal = color_palette(sub_name, 2) + ["#333333"]
if reverse:
pal = pal[::-1]
cmap = blend_palette(pal, n_colors, as_cmap=True)
else:
cmap = get_colormap(name)
if name in MPL_QUAL_PALS:
bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]
else:
bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]
palette = list(map(tuple, cmap(bins)[:, :3]))
if as_cmap:
return cmap
else:
return _ColorPalette(palette)
def _color_to_rgb(color, input):
    """Convert *color* from the space named by *input* to an RGB tuple."""
    space = input
    if space == "hls":
        converted = colorsys.hls_to_rgb(*color)
    elif space == "husl":
        # husl conversion can land slightly outside [0, 1]; clamp it.
        converted = tuple(np.clip(husl.husl_to_rgb(*color), 0, 1))
    elif space == "xkcd":
        converted = xkcd_rgb[color]
    else:
        converted = color
    return mpl.colors.to_rgb(converted)
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
    """Make a sequential palette that blends from dark to ``color``.

    This kind of palette is good for data that range between relatively
    uninteresting low values and interesting high values.

    The ``color`` parameter can be specified in a number of ways, including
    all options for defining a color in matplotlib and several additional
    color spaces that are handled by seaborn. You can also use the database
    of named colors from the XKCD color survey.

    If you are using the IPython notebook, you can also choose this palette
    interactively with the :func:`choose_dark_palette` function.

    Parameters
    ----------
    color : base color for high values
        hex, rgb-tuple, or html color name
    n_colors : int, optional
        number of colors in the palette
    reverse : bool, optional
        if True, reverse the direction of the blend
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.
    input : {'rgb', 'hls', 'husl', xkcd'}
        Color space to interpret the input color. The first three options
        apply to tuple inputs and the latter applies to string inputs.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    light_palette : Create a sequential palette with bright low values.
    diverging_palette : Create a diverging palette with two colors.

    Examples
    --------
    .. include:: ../docstrings/dark_palette.rst
    """
    endpoint = _color_to_rgb(color, input)
    hue, sat, _ = husl.rgb_to_husl(*endpoint)
    # Anchor the dark end at a low-lightness, desaturated rendering of the
    # same hue so the blend stays perceptually smooth.
    dark_end = _color_to_rgb((hue, sat * .15, 15), input="husl")
    if reverse:
        stops = [endpoint, dark_end]
    else:
        stops = [dark_end, endpoint]
    return blend_palette(stops, n_colors, as_cmap)
def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
    """Make a sequential palette that blends from light to ``color``.

    The ``color`` parameter can be specified in a number of ways, including
    all options for defining a color in matplotlib and several additional
    color spaces that are handled by seaborn. You can also use the database
    of named colors from the XKCD color survey.

    If you are using a Jupyter notebook, you can also choose this palette
    interactively with the :func:`choose_light_palette` function.

    Parameters
    ----------
    color : base color for high values
        hex code, html color name, or tuple in `input` space.
    n_colors : int, optional
        number of colors in the palette
    reverse : bool, optional
        if True, reverse the direction of the blend
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.
    input : {'rgb', 'hls', 'husl', xkcd'}
        Color space to interpret the input color. The first three options
        apply to tuple inputs and the latter applies to string inputs.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    dark_palette : Create a sequential palette with dark low values.
    diverging_palette : Create a diverging palette with two colors.

    Examples
    --------
    .. include:: ../docstrings/light_palette.rst
    """
    endpoint = _color_to_rgb(color, input)
    hue, sat, _ = husl.rgb_to_husl(*endpoint)
    # Anchor the light end at a high-lightness, desaturated rendering of the
    # same hue so the blend stays perceptually smooth.
    light_end = _color_to_rgb((hue, sat * .15, 95), input="husl")
    if reverse:
        stops = [endpoint, light_end]
    else:
        stops = [light_end, endpoint]
    return blend_palette(stops, n_colors, as_cmap)
def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6,  # noqa
                      center="light", as_cmap=False):
    """Make a diverging palette between two HUSL colors.

    If you are using the IPython notebook, you can also choose this palette
    interactively with the :func:`choose_diverging_palette` function.

    Parameters
    ----------
    h_neg, h_pos : float in [0, 359]
        Anchor hues for negative and positive extents of the map.
    s : float in [0, 100], optional
        Anchor saturation for both extents of the map.
    l : float in [0, 100], optional
        Anchor lightness for both extents of the map.
    sep : int, optional
        Size of the intermediate region.
    n : int, optional
        Number of colors in the palette (if not returning a cmap)
    center : {"light", "dark"}, optional
        Whether the center of the palette is light or dark
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    dark_palette : Create a sequential palette with dark values.
    light_palette : Create a sequential palette with light values.

    Examples
    --------
    .. include:: ../docstrings/diverging_palette.rst
    """
    # Dict lookups (rather than if/else) so an invalid `center` raises KeyError.
    palfunc = {"dark": dark_palette, "light": light_palette}[center]
    mid_color = {"light": (.95, .95, .95), "dark": (.133, .133, .133)}[center]
    # Each half gets 128 samples minus half of the separator width.
    half = int(128 - (sep // 2))
    neg = palfunc((h_neg, s, l), half, reverse=True, input="husl")
    pos = palfunc((h_pos, s, l), half, input="husl")
    middle = [mid_color] * sep
    return blend_palette(np.concatenate([neg, middle, pos]), n, as_cmap=as_cmap)
def blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"):
    """Make a palette that blends between a list of colors.
    Parameters
    ----------
    colors : sequence of colors in various formats interpreted by `input`
        hex code, html color name, or tuple in `input` space.
    n_colors : int, optional
        Number of colors in the palette.
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.
    input : {'rgb', 'hls', 'husl', 'xkcd'}, optional
        Color space used to interpret the elements of `colors`.
    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
    Examples
    --------
    .. include:: ../docstrings/blend_palette.rst
    """
    colors = [_color_to_rgb(color, input) for color in colors]
    name = "blend"
    pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)
    if not as_cmap:
        # Sample the continuous map at n_colors evenly spaced points,
        # dropping the alpha channel.
        rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3]  # no alpha
        pal = _ColorPalette(map(tuple, rgb_array))
    return pal
def xkcd_palette(colors):
    """Make a palette with color names from the xkcd color survey.

    See xkcd for the full list of colors: https://xkcd.com/color/rgb/

    This is just a simple wrapper around the `seaborn.xkcd_rgb` dictionary.

    Parameters
    ----------
    colors : list of strings
        List of keys in the `seaborn.xkcd_rgb` dictionary.

    Returns
    -------
    palette
        A list of colors as RGB tuples.

    See Also
    --------
    crayon_palette : Make a palette with Crayola crayon colors.
    """
    hex_codes = []
    for name in colors:
        hex_codes.append(xkcd_rgb[name])
    return color_palette(hex_codes, len(hex_codes))
def crayon_palette(colors):
    """Make a palette with color names from Crayola crayons.

    Colors are taken from here:
    https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors

    This is just a simple wrapper around the `seaborn.crayons` dictionary.

    Parameters
    ----------
    colors : list of strings
        List of keys in the `seaborn.crayons` dictionary.

    Returns
    -------
    palette
        A list of colors as RGB tuples.

    See Also
    --------
    xkcd_palette : Make a palette with named colors from the XKCD color survey.
    """
    # __getitem__ (not .get) so an unknown crayon name raises KeyError.
    chosen = list(map(crayons.__getitem__, colors))
    return color_palette(chosen, len(chosen))
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,
                      light=.85, dark=.15, reverse=False, as_cmap=False):
    """Make a sequential palette from the cubehelix system.
    This produces a colormap with linearly-decreasing (or increasing)
    brightness. That means that information will be preserved if printed to
    black and white or viewed by someone who is colorblind. "cubehelix" is
    also available as a matplotlib-based palette, but this function gives the
    user more control over the look of the palette and has a different set of
    defaults.
    In addition to using this function, it is also possible to generate a
    cubehelix palette generally in seaborn using a string starting with
    `ch:` and containing other parameters (e.g. `"ch:s=.25,r=-.5"`).
    Parameters
    ----------
    n_colors : int
        Number of colors in the palette.
    start : float, 0 <= start <= 3
        The hue value at the start of the helix.
    rot : float
        Rotations around the hue wheel over the range of the palette.
    gamma : float 0 <= gamma
        Nonlinearity to emphasize dark (gamma < 1) or light (gamma > 1) colors.
    hue : float, 0 <= hue <= 1
        Saturation of the colors.
    dark : float 0 <= dark <= 1
        Intensity of the darkest color in the palette.
    light : float 0 <= light <= 1
        Intensity of the lightest color in the palette.
    reverse : bool
        If True, the palette will go from dark to light.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.
    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`
    See Also
    --------
    choose_cubehelix_palette : Launch an interactive widget to select cubehelix
                               palette parameters.
    dark_palette : Create a sequential palette with dark low values.
    light_palette : Create a sequential palette with bright low values.
    References
    ----------
    Green, D. A. (2011). "A colour scheme for the display of astronomical
    intensity images". Bulletin of the Astromical Society of India, Vol. 39,
    p. 289-295.
    Examples
    --------
    .. include:: ../docstrings/cubehelix_palette.rst
    """
    def get_color_function(p0, p1):
        # Copied from matplotlib because it lives in private module
        def color(x):
            # Apply gamma factor to emphasise low or high intensity values
            xg = x ** gamma
            # Calculate amplitude and angle of deviation from the black
            # to white diagonal in the plane of constant
            # perceived intensity.
            a = hue * xg * (1 - xg) / 2
            phi = 2 * np.pi * (start / 3 + rot * x)
            return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
        return color
    # Per-channel coefficients for the helix basis, per Green (2011) --
    # see References in the docstring above.
    cdict = {
        "red": get_color_function(-0.14861, 1.78277),
        "green": get_color_function(-0.29227, -0.90649),
        "blue": get_color_function(1.97294, 0.0),
    }
    cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict)
    # Sample from `light` toward `dark`, so index 0 is the lightest color
    # unless `reverse` is set below.
    x = np.linspace(light, dark, int(n_colors))
    pal = cmap(x)[:, :3].tolist()
    if reverse:
        pal = pal[::-1]
    if as_cmap:
        # For a cmap, resample at full 256-entry resolution rather than
        # reusing the n_colors-sized list.
        x_256 = np.linspace(light, dark, 256)
        if reverse:
            x_256 = x_256[::-1]
        pal_256 = cmap(x_256)
        cmap = mpl.colors.ListedColormap(pal_256, "seaborn_cubehelix")
        return cmap
    else:
        return _ColorPalette(pal)
def _parse_cubehelix_args(argstr):
"""Turn stringified cubehelix params into args/kwargs."""
if argstr.startswith("ch:"):
argstr = argstr[3:]
if argstr.endswith("_r"):
reverse = True
argstr = argstr[:-2]
else:
reverse = False
if not argstr:
return [], {"reverse": reverse}
all_args = argstr.split(",")
args = [float(a.strip(" ")) for a in all_args if "=" not in a]
kwargs = [a.split("=") for a in all_args if "=" in a]
kwargs = {k.strip(" "): float(v.strip(" ")) for k, v in kwargs}
kwarg_map = dict(
s="start", r="rot", g="gamma",
h="hue", l="light", d="dark", # noqa: E741
)
kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}
if reverse:
kwargs["reverse"] = True
return args, kwargs
def set_color_codes(palette="deep"):
    """Change how matplotlib color shorthands are interpreted.

    Calling this will change how shorthand codes like "b" or "g"
    are interpreted by matplotlib in subsequent plots.

    Parameters
    ----------
    palette : {deep, muted, pastel, dark, bright, colorblind}
        Named seaborn palette to use as the source of colors.

    See Also
    --------
    set : Color codes can be set through the high-level seaborn style
          manager.
    set_palette : Color codes can also be set through the function that
                  sets the matplotlib color cycle.
    """
    if palette == "reset":
        # Restore matplotlib's classic single-letter color definitions.
        colors = [
            (0., 0., 1.),
            (0., .5, 0.),
            (1., 0., 0.),
            (.75, 0., .75),
            (.75, .75, 0.),
            (0., .75, .75),
            (0., 0., 0.)
        ]
    elif not isinstance(palette, str):
        raise TypeError("set_color_codes requires a named seaborn palette")
    elif palette in SEABORN_PALETTES:
        # The 6-color variants are the canonical sources for shorthand codes.
        if not palette.endswith("6"):
            palette = palette + "6"
        colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]
    else:
        raise ValueError(f"Cannot set colors with palette '{palette}'")
    for code, color in zip("bgrmyck", colors):
        mpl.colors.colorConverter.colors[code] = (
            mpl.colors.colorConverter.to_rgb(color)
        )
| _ColorPalette |
python | tensorflow__tensorflow | tensorflow/python/data/util/structure_test.py | {
"start": 21245,
"end": 42415
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
# pylint: disable=g-long-lambda,protected-access
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_flat_structure_combinations()))
def testFlatStructure(self, value_fn, expected_structure_fn,
expected_types_fn, expected_shapes_fn):
value = value_fn()
expected_structure = expected_structure_fn()
expected_types = expected_types_fn()
expected_shapes = expected_shapes_fn()
s = structure.type_spec_from_value(value)
self.assertIsInstance(s, expected_structure)
flat_types = structure.get_flat_tensor_types(s)
self.assertEqual(expected_types, flat_types)
flat_shapes = structure.get_flat_tensor_shapes(s)
self.assertLen(flat_shapes, len(expected_shapes))
for expected, actual in zip(expected_shapes, flat_shapes):
if expected is None:
self.assertEqual(actual.ndims, None)
else:
self.assertEqual(actual.as_list(), expected)
@combinations.generate(
combinations.times(test_base.graph_only_combinations(),
_test_is_compatible_with_structure_combinations()))
def testIsCompatibleWithStructure(self, original_value_fn,
compatible_values_fn,
incompatible_values_fn):
original_value = original_value_fn()
compatible_values = compatible_values_fn()
incompatible_values = incompatible_values_fn()
s = structure.type_spec_from_value(original_value)
for compatible_value in compatible_values:
self.assertTrue(
structure.are_compatible(
s, structure.type_spec_from_value(compatible_value)))
for incompatible_value in incompatible_values:
self.assertFalse(
structure.are_compatible(
s, structure.type_spec_from_value(incompatible_value)))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_structure_from_value_equality_combinations()))
def testStructureFromValueEquality(self, value1_fn, value2_fn,
not_equal_value_fns):
# pylint: disable=g-generic-assert
not_equal_value_fns = not_equal_value_fns._obj
s1 = structure.type_spec_from_value(value1_fn())
s2 = structure.type_spec_from_value(value2_fn())
self.assertEqual(s1, s1) # check __eq__ operator.
self.assertEqual(s1, s2) # check __eq__ operator.
self.assertFalse(s1 != s1) # check __ne__ operator.
self.assertFalse(s1 != s2) # check __ne__ operator.
for c1, c2 in zip(nest.flatten(s1), nest.flatten(s2)):
self.assertEqual(hash(c1), hash(c1))
self.assertEqual(hash(c1), hash(c2))
for value_fn in not_equal_value_fns:
s3 = structure.type_spec_from_value(value_fn())
self.assertNotEqual(s1, s3) # check __ne__ operator.
self.assertNotEqual(s2, s3) # check __ne__ operator.
self.assertFalse(s1 == s3) # check __eq_ operator.
self.assertFalse(s2 == s3) # check __eq_ operator.
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_ragged_structure_inequality_combinations()))
def testRaggedStructureInequality(self, spec1, spec2):
# pylint: disable=g-generic-assert
self.assertNotEqual(spec1, spec2) # check __ne__ operator.
self.assertFalse(spec1 == spec2) # check __eq__ operator.
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_hash_combinations()))
def testHash(self, value1_fn, value2_fn, value3_fn):
s1 = structure.type_spec_from_value(value1_fn())
s2 = structure.type_spec_from_value(value2_fn())
s3 = structure.type_spec_from_value(value3_fn())
for c1, c2, c3 in zip(nest.flatten(s1), nest.flatten(s2), nest.flatten(s3)):
self.assertEqual(hash(c1), hash(c1))
self.assertEqual(hash(c1), hash(c2))
self.assertNotEqual(hash(c1), hash(c3))
self.assertNotEqual(hash(c2), hash(c3))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_round_trip_conversion_combinations()))
def testRoundTripConversion(self, value_fn):
value = value_fn()
s = structure.type_spec_from_value(value)
def maybe_stack_ta(v):
if isinstance(v, tensor_array_ops.TensorArray):
return v.stack()
return v
before = self.evaluate(maybe_stack_ta(value))
after = self.evaluate(
maybe_stack_ta(
structure.from_tensor_list(s, structure.to_tensor_list(s, value))))
flat_before = nest.flatten(before)
flat_after = nest.flatten(after)
for b, a in zip(flat_before, flat_after):
if isinstance(b, sparse_tensor.SparseTensorValue):
self.assertAllEqual(b.indices, a.indices)
self.assertAllEqual(b.values, a.values)
self.assertAllEqual(b.dense_shape, a.dense_shape)
elif isinstance(
b,
(ragged_tensor.RaggedTensor, ragged_tensor_value.RaggedTensorValue)):
self.assertAllEqual(b, a)
else:
self.assertAllEqual(b, a)
# pylint: enable=g-long-lambda
def preserveStaticShape(self):
rt = ragged_factory_ops.constant([[1, 2], [], [3]])
rt_s = structure.type_spec_from_value(rt)
rt_after = structure.from_tensor_list(rt_s,
structure.to_tensor_list(rt_s, rt))
self.assertEqual(rt_after.row_splits.shape.as_list(),
rt.row_splits.shape.as_list())
self.assertEqual(rt_after.values.shape.as_list(), [None])
st = sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5])
st_s = structure.type_spec_from_value(st)
st_after = structure.from_tensor_list(st_s,
structure.to_tensor_list(st_s, st))
self.assertEqual(st_after.indices.shape.as_list(), [None, 2])
self.assertEqual(st_after.values.shape.as_list(), [None])
self.assertEqual(st_after.dense_shape.shape.as_list(),
st.dense_shape.shape.as_list())
@combinations.generate(test_base.default_test_combinations())
def testPreserveTensorArrayShape(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.int32, size=1, element_shape=(3,))
ta_s = structure.type_spec_from_value(ta)
ta_after = structure.from_tensor_list(ta_s,
structure.to_tensor_list(ta_s, ta))
self.assertEqual(ta_after.element_shape.as_list(), [3])
@combinations.generate(test_base.default_test_combinations())
def testPreserveInferredTensorArrayShape(self):
ta = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=1)
# Shape is inferred from the write.
ta = ta.write(0, [1, 2, 3])
ta_s = structure.type_spec_from_value(ta)
ta_after = structure.from_tensor_list(ta_s,
structure.to_tensor_list(ta_s, ta))
self.assertEqual(ta_after.element_shape.as_list(), [3])
@combinations.generate(test_base.default_test_combinations())
def testIncompatibleStructure(self):
# Define three mutually incompatible values/structures, and assert that:
# 1. Using one structure to flatten a value with an incompatible structure
# fails.
# 2. Using one structure to restructure a flattened value with an
# incompatible structure fails.
value_tensor = constant_op.constant(42.0)
s_tensor = structure.type_spec_from_value(value_tensor)
flat_tensor = structure.to_tensor_list(s_tensor, value_tensor)
value_sparse_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])
s_sparse_tensor = structure.type_spec_from_value(value_sparse_tensor)
flat_sparse_tensor = structure.to_tensor_list(s_sparse_tensor,
value_sparse_tensor)
value_nest = {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}
s_nest = structure.type_spec_from_value(value_nest)
flat_nest = structure.to_tensor_list(s_nest, value_nest)
with self.assertRaisesRegex(
ValueError, r"SparseTensor.* is not convertible to a tensor with "
r"dtype.*float32.* and shape \(\)"):
structure.to_tensor_list(s_tensor, value_sparse_tensor)
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_tensor, value_nest)
with self.assertRaisesRegex(TypeError,
"neither a SparseTensor nor SparseTensorValue"):
structure.to_tensor_list(s_sparse_tensor, value_tensor)
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_sparse_tensor, value_nest)
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_nest, value_tensor)
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_nest, value_sparse_tensor)
with self.assertRaisesRegex(
ValueError,
"Cannot create a Tensor from the tensor list because item 0 "
".*tf.Tensor.* is incompatible with the expected TypeSpec "
".*TensorSpec.*"):
structure.from_tensor_list(s_tensor, flat_sparse_tensor)
with self.assertRaisesRegex(ValueError, "Expected 1 tensors but got 2."):
structure.from_tensor_list(s_tensor, flat_nest)
with self.assertRaisesRegex(
ValueError, "Cannot create a SparseTensor from the tensor list because "
"item 0 .*tf.Tensor.* is incompatible with the expected TypeSpec "
".*TensorSpec.*"):
structure.from_tensor_list(s_sparse_tensor, flat_tensor)
with self.assertRaisesRegex(ValueError, "Expected 1 tensors but got 2."):
structure.from_tensor_list(s_sparse_tensor, flat_nest)
with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 1."):
structure.from_tensor_list(s_nest, flat_tensor)
with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 1."):
structure.from_tensor_list(s_nest, flat_sparse_tensor)
@combinations.generate(test_base.default_test_combinations())
def testIncompatibleNestedStructure(self):
# Define three mutually incompatible nested values/structures, and assert
# that:
# 1. Using one structure to flatten a value with an incompatible structure
# fails.
# 2. Using one structure to restructure a flattened value with an
# incompatible structure fails.
value_0 = {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}
s_0 = structure.type_spec_from_value(value_0)
flat_s_0 = structure.to_tensor_list(s_0, value_0)
# `value_1` has compatible nested structure with `value_0`, but different
# classes.
value_1 = {
"a":
constant_op.constant(37.0),
"b":
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1])
}
s_1 = structure.type_spec_from_value(value_1)
flat_s_1 = structure.to_tensor_list(s_1, value_1)
# `value_2` has incompatible nested structure with `value_0` and `value_1`.
value_2 = {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
}
s_2 = structure.type_spec_from_value(value_2)
flat_s_2 = structure.to_tensor_list(s_2, value_2)
with self.assertRaisesRegex(
ValueError, r"SparseTensor.* is not convertible to a tensor with "
r"dtype.*int32.* and shape \(3,\)"):
structure.to_tensor_list(s_0, value_1)
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_0, value_2)
with self.assertRaisesRegex(TypeError,
"neither a SparseTensor nor SparseTensorValue"):
structure.to_tensor_list(s_1, value_0)
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_1, value_2)
# NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
# needs to account for "a" coming before or after "b". It might be worth
# adding a deterministic repr for these error messages (among other
# improvements).
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_2, value_0)
with self.assertRaisesRegex(
ValueError, "The two structures don't have the same nested structure."):
structure.to_tensor_list(s_2, value_1)
with self.assertRaisesRegex(ValueError,
r"Cannot create a Tensor from the tensor list"):
structure.from_tensor_list(s_0, flat_s_1)
with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 3"):
structure.from_tensor_list(s_0, flat_s_2)
with self.assertRaisesRegex(
ValueError, "Cannot create a SparseTensor from the tensor list"):
structure.from_tensor_list(s_1, flat_s_0)
with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 3"):
structure.from_tensor_list(s_1, flat_s_2)
with self.assertRaisesRegex(ValueError, "Expected 3 tensors but got 2"):
structure.from_tensor_list(s_2, flat_s_0)
with self.assertRaisesRegex(ValueError, "Expected 3 tensors but got 2"):
structure.from_tensor_list(s_2, flat_s_1)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_convert_legacy_structure_combinations()))
def testConvertLegacyStructure(self, output_types, output_shapes,
output_classes, expected_structure):
actual_structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
self.assertEqual(actual_structure, expected_structure)
@combinations.generate(test_base.default_test_combinations())
def testConvertLegacyStructureFail(self):
with self.assertRaisesRegex(
TypeError, "Could not build a structure for output class "
"_EagerTensorArray. Make sure any component class in "
"`output_classes` inherits from one of the following classes: "
"`tf.TypeSpec`, `tf.sparse.SparseTensor`, `tf.Tensor`, "
"`tf.TensorArray`."):
structure.convert_legacy_structure(dtypes.int32,
tensor_shape.TensorShape([2, None]),
tensor_array_ops._EagerTensorArray)
@combinations.generate(test_base.default_test_combinations())
def testNestedNestedStructure(self):
s = (tensor.TensorSpec([], dtypes.int64),
(tensor.TensorSpec([], dtypes.float32),
tensor.TensorSpec([], dtypes.string)))
int64_t = constant_op.constant(37, dtype=dtypes.int64)
float32_t = constant_op.constant(42.0)
string_t = constant_op.constant("Foo")
nested_tensors = (int64_t, (float32_t, string_t))
tensor_list = structure.to_tensor_list(s, nested_tensors)
for expected, actual in zip([int64_t, float32_t, string_t], tensor_list):
self.assertIs(expected, actual)
(actual_int64_t,
(actual_float32_t,
actual_string_t)) = structure.from_tensor_list(s, tensor_list)
self.assertIs(int64_t, actual_int64_t)
self.assertIs(float32_t, actual_float32_t)
self.assertIs(string_t, actual_string_t)
(actual_int64_t, (actual_float32_t, actual_string_t)) = (
structure.from_compatible_tensor_list(s, tensor_list))
self.assertIs(int64_t, actual_int64_t)
self.assertIs(float32_t, actual_float32_t)
self.assertIs(string_t, actual_string_t)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_batch_combinations()))
def testBatch(self, element_structure, batch_size,
expected_batched_structure):
batched_structure = nest.map_structure(
lambda component_spec: component_spec._batch(batch_size),
element_structure)
self.assertEqual(batched_structure, expected_batched_structure)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_unbatch_combinations()))
def testUnbatch(self, element_structure, expected_unbatched_structure):
unbatched_structure = nest.map_structure(
lambda component_spec: component_spec._unbatch(), element_structure)
self.assertEqual(unbatched_structure, expected_unbatched_structure)
# pylint: disable=g-long-lambda
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_to_batched_tensor_list_combinations()))
def testToBatchedTensorList(self, value_fn, element_0_fn):
batched_value = value_fn()
s = structure.type_spec_from_value(batched_value)
batched_tensor_list = structure.to_batched_tensor_list(s, batched_value)
# The batch dimension is 2 for all of the test cases.
# NOTE(mrry): `tf.shape()` does not currently work for the DT_VARIANT
# tensors in which we store sparse tensors.
for t in batched_tensor_list:
if t.dtype != dtypes.variant:
self.assertEqual(2, self.evaluate(array_ops.shape(t)[0]))
# Test that the 0th element from the unbatched tensor is equal to the
# expected value.
expected_element_0 = self.evaluate(element_0_fn())
unbatched_s = nest.map_structure(
lambda component_spec: component_spec._unbatch(), s)
actual_element_0 = structure.from_tensor_list(
unbatched_s, [t[0] for t in batched_tensor_list])
for expected, actual in zip(
nest.flatten(expected_element_0), nest.flatten(actual_element_0)):
self.assertValuesEqual(expected, actual)
# pylint: enable=g-long-lambda
@combinations.generate(test_base.default_test_combinations())
def testDatasetSpecConstructor(self):
rt_spec = ragged_tensor.RaggedTensorSpec([10, None], dtypes.int32)
st_spec = sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32)
t_spec = tensor.TensorSpec([10, 8], dtypes.string)
element_spec = {"rt": rt_spec, "st": st_spec, "t": t_spec}
ds_struct = dataset_ops.DatasetSpec(element_spec, [5])
self.assertEqual(ds_struct._element_spec, element_spec)
# Note: shape was automatically converted from a list to a TensorShape.
self.assertEqual(ds_struct._dataset_shape, tensor_shape.TensorShape([5]))
@combinations.generate(test_base.default_test_combinations())
def testCustomMapping(self):
elem = CustomMap(foo=constant_op.constant(37.))
spec = structure.type_spec_from_value(elem)
self.assertIsInstance(spec, CustomMap)
self.assertEqual(spec["foo"], tensor.TensorSpec([], dtypes.float32))
@combinations.generate(test_base.default_test_combinations())
def testObjectProxy(self):
nt_type = collections.namedtuple("A", ["x", "y"])
proxied = wrapt.ObjectProxy(nt_type(1, 2))
proxied_spec = structure.type_spec_from_value(proxied)
self.assertEqual(
structure.type_spec_from_value(nt_type(1, 2)), proxied_spec)
@combinations.generate(test_base.default_test_combinations())
def testTypeSpecNotBuild(self):
with self.assertRaisesRegex(
TypeError, "Could not build a `TypeSpec` for 100 with type int"):
structure.type_spec_from_value(100, use_fallback=False)
@combinations.generate(test_base.default_test_combinations())
def testTypeSpecNotCompatible(self):
test_obj = structure.NoneTensorSpec()
with self.assertRaisesRegex(
ValueError, r"No `TypeSpec` is compatible with both NoneTensorSpec\(\) "
"and 100"):
test_obj.most_specific_compatible_shape(100)
self.assertEqual(test_obj,
test_obj.most_specific_compatible_shape(test_obj))
@combinations.generate(test_base.default_test_combinations())
def testDataclasses(self):
mt = MaskedTensor(mask=True, value=constant_op.constant([1]))
mt_type_spec = structure.type_spec_from_value(mt)
self.assertEqual(mt_type_spec.mask, mt.mask)
self.assertEqual(
mt_type_spec.value, structure.type_spec_from_value(mt.value)
)
mt2 = MaskedTensor(mask=True, value=constant_op.constant([2]))
mt3 = MaskedTensor(mask=False, value=constant_op.constant([1]))
mt2_type_spec = structure.type_spec_from_value(mt2)
mt3_type_spec = structure.type_spec_from_value(mt3)
self.assertEqual(mt_type_spec, mt2_type_spec)
self.assertNotEqual(mt_type_spec, mt3_type_spec)
| StructureTest |
python | pandas-dev__pandas | pandas/tests/extension/base/ops.py | {
"start": 7694,
"end": 9145
} | class ____(BaseOpsUtil):
"""Various Series and DataFrame comparison ops methods."""
def _compare_other(self, ser: pd.Series, data, op, other):
if op.__name__ in ["eq", "ne"]:
# comparison should match point-wise comparisons
result = op(ser, other)
expected = ser.combine(other, op)
expected = self._cast_pointwise_result(op.__name__, ser, other, expected)
tm.assert_series_equal(result, expected)
else:
exc = None
try:
result = op(ser, other)
except Exception as err:
exc = err
if exc is None:
# Didn't error, then should match pointwise behavior
expected = ser.combine(other, op)
expected = self._cast_pointwise_result(
op.__name__, ser, other, expected
)
tm.assert_series_equal(result, expected)
else:
with pytest.raises(type(exc)):
ser.combine(other, op)
def test_compare_scalar(self, data, comparison_op):
ser = pd.Series(data)
self._compare_other(ser, data, comparison_op, 0)
def test_compare_array(self, data, comparison_op):
ser = pd.Series(data)
other = pd.Series([data[0]] * len(data), dtype=data.dtype)
self._compare_other(ser, data, comparison_op, other)
| BaseComparisonOpsTests |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/ext/hybrid/hybrid_four.py | {
"start": 1556,
"end": 1872
} | class ____(FirstNameOnly):
last_name: Mapped[str]
@FirstNameOnly.name.getter
def name(self) -> str:
return self.first_name + " " + self.last_name
@name.inplace.setter
def _name_setter(self, value: str) -> None:
self.first_name, self.last_name = value.split(" ", 1)
| FirstNameLastName |
python | pytorch__pytorch | test/quantization/pt2e/test_xnnpack_quantizer.py | {
"start": 1330,
"end": 43232
} | class ____(PT2EQuantizationTestCase):
def test_conv1d(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (torch.randn(1, 3, 5),)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 1,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv1d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
]
self._test_quantizer(
TestHelperModules.ConvWithBNRelu(dim=1, relu=False, bn=False),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
def test_conv2d(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (torch.randn(1, 3, 5, 5),)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2,
# quantize_per_channel for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 1,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv2d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
]
self._test_quantizer(
TestHelperModules.ConvWithBNRelu(relu=False, bn=False),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
def test_conv1d_with_conv2d(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.default: 4,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 4,
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 2,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv2d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv1d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
]
m = TestHelperModules.Conv2dThenConv1d()
self._test_quantizer(
m,
m.example_inputs(),
quantizer,
node_occurrence,
node_list,
)
def test_linear(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
m_eager = TestHelperModules.TwoLinearModule().eval()
# Test with 2d inputs
example_inputs_2d = (torch.randn(9, 8),)
example_inputs_3d = (torch.randn(9, 10, 8),)
example_inputs_4d = (torch.randn(9, 10, 11, 8),)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.default: 3,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 3,
# quantize_per_channel for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 2,
}
qconfig = default_per_channel_symmetric_qnnpack_qconfig
qconfig_mapping = QConfigMapping().set_global(qconfig)
for example_inputs in [example_inputs_2d, example_inputs_3d, example_inputs_4d]:
self._test_quantizer(
m_eager,
example_inputs,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
)
def test_linear_relu(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
m_eager = TestHelperModules.LinearReluModel().eval()
# Test with 2d inputs
example_inputs_2d = (torch.randn(1, 5),)
example_inputs_3d = (torch.randn(1, 2, 5),)
example_inputs_4d = (torch.randn(1, 2, 3, 5),)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
# There should not be extra quantize_per_tensor or dequantize_per_tensors for relu
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2,
# quantize_per_channel for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 1,
}
qconfig = default_per_channel_symmetric_qnnpack_qconfig
qconfig_mapping = QConfigMapping().set_global(qconfig)
for example_inputs in [example_inputs_2d, example_inputs_3d, example_inputs_4d]:
self._test_quantizer(
m_eager,
example_inputs,
quantizer,
node_occurrence,
[], # node_list
False, # executorch_backend_config() does not fuse linear-relu
qconfig_mapping,
)
def test_conv_linear_no_permute(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.default: 5,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 5,
# quantize_per_channel for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 3,
}
qconfig = default_per_channel_symmetric_qnnpack_qconfig
qconfig_mapping = QConfigMapping().set_global(qconfig)
# Test with 2d inputs
example_inputs = (torch.randn(2, 3, 4, 4),)
self._test_quantizer(
TestHelperModules.Conv2dWithTwoLinear(),
example_inputs,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
)
def test_conv_linear(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
# Test with 2d inputs
example_inputs = (torch.randn(2, 3, 4, 4),)
node_occurrence = {
torch.ops.quantized_decomposed.quantize_per_tensor.default: 5,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 5,
# quantize_per_channel for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 3,
}
qconfig = default_per_channel_symmetric_qnnpack_qconfig
qconfig_mapping = QConfigMapping().set_global(qconfig)
self._test_quantizer(
TestHelperModules.Conv2dWithTwoLinearPermute(),
example_inputs,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
)
def test_linear_with_dynamic_shape(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
m_eager = TestHelperModules.TwoLinearModule().eval()
# Test with 2d inputs
example_inputs_3d = (torch.randn(9, 10, 8),)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.default: 3,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 3,
# quantize_per_channel for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 2,
}
qconfig = default_per_channel_symmetric_qnnpack_qconfig
qconfig_mapping = QConfigMapping().set_global(qconfig)
self._test_quantizer(
m_eager,
example_inputs_3d,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
export_with_dynamic_shape=True,
)
def test_obs_sharing_ops(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
m = TestHelperModules.Conv2dWithObsSharingOps().eval()
example_inputs = (torch.randn(1, 3, 5, 5),)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.default: 5,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 5,
# quantize_per_channel for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 1,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.conv2d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.adaptive_avg_pool2d.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.hardtanh.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.mean.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
]
self._test_quantizer(m, example_inputs, quantizer, node_occurrence, node_list)
def test_set_module_name(self):
class Sub(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear(x)
x = self.sub(x)
return x
m = M().eval()
example_inputs = (torch.randn(3, 5),)
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_module_name("sub", quantization_config)
node_occurrence = {
torch.ops.aten.linear.default: 2,
# input and output for the second linear
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2,
}
node_list = [
# first linear is not quantized
torch.ops.aten.linear.default,
# second linear is quantized
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.linear.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
]
self._test_quantizer(m, example_inputs, quantizer, node_occurrence, node_list)
def test_set_module_name_with_underscores(self) -> None:
"""Test that if a module name has an underscore, we can still quantize it"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# This module name has underscores, which can be part of a mangled
# name.
self.foo_bar = torch.nn.Linear(2, 2)
self.baz = torch.nn.Linear(2, 2)
def forward(self, x):
return self.baz(self.foo_bar(x))
quantizer = XNNPACKQuantizer()
# Set global to no quantization and then per-channel for a specific submodule.
quantizer.set_module_name(
"foo_bar", get_symmetric_quantization_config(is_per_channel=True)
)
example_inputs = (torch.randn(2, 2),)
m = M().eval()
m = export(m, example_inputs, strict=True).module()
m = prepare_pt2e(m, quantizer)
# Use a linear count instead of names because the names might change, but
# the order should be the same.
count = 0
for n in m.graph.nodes:
if n.op == "call_function" and n.target == torch.ops.aten.linear.default:
# Get the weight observer to see the per-channel vs per-tensor.
weight_observer_node = n.args[1]
if count == 0:
# The weight tensor should be per-tensor and not per-channel
# for foo_bar.
self.assertEqual(weight_observer_node.op, "call_module")
observer_instance = getattr(m, weight_observer_node.target)
self.assertEqual(
observer_instance.qscheme, torch.per_channel_symmetric
)
else:
# For baz it should have no observer at all.
self.assertNotEqual(weight_observer_node.op, "call_module")
count += 1
def test_set_module_type(self):
class Sub(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear(x)
x = self.sub(x)
return x
m = M().eval()
example_inputs = (torch.randn(3, 5),)
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_module_type(Sub, quantization_config)
node_occurrence = {
torch.ops.aten.linear.default: 2,
# input and output for the second linear
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2,
}
node_list = [
# first linear is not quantized
torch.ops.aten.linear.default,
# second linear is quantized
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.linear.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
]
self._test_quantizer(m, example_inputs, quantizer, node_occurrence, node_list)
def test_set_module_type_case_2(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels=3,
out_channels=3,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
self.conv2 = torch.nn.Conv2d(
in_channels=3,
out_channels=3,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
self.conv3 = torch.nn.Conv2d(
in_channels=3,
out_channels=3,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
self.relu = torch.nn.ReLU()
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
self.fc = torch.nn.Linear(3, 16)
def forward(self, x):
x1 = self.conv(x)
x2 = self.relu(self.conv2(x1) + self.conv3(x1))
x3 = self.avgpool(x2)
x4 = torch.flatten(x3, 1)
x5 = self.fc(x4)
return x5
m = M().eval()
example_inputs = (torch.randn(1, 3, 16, 16),)
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
# We only want to annotate Linear type
quantizer.set_module_type(torch.nn.Linear, quantization_config)
node_occurrence = {
torch.ops.aten.conv2d.default: 3,
torch.ops.aten.linear.default: 1,
# input and output for the linear
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2,
}
node_list = [
# only the linear is quantized
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.linear.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
]
self._test_quantizer(m, example_inputs, quantizer, node_occurrence, node_list)
def test_propagate_annotation(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
m = TestHelperModules.Conv2dPropAnnotaton().eval()
example_inputs = (torch.randn(1, 3, 5, 5),)
# program capture
m = export(m, example_inputs, strict=True).module()
m = prepare_pt2e(m, quantizer)
m(*example_inputs)
for n in m.graph.nodes:
if n.target in [
torch.ops.aten.view.default,
torch.ops.aten.hardtanh.default,
]:
input_act = getattr(m, n.args[0].target)
output_act = getattr(m, next(iter(n.users)).target)
self.assertIs(input_act, output_act)
m = convert_pt2e(m)
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
ns.call_function(
torch.ops.quantized_decomposed.quantize_per_tensor.default
): 5,
ns.call_function(
torch.ops.quantized_decomposed.dequantize_per_tensor.default
): 5,
# note: quantize op for weights are const propagated
ns.call_function(
torch.ops.quantized_decomposed.quantize_per_channel.default
): 0,
ns.call_function(
torch.ops.quantized_decomposed.dequantize_per_channel.default
): 2,
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
def test_dynamic_linear(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(
is_per_channel=True, is_dynamic=True
)
quantizer.set_global(quantization_config)
m_eager = TestHelperModules.TwoLinearModule().eval()
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: 2,
# note: quantize op for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 2,
}
act_affine_quant_obs = observer.PlaceholderObserver.with_args(
dtype=torch.qint8,
qscheme=torch.per_tensor_affine,
quant_min=-128,
quant_max=127,
eps=2**-12,
is_dynamic=True,
)
qconfig = QConfig(
activation=act_affine_quant_obs,
weight=per_channel_weight_observer_range_neg_127_to_127,
)
qconfig_mapping = QConfigMapping().set_global(qconfig)
# Test with 2d inputs
example_inputs_2d = (torch.randn(9, 8),)
example_inputs_4d = (torch.randn(9, 10, 11, 8),)
for example_inputs in [example_inputs_2d, example_inputs_4d]:
self._test_quantizer(
m_eager,
example_inputs,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
)
def test_dynamic_linear_int4_weight(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(
is_per_channel=True,
is_dynamic=True,
weight_qmin=0,
weight_qmax=15,
)
quantizer.set_global(quantization_config)
m_eager = TestHelperModules.TwoLinearModule().eval()
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: 2,
# note: quantize op for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 2,
}
act_affine_quant_obs = observer.PlaceholderObserver.with_args(
dtype=torch.qint8,
qscheme=torch.per_tensor_affine,
quant_min=-128,
quant_max=127,
eps=2**-12,
is_dynamic=True,
)
qconfig = QConfig(
activation=act_affine_quant_obs,
weight=per_channel_weight_observer_range_neg_127_to_127.with_args(
quant_min=0, quant_max=15
),
)
qconfig_mapping = QConfigMapping().set_global(qconfig)
# Test with 2d inputs
example_inputs_2d = (torch.randn(9, 8),)
example_inputs_4d = (torch.randn(9, 10, 11, 8),)
for example_inputs in [example_inputs_2d, example_inputs_4d]:
self._test_quantizer(
m_eager,
example_inputs,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
)
def test_qat_dynamic_linear(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(
is_per_channel=True,
is_dynamic=True,
is_qat=True,
)
quantizer.set_global(quantization_config)
m_eager = TestHelperModules.TwoLinearModule().eval()
node_occurrence = {
torch.ops.quantized_decomposed.choose_qparams.tensor: 2,
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: 2,
# note: quantize op for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
torch.ops.quantized_decomposed.dequantize_per_channel.default: 2,
}
act_affine_quant_obs = default_dynamic_fake_quant
qconfig = QConfig(
activation=act_affine_quant_obs,
weight=per_channel_weight_observer_range_neg_127_to_127,
)
qconfig_mapping = QConfigMapping().set_global(qconfig)
# Test with 2d inputs
example_inputs_2d = (torch.randn(9, 8),)
example_inputs_4d = (torch.randn(9, 10, 11, 8),)
for example_inputs in [example_inputs_2d, example_inputs_4d]:
self._test_quantizer(
m_eager,
example_inputs,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
is_qat=True,
)
def test_dynamic_linear_with_conv(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(
is_per_channel=False, is_dynamic=True
)
quantizer.set_global(quantization_config)
m_eager = TestHelperModules.ConvLinearWPermute().eval()
node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: 1,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: 1,
# note: quantize op for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 1,
}
training_ir_node_occurrence = {
# input and output are using quantize_per_tensor and weight is using quantize_per_channel
# In training IR, the decomposition is different.
# `torch.ops.quantized_decomposed.quantize_per_tensor.default` nodes becomes
# `torch.ops.quantized_decomposed.quantize_per_tensor.tensor` nodes.
torch.ops.quantized_decomposed.quantize_per_tensor.tensor: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: 2,
# note: quantize op for weights are const propagated
torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 0,
}
act_affine_quant_obs = observer.PlaceholderObserver.with_args(
dtype=torch.qint8,
qscheme=torch.per_tensor_affine,
quant_min=-128,
quant_max=127,
eps=2**-12,
is_dynamic=True,
)
qconfig = QConfig(
activation=act_affine_quant_obs,
weight=weight_observer_range_neg_127_to_127,
)
# Test with 2d inputs
example_inputs = (torch.randn(2, 3, 4, 4),)
qconfig_mapping = QConfigMapping().set_global(qconfig)
self._test_quantizer(
m_eager,
example_inputs,
quantizer,
node_occurrence,
[],
True,
qconfig_mapping,
training_ir_node_occurrence=training_ir_node_occurrence,
)
def test_gru(self):
"""this is a test for annotating fp32 GRU so that it produces
q -> dq -> fp32_gru -> q -> dq, this is currently enough for our use cases,
but we may change the annotation to be more precise in the future
"""
class RNNDynamicModel(torch.nn.Module):
def __init__(self, mod_type):
super().__init__()
self.qconfig = default_dynamic_qconfig
if mod_type == "GRU":
self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float)
if mod_type == "LSTM":
self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float)
def forward(self, input_tensor, hidden_tensor):
input_tensor = 1 * input_tensor
hidden_tensor = 1 * hidden_tensor
output_tensor, hidden_out = self.mod(input_tensor, hidden_tensor)
return 1 * output_tensor, 1 * hidden_out
with override_quantized_engine("qnnpack"):
model_fx = RNNDynamicModel("GRU")
niter = 10
example_inputs = (
# input_tensor
torch.tensor([[100, -155], [-155, 100], [100, -155]], dtype=torch.float)
.unsqueeze(0)
.repeat(niter, 1, 1),
# hidden_tensor
# (D * num_layers, N, H_out)
torch.tensor([[[100, -155]]], dtype=torch.float).repeat(1, 3, 1),
)
model_graph = copy.deepcopy(model_fx)
qconfig_mapping = QConfigMapping().set_object_type(
operator.mul, default_symmetric_qnnpack_qconfig
)
model_fx = prepare_fx(
model_fx,
qconfig_mapping,
example_inputs,
backend_config=get_qnnpack_backend_config(),
)
model_fx(*example_inputs)
model_fx = _convert_to_reference_decomposed_fx(model_fx)
with torchdynamo.config.patch(allow_rnn=True):
model_graph = export(model_graph, example_inputs, strict=True).module()
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(
is_per_channel=False, is_dynamic=False
)
quantizer.set_global(quantization_config)
model_graph = prepare_pt2e(model_graph, quantizer)
model_graph(*example_inputs)
model_graph = convert_pt2e(model_graph)
self.assertEqual(model_fx(*example_inputs), model_graph(*example_inputs))
def test_linear_gru(self):
"""this test is to make sure GRU annotation does not interfere with linear annotation"""
class RNNDynamicModel(torch.nn.Module):
def __init__(self, mod_type):
super().__init__()
self.qconfig = default_dynamic_qconfig
self.linear = torch.nn.Linear(2, 2)
if mod_type == "GRU":
self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float)
if mod_type == "LSTM":
self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float)
def forward(self, input_tensor, hidden_tensor):
input_tensor = self.linear(input_tensor)
input_tensor = 1 * input_tensor
hidden_tensor = 1 * hidden_tensor
output_tensor, hidden_out = self.mod(input_tensor, hidden_tensor)
return 1 * output_tensor, 1 * hidden_out
with override_quantized_engine("qnnpack"):
model_fx = RNNDynamicModel("GRU")
niter = 10
example_inputs = (
# input_tensor
torch.tensor([[100, -155], [-155, 100], [100, -155]], dtype=torch.float)
.unsqueeze(0)
.repeat(niter, 1, 1),
# hidden_tensor
# (D * num_layers, N, H_out)
torch.tensor([[[100, -155]]], dtype=torch.float).repeat(1, 3, 1),
)
model_graph = copy.deepcopy(model_fx)
qconfig_mapping = (
QConfigMapping()
.set_object_type(operator.mul, default_symmetric_qnnpack_qconfig)
.set_object_type(torch.nn.Linear, default_symmetric_qnnpack_qconfig)
)
model_fx = prepare_fx(
model_fx,
qconfig_mapping,
example_inputs,
backend_config=get_qnnpack_backend_config(),
)
model_fx(*example_inputs)
model_fx = _convert_to_reference_decomposed_fx(model_fx)
with torchdynamo.config.patch(allow_rnn=True):
model_graph = export(model_graph, example_inputs, strict=True).module()
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(
is_per_channel=False, is_dynamic=False
)
quantizer.set_global(quantization_config)
model_graph = prepare_pt2e(model_graph, quantizer)
model_graph(*example_inputs)
model_graph = convert_pt2e(model_graph)
self.assertEqual(model_fx(*example_inputs), model_graph(*example_inputs))
def test_add_and_inplace_add(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (
torch.randn(1, 3, 5, 5),
torch.randn(1, 3, 5, 5),
)
node_occurrence = {
# two input and one output for first add, and output for second add
torch.ops.quantized_decomposed.quantize_per_tensor.default: 4,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 5,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.add.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
# TODO torch.ops.aten.add.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
]
self._test_quantizer(
TestHelperModules.AddInplaceAdd(),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
def test_mul_and_inplace_mul(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (
torch.randn(1, 3, 5, 5),
torch.randn(1, 3, 5, 5),
)
node_occurrence = {
# two input and one output for first add, and output for second add
torch.ops.quantized_decomposed.quantize_per_tensor.default: 4,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 5,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.mul.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
# TODO torch.ops.aten.mul.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
]
self._test_quantizer(
TestHelperModules.MulInplaceMul(),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
def test_add_mul_scalar(self):
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (torch.randn(1, 3, 5, 5),)
node_occurrence = {
# two input and one output for first add, and output for second add
torch.ops.quantized_decomposed.quantize_per_tensor.default: 5,
# TODO torch.ops.quantized_decomposed.dequantize_per_tensor.default: 9,
}
node_list = [
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.add.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.mul.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
# TODO torch.ops.aten.add.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
# TODO torch.ops.aten.mul.Tensor,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
]
self._test_quantizer(
TestHelperModules.AddMulScalar(),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
def test_mul_float32_max(self):
class M(torch.nn.Module):
def forward(self, x):
return x * 3.4028235e38
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (torch.randn(1, 3, 5, 5),)
# not quantized
node_occurrence = {
torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 0,
}
node_list = [
torch.ops.aten.mul.Tensor,
]
self._test_quantizer(
M(),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
def test_add_mul_long(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.t = torch.tensor([100])
def forward(self, x):
x = x + self.t
x = x * self.t
return x
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (torch.randn(1, 3, 5, 5),)
# not quantized
node_occurrence = {
torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 0,
}
node_list = [
torch.ops.aten.add.Tensor,
torch.ops.aten.mul.Tensor,
]
self._test_quantizer(
M(),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
def test_cat_same_node(self):
"""Ensure that concatenating the same node does not cause any unexpected behavior"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = torch.cat([x, x])
return x
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
example_inputs = (torch.randn(1, 3, 5, 5),)
node_occurrence = {
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2,
}
node_list = [
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
torch.ops.aten.cat.default,
torch.ops.quantized_decomposed.quantize_per_tensor.default,
torch.ops.quantized_decomposed.dequantize_per_tensor.default,
]
self._test_quantizer(
M(),
example_inputs,
quantizer,
node_occurrence,
node_list,
)
# TODO: express this using self._test_quantizer, add test for inception_v4
| TestXNNPACKQuantizer |
python | pytorch__pytorch | torch/_inductor/exc.py | {
"start": 658,
"end": 1030
} | class ____(RuntimeError):
@staticmethod
def operator_str(target: Any, args: list[Any], kwargs: dict[str, Any]) -> str:
lines = [f"target: {target}"] + [
f"args[{i}]: {arg}" for i, arg in enumerate(args)
]
if kwargs:
lines.append(f"kwargs: {kwargs}")
return textwrap.indent("\n".join(lines), " ")
| OperatorIssue |
python | scipy__scipy | scipy/linalg/tests/test_decomp_update.py | {
"start": 24896,
"end": 24959
} | class ____(BaseQRdelete):
dtype = np.dtype('f')
| TestQRdelete_f |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0060_make_rank_not_null.py | {
"start": 179,
"end": 767
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0059_migrate_null_rank"),
]
operations = [
migrations.AlterField(
model_name="importedfile",
name="rank",
field=models.IntegerField(
default=0,
validators=[
django.core.validators.MinValueValidator(-10),
django.core.validators.MaxValueValidator(10),
],
verbose_name="Page search rank",
),
),
]
| Migration |
python | lxml__lxml | src/lxml/tests/test_etree.py | {
"start": 184808,
"end": 197325
} | class ____(HelperTestCase):
def test_c14n(self):
tree = self.parse(b'<a><b/></a>')
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(b'<a><b></b></a>',
s)
def test_c14n_gzip(self):
tree = self.parse(b'<a>'+b'<b/>'*200+b'</a>')
f = BytesIO()
tree.write_c14n(f, compression=9)
with gzip.GzipFile(fileobj=BytesIO(f.getvalue())) as gzfile:
s = gzfile.read()
self.assertEqual(b'<a>'+b'<b></b>'*200+b'</a>',
s)
def test_c14n_file(self):
tree = self.parse(b'<a><b/></a>')
with tmpfile() as filename:
tree.write_c14n(filename)
data = read_file(filename, 'rb')
self.assertEqual(b'<a><b></b></a>',
data)
def test_c14n_file_pathlike(self):
tree = self.parse(b'<a><b/></a>')
with tmpfile() as filename:
tree.write_c14n(SimpleFSPath(filename))
data = read_file(filename, 'rb')
self.assertEqual(b'<a><b></b></a>',
data)
def test_c14n_file_gzip(self):
tree = self.parse(b'<a>'+b'<b/>'*200+b'</a>')
with tmpfile() as filename:
tree.write_c14n(filename, compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(b'<a>'+b'<b></b>'*200+b'</a>',
data)
def test_c14n_file_gzip_pathlike(self):
tree = self.parse(b'<a>'+b'<b/>'*200+b'</a>')
with tmpfile() as filename:
tree.write_c14n(SimpleFSPath(filename), compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(b'<a>'+b'<b></b>'*200+b'</a>',
data)
def test_c14n2_file_gzip(self):
tree = self.parse(b'<a>'+b'<b/>'*200+b'</a>')
with tmpfile() as filename:
tree.write(filename, method='c14n2', compression=9)
with gzip.open(filename, 'rb') as f:
data = f.read()
self.assertEqual(b'<a>'+b'<b></b>'*200+b'</a>',
data)
def test_c14n2_with_text(self):
tree = self.parse(
b'<?xml version="1.0"?> <a> abc \n <b> btext </b> btail <c/> ctail </a> ')
f = BytesIO()
tree.write(f, method='c14n2')
s = f.getvalue()
self.assertEqual(b'<a> abc \n <b> btext </b> btail <c></c> ctail </a>',
s)
f = BytesIO()
tree.write(f, method='c14n2', strip_text=True)
s = f.getvalue()
self.assertEqual(b'<a>abc<b>btext</b>btail<c></c>ctail</a>',
s)
def test_c14n_with_comments(self):
tree = self.parse(b'<!--hi--><a><!--ho--><b/></a><!--hu-->')
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
f = BytesIO()
tree.write_c14n(f, with_comments=True)
s = f.getvalue()
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
f = BytesIO()
tree.write_c14n(f, with_comments=False)
s = f.getvalue()
self.assertEqual(b'<a><b></b></a>',
s)
def test_c14n2_with_comments(self):
tree = self.parse(b'<!--hi--> <a> <!-- ho --> <b/> </a> <!-- hu -->')
self.assertEqual(
b'<!--hi-->\n<a> <!-- ho --> <b></b> </a>\n<!-- hu -->',
etree.tostring(tree, method='c14n2'))
self.assertEqual(
b'<!--hi-->\n<a> <!-- ho --> <b></b> </a>\n<!-- hu -->',
etree.tostring(tree, method='c14n2', with_comments=True))
self.assertEqual(
b'<a> <b></b> </a>',
etree.tostring(tree, method='c14n2', with_comments=False))
def test_c14n2_with_comments_strip_text(self):
tree = self.parse(b'<!--hi--> <a> <!-- ho --> <b/> </a> <!-- hu -->')
self.assertEqual(
b'<!--hi-->\n<a><!-- ho --><b></b></a>\n<!-- hu -->',
etree.tostring(tree, method='c14n2', with_comments=True, strip_text=True))
self.assertEqual(
b'<a><b></b></a>',
etree.tostring(tree, method='c14n2', with_comments=False, strip_text=True))
def test_c14n_tostring_with_comments(self):
tree = self.parse(b'<!--hi--><a><!--ho--><b/></a><!--hu-->')
s = etree.tostring(tree, method='c14n')
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
s = etree.tostring(tree, method='c14n', with_comments=True)
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
s = etree.tostring(tree, method='c14n', with_comments=False)
self.assertEqual(b'<a><b></b></a>',
s)
def test_c14n2_tostring_with_comments(self):
tree = self.parse(b'<!--hi--><a><!--ho--><b/></a><!--hu-->')
s = etree.tostring(tree, method='c14n2')
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
s = etree.tostring(tree, method='c14n2', with_comments=True)
self.assertEqual(b'<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->',
s)
s = etree.tostring(tree, method='c14n2', with_comments=False)
self.assertEqual(b'<a><b></b></a>',
s)
def test_c14n_element_tostring_with_comments(self):
tree = self.parse(b'<!--hi--><a><!--ho--><b/></a><!--hu-->')
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(b'<a><!--ho--><b></b></a>',
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=True)
self.assertEqual(b'<a><!--ho--><b></b></a>',
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=False)
self.assertEqual(b'<a><b></b></a>',
s)
def test_c14n_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(b'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>',
s)
f = BytesIO()
tree.write_c14n(f, exclusive=False)
s = f.getvalue()
self.assertEqual(b'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>',
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True)
s = f.getvalue()
self.assertEqual(b'<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>',
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True, inclusive_ns_prefixes=['z'])
s = f.getvalue()
self.assertEqual(b'<a xmlns="http://abc" xmlns:z="http://cde"><z:b></z:b></a>',
s)
def test_c14n_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n')
self.assertEqual(b'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>',
s)
s = etree.tostring(tree, method='c14n', exclusive=False)
self.assertEqual(b'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>',
s)
s = etree.tostring(tree, method='c14n', exclusive=True)
self.assertEqual(b'<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>',
s)
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(b'<a xmlns="http://abc" xmlns:y="http://bcd"><z:b xmlns:z="http://cde"></z:b></a>',
s)
def test_c14n_element_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(b'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>',
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=False)
self.assertEqual(b'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>',
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=True)
self.assertEqual(b'<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>',
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=False)
self.assertEqual(b'<z:b xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>',
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True)
self.assertEqual(b'<z:b xmlns:z="http://cde"></z:b>',
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(b'<z:b xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>',
s)
def test_c14n_tostring_inclusive_ns_prefixes(self):
""" Regression test to fix memory allocation issues (use 3+ inclusive NS spaces)"""
tree = self.parse(_bytes(
'<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['x', 'y', 'z'])
self.assertEqual(b'<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>',
s)
def test_python3_problem_bytesio_iterparse(self):
content = BytesIO(b'''<?xml version="1.0" encoding="utf-8"?> <some_ns_id:some_head_elem xmlns:some_ns_id="http://www.example.com" xmlns:xhtml="http://www.w3.org/1999/xhtml"><xhtml:div></xhtml:div></some_ns_id:some_head_elem>''')
def handle_div_end(event, element):
if event == 'end' and element.tag.lower().startswith("{http://www.w3.org/1999/xhtml}div"):
# for ns_id, ns_uri in element.nsmap.items():
# print(type(ns_id), type(ns_uri), ns_id, '=', ns_uri)
etree.tostring(element, method="c14n2")
for event, element in etree.iterparse(
source=content,
events=('start', 'end')
):
handle_div_end(event, element)
def test_python3_problem_filebased_iterparse(self):
with open('test.xml', 'w+b') as f:
f.write(b'''<?xml version="1.0" encoding="utf-8"?> <some_ns_id:some_head_elem xmlns:some_ns_id="http://www.example.com" xmlns:xhtml="http://www.w3.org/1999/xhtml"><xhtml:div></xhtml:div></some_ns_id:some_head_elem>''')
def handle_div_end(event, element):
if event == 'end' and element.tag.lower() == "{http://www.w3.org/1999/xhtml}div":
# for ns_id, ns_uri in element.nsmap.items():
# print(type(ns_id), type(ns_uri), ns_id, '=', ns_uri)
etree.tostring(element, method="c14n2")
for event, element in etree.iterparse(
source='test.xml',
events=('start', 'end')
):
handle_div_end(event, element)
def test_python3_problem_filebased_parse(self):
with open('test.xml', 'w+b') as f:
f.write(b'''<?xml version="1.0" encoding="utf-8"?> <some_ns_id:some_head_elem xmlns:some_ns_id="http://www.example.com" xmlns:xhtml="http://www.w3.org/1999/xhtml"><xhtml:div></xhtml:div></some_ns_id:some_head_elem>''')
def serialize_div_element(element):
# for ns_id, ns_uri in element.nsmap.items():
# print(type(ns_id), type(ns_uri), ns_id, '=', ns_uri)
etree.tostring(element, method="c14n2")
tree = etree.parse(source='test.xml')
root = tree.getroot()
div = root.xpath('//xhtml:div', namespaces={'xhtml':'http://www.w3.org/1999/xhtml'})[0]
serialize_div_element(div)
| ETreeC14NTestCase |
python | graphql-python__graphene | graphene/types/tests/test_scalar.py | {
"start": 1198,
"end": 1512
} | class ____(ObjectType):
int = Int(input=Int(), resolver=return_input)
big_int = BigInt(input=BigInt(), resolver=return_input)
float = Float(input=Float(), resolver=return_input)
bool = Boolean(input=Boolean(), resolver=return_input)
string = String(input=String(), resolver=return_input)
| Optional |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call_param.py | {
"start": 2970,
"end": 3454
} | class ____(TypedDict, total=False):
scroll_x: Required[int]
"""The horizontal scroll distance."""
scroll_y: Required[int]
"""The vertical scroll distance."""
type: Required[Literal["scroll"]]
"""Specifies the event type.
For a scroll action, this property is always set to `scroll`.
"""
x: Required[int]
"""The x-coordinate where the scroll occurred."""
y: Required[int]
"""The y-coordinate where the scroll occurred."""
| ActionScroll |
python | huggingface__transformers | tests/models/llava/test_processing_llava.py | {
"start": 822,
"end": 4912
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaProcessor
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
return image_processor_class(do_center_crop=False)
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
tokenizer = tokenizer_class.from_pretrained("huggyllama/llama-7b")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]})
if not tokenizer.pad_token:
tokenizer.pad_token = "[PAD]"
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = 0
return tokenizer
@classmethod
def _setup_test_attributes(cls, processor):
cls.image_token = processor.image_token
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}",
"patch_size": 128,
"vision_feature_select_strategy": "default"
} # fmt: skip
def test_get_num_vision_tokens(self):
"Tests general functionality of the helper used internally in vLLM"
processor = self.get_processor()
output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
self.assertTrue("num_image_tokens" in output)
self.assertEqual(len(output["num_image_tokens"]), 3)
self.assertTrue("num_image_patches" in output)
self.assertEqual(len(output["num_image_patches"]), 3)
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded)
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_can_load_various_tokenizers(self):
for checkpoint in ["Intel/llava-gemma-2b", "llava-hf/llava-1.5-7b-hf"]:
processor = LlavaProcessor.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = LlavaProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
input_str = self.prepare_text_inputs(batch_size=2, modalities="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=5,
)
| LlavaProcessorTest |
python | getsentry__sentry | src/sentry/releases/endpoints/project_release_stats.py | {
"start": 1395,
"end": 4320
} | class ____(ProjectEndpoint):
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
}
permission_classes = (ProjectReleasePermission,)
def get(self, request: Request, project, version) -> Response:
"""
Get a Project Release's Stats
`````````````````````````````
Returns the stats of a given release under a project.
:pparam string organization_id_or_slug: the id or slug of the organization the
release belongs to.
:pparam string project_id_or_slug: the id or slug of the project to list the
release files of.
:pparam string version: the version identifier of the release.
:auth: required
"""
stats_type = request.GET.get("type") or "sessions"
if not is_overview_stat(stats_type):
return Response({"detail": "invalid stat"}, status=400)
try:
params = self.get_filter_params(request, project)
rollup = get_rollup_from_request(
request,
params["end"] - params["start"],
default_interval="24h",
error=ProjectEventsError(
"Your interval and date range would create too many results. "
"Use a larger interval, or a smaller date range."
),
)
# The minimum interval is one hour on the server
rollup = max(rollup, 3600)
except ProjectEventsError as e:
return Response({"detail": str(e)}, status=400)
release_date_added = upsert_missing_release(project, version)
if release_date_added is None:
raise ResourceDoesNotExist
stats, totals = release_health.backend.get_project_release_stats(
project_id=params["project_id"][0],
release=version,
stat=stats_type,
rollup=rollup,
start=params["start"],
end=params["end"],
environments=params.get("environment"),
)
users_breakdown = []
for data in release_health.backend.get_crash_free_breakdown(
project_id=params["project_id"][0],
release=version,
environments=params.get("environment"),
start=release_date_added,
):
users_breakdown.append(
{
"date": data["date"],
"totalUsers": data["total_users"],
"crashFreeUsers": data["crash_free_users"],
"totalSessions": data["total_sessions"],
"crashFreeSessions": data["crash_free_sessions"],
}
)
return Response(
serialize({"stats": stats, "statTotals": totals, "usersBreakdown": users_breakdown}),
status=200,
)
| ProjectReleaseStatsEndpoint |
python | google__jax | tests/random_test.py | {
"start": 28322,
"end": 28730
} | class ____(jtu.JaxTestCase):
@parameterized.parameters([{'make_key': ctor} for ctor in [
partial(random.PRNGKey, impl='threefry2x32'),
partial(random.key, impl='threefry2x32')]])
def test_seed_no_implicit_transfers(self, make_key):
# See https://github.com/jax-ml/jax/issues/15613
with jax.transfer_guard('disallow'):
make_key(jax.device_put(42)) # doesn't crash
| ThreefryPrngTest |
python | huggingface__transformers | src/transformers/models/roformer/configuration_roformer.py | {
"start": 785,
"end": 6140
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`RoFormerModel`]. It is used to instantiate an
RoFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the RoFormer
[junnyu/roformer_chinese_base](https://huggingface.co/junnyu/roformer_chinese_base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50000):
Vocabulary size of the RoFormer model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`RoFormerModel`].
embedding_size (`int`, *optional*, defaults to None):
Dimensionality of the encoder layers and the pooler layer. Defaults to the `hidden_size` if not provided.
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1536):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 1536).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`RoFormerModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
rotary_value (`bool`, *optional*, defaults to `False`):
Whether or not apply rotary position embeddings on value layer.
Example:
```python
>>> from transformers import RoFormerModel, RoFormerConfig
>>> # Initializing a RoFormer junnyu/roformer_chinese_base style configuration
>>> configuration = RoFormerConfig()
>>> # Initializing a model (with random weights) from the junnyu/roformer_chinese_base style configuration
>>> model = RoFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "roformer"
def __init__(
self,
vocab_size=50000,
embedding_size=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=1536,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
rotary_value=False,
use_cache=True,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.embedding_size = hidden_size if embedding_size is None else embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.rotary_value = rotary_value
self.use_cache = use_cache
__all__ = ["RoFormerConfig"]
| RoFormerConfig |
python | PyCQA__pylint | tests/functional/a/abstract/abstract_method.py | {
"start": 1773,
"end": 1878
} | class ____(Structure):
__hash__ = 42
# +1: [abstract-method, abstract-method, abstract-method]
| Hashable |
python | realpython__materials | solid-principles-python/app_dip.py | {
"start": 634,
"end": 716
} | class ____(ABC):
@abstractmethod
def get_data(self):
pass
| DataSource |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | {
"start": 1457,
"end": 2689
} | class ____(FieldIsTITO):
pass
def adds_tito_inherited(x: InheritsFromTITO) -> int:
return x.add_tito
def adds_tito_with_indirect_sink(src: FieldIsTITO) -> None:
indirect_sink(src)
def indirect_sink(x: FieldIsTITO) -> None:
_test_sink(x.add_tito)
def issue_with_indirect_sink_tito():
x = _test_source()
adds_tito_with_indirect_sink(x)
def approximate_return_access_paths(x):
return {
"a": x.a,
"b": x.b,
"c": x.c,
"d": x.d,
"e": x.e,
"f": x.f,
"g": x.g,
"h": x.h,
"j": x.j,
"k": x.k,
"l": x.l,
}
async def return_taint(tainted: str, b1: str, b2: str) -> Tuple[str, str, str]:
return tainted, b1, b2
async def test_tuple_tito_indices():
tainted, b1, b2 = await return_taint(_test_source(), "", "")
_test_sink(b2)
def return_taint_in_list(tainted: str, a: str, b: str) -> List[str]:
return [tainted, a, b]
def add_feature(arg):
return arg
def tito_with_feature(arg):
if arg:
return arg
else:
return add_feature(arg)
def test_always_via_feature():
_test_sink(tito_with_feature(_test_source()))
# Test TITO through explicit super.
| InheritsFromTITO |
python | huggingface__transformers | src/transformers/models/idefics2/modeling_idefics2.py | {
"start": 29057,
"end": 31282
} | class ____(Idefics2PreTrainedModel):
config: Idefics2PerceiverConfig
input_modalities = ("image",)
_supports_sdpa = True
_supports_flash_attention_2 = True
_supports_flex_attn = True
def __init__(self, config) -> None:
super().__init__(config)
self.hidden_size = config.hidden_size
self.hidden_act = config.hidden_act
self.n_latents = config.resampler_n_latents
self.depth = config.resampler_depth
self.rms_norm_eps = config.rms_norm_eps
# Create Latents for Perceiver
self.latents = nn.Parameter(torch.ones(self.n_latents, self.hidden_size))
# Create Transformer Blocks
self.layers = nn.ModuleList([Idefics2PerceiverLayer(config, idx) for idx in range(self.depth)])
self.norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
@auto_docstring
def forward(
self,
context: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
r"""
context (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
Input to the layer.
"""
# seq embed -> bsz seq embed
latents = self.latents.unsqueeze(0).expand((context.shape[0], *self.latents.size()))
latent_attention_mask = torch.ones(
(attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device
)
attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1)
attention_mask = (
_prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents)
if self.config._attn_implementation != "flash_attention_2"
else attention_mask
)
compressed_context = latents
for perceiver_layer in self.layers:
compressed_context = perceiver_layer(
compressed_context,
context,
attention_mask=attention_mask,
position_ids=None,
**kwargs,
)
compressed_context = self.norm(compressed_context)
return compressed_context
| Idefics2PerceiverResampler |
python | ApeWorX__ape | src/ape/api/projects.py | {
"start": 258,
"end": 1647
} | class ____(BaseInterfaceModel):
"""
An API for obtaining sources.
"""
name: str
"""
The package-name of the dependency.
"""
config_override: dict = Field(default_factory=dict, repr=False)
"""
Set different config than what Ape can deduce.
"""
@property
@abstractmethod
def package_id(self) -> str:
"""
The full name of the package, used for storage.
Example: ``OpenZeppelin/openzeppelin-contracts``.
"""
@property
@abstractmethod
def version_id(self) -> str:
"""
The ID to use as the sub-directory in the download cache.
Most often, this is either a version number or a branch name.
"""
@property
@abstractmethod
def uri(self) -> str:
"""
The URI for the package.
"""
@abstractmethod
def fetch(self, destination: Path):
"""
Fetch the dependency. E.g. for GitHub dependency,
download the files to the destination.
Args:
destination (Path): The destination for the dependency
files.
"""
@field_validator("name", mode="before")
@classmethod
def validate_name(cls, value):
return (value or "").lower().replace("_", "-")
def __hash__(self) -> int:
return hash(f"{self.package_id}@{self.version_id}")
| DependencyAPI |
python | pytorch__pytorch | torch/onnx/_internal/fx/passes/type_promotion.py | {
"start": 11193,
"end": 12361
} | class ____(ReductionTypePromotionRule):
"""Reference type promotion rule from torch.ops.aten.all or torch.ops.aten.any.
This is a special case where computation dtype is always torch.bool.
The result dtype is always uint8 if `dtype` kwarg is uint8, otherwise torch.bool.
"""
def __init__(self, op_name: str) -> None:
super().__init__(
"aten",
op_name,
_prims_common.REDUCTION_OUTPUT_TYPE_KIND.ALWAYS_BOOL,
)
def preview_type_promotion(
self, args: tuple, kwargs: dict
) -> TypePromotionSnapshot:
assert len(args) >= 1, (
f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument"
)
arg = args[0]
assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor"
computation_dtype = torch.bool
# Preserves uint8 -- probably a legacy mask thing
result_dtype = torch.uint8 if arg.dtype == torch.uint8 else torch.bool
return TypePromotionSnapshot(
{0: computation_dtype},
{},
result_dtype,
)
| AllOrAnyReductionTypePromotionRule |
python | altair-viz__altair | altair/utils/schemapi.py | {
"start": 36861,
"end": 37831
} | class ____(Generic[_JSON_VT_co], Protocol): # type: ignore[misc]
"""
Represents ``altair`` classes which *may* not derive ``SchemaBase``.
Attributes
----------
_schema
A single item JSON Schema using the `type`_ keyword.
Notes
-----
Should be kept tightly defined to the **minimum** requirements for:
- Converting into a form that can be validated by `jsonschema`_.
- Avoiding calling ``.to_dict()`` on a class external to ``altair``.
- ``_schema`` is more accurately described as a ``ClassVar``
- See `discussion`_ for blocking issue.
.. _jsonschema:
https://github.com/python-jsonschema/jsonschema
.. _type:
https://json-schema.org/understanding-json-schema/reference/type
.. _discussion:
https://github.com/python/typing/discussions/1424
"""
_schema: _TypeMap[_JSON_VT_co]
def to_dict(self, *args, **kwds) -> Any: ...
@runtime_checkable
| SchemaLike |
python | django__django | tests/gis_tests/test_data.py | {
"start": 895,
"end": 1204
} | class ____(TestObj):
"""
Object for testing GDAL data sources.
"""
def __init__(self, name, *, ext="shp", **kwargs):
# Shapefile is default extension, unless specified otherwise.
self.name = name
self.ds = get_ds_file(name, ext)
super().__init__(**kwargs)
| TestDS |
python | facelessuser__pymdown-extensions | pymdownx/emoji.py | {
"start": 11686,
"end": 13990
} | class ____(Extension):
"""Add emoji extension to Markdown class."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'emoji_index': [
emojione,
"Function that returns the desired emoji index. - Default: 'pymdownx.emoji.emojione'"
],
'emoji_generator': [
to_png,
"Emoji generator method. - Default: pymdownx.emoji.to_png"
],
'title': [
'short',
"What title to use on images. You can use 'long' which shows the long name, "
"'short' which shows the shortname (:short:), or 'none' which shows no title. "
"- Default: 'short'"
],
'alt': [
'unicode',
"Control alt form. 'short' sets alt to the shortname (:short:), 'uniocde' sets "
"alt to the raw Unicode value, and 'html_entity' sets alt to the HTML entity. "
"- Default: 'unicode'"
],
'remove_variation_selector': [
False,
"Remove variation selector 16 from unicode. - Default: False"
],
'strict': [
False,
"When enabled, if an emoji with a missing name is detected, an exception will be raised."
],
'options': [
{},
"Emoji options see documentation for options for github and emojione."
]
}
super().__init__(*args, **kwargs)
def reset(self):
"""Reset."""
self.strict_cache.clear()
def extendMarkdown(self, md):
"""Add support for emoji."""
md.registerExtension(self)
config = self.getConfigs()
util.escape_chars(md, [':'])
self.strict_cache = set()
md.inlinePatterns.register(EmojiPattern(RE_EMOJI, config, self.strict_cache, md), "emoji", 75)
if config['strict']:
md.postprocessors.register(EmojiAlertPostprocessor(self.strict_cache, md), "emoji-alert", 50)
###################
# Make Available
###################
def makeExtension(*args, **kwargs):
"""Return extension."""
return EmojiExtension(*args, **kwargs)
| EmojiExtension |
python | langchain-ai__langchain | libs/partners/nomic/langchain_nomic/embeddings.py | {
"start": 244,
"end": 4148
} | class ____(Embeddings):
"""`NomicEmbeddings` embedding model.
Example:
```python
from langchain_nomic import NomicEmbeddings
model = NomicEmbeddings()
```
"""
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: str | None = ...,
dimensionality: int | None = ...,
inference_mode: Literal["remote"] = ...,
) -> None: ...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: str | None = ...,
dimensionality: int | None = ...,
inference_mode: Literal["local", "dynamic"],
device: str | None = ...,
) -> None: ...
@overload
def __init__(
self,
*,
model: str,
nomic_api_key: str | None = ...,
dimensionality: int | None = ...,
inference_mode: str,
device: str | None = ...,
) -> None: ...
def __init__(
self,
*,
model: str,
nomic_api_key: str | None = None,
dimensionality: int | None = None,
inference_mode: str = "remote",
device: str | None = None,
vision_model: str | None = None,
):
"""Initialize `NomicEmbeddings` model.
Args:
model: Model name
nomic_api_key: Optionally, set the Nomic API key. Uses the `NOMIC_API_KEY`
environment variable by default.
dimensionality: The embedding dimension, for use with Matryoshka-capable
models. Defaults to full-size.
inference_mode: How to generate embeddings. One of `'remote'`, `'local'`
(Embed4All), or `'dynamic'` (automatic).
device: The device to use for local embeddings. Choices include
`'cpu'`, `'gpu'`, `'nvidia'`, `'amd'`, or a specific device
name. See the docstring for `GPT4All.__init__` for more info.
Typically defaults to `'cpu'`.
!!! warning
Do not use on macOS.
vision_model: The vision model to use for image embeddings.
"""
_api_key = nomic_api_key or os.environ.get("NOMIC_API_KEY")
if _api_key:
nomic.login(_api_key)
self.model = model
self.dimensionality = dimensionality
self.inference_mode = inference_mode
self.device = device
self.vision_model = vision_model
def embed(self, texts: list[str], *, task_type: str) -> list[list[float]]:
"""Embed texts.
Args:
texts: List of texts to embed
task_type: The task type to use when embedding. One of `'search_query'`,
`'search_document'`, `'classification'`, `'clustering'`
"""
output = embed.text(
texts=texts,
model=self.model,
task_type=task_type,
dimensionality=self.dimensionality,
inference_mode=self.inference_mode,
device=self.device,
)
return output["embeddings"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs.
Args:
texts: List of texts to embed as documents
"""
return self.embed(
texts=texts,
task_type="search_document",
)
def embed_query(self, text: str) -> list[float]:
"""Embed query text.
Args:
text: Query text
"""
return self.embed(
texts=[text],
task_type="search_query",
)[0]
def embed_image(self, uris: list[str]) -> list[list[float]]:
"""Embed images.
Args:
uris: List of image URIs to embed
"""
return embed.image(
images=uris,
model=self.vision_model,
)["embeddings"]
| NomicEmbeddings |
python | pandas-dev__pandas | pandas/tests/arrays/categorical/test_operators.py | {
"start": 4553,
"end": 15909
} | class ____:
@pytest.mark.parametrize(
"categories",
[["a", "b"], [0, 1], [Timestamp("2019"), Timestamp("2020")]],
)
def test_not_equal_with_na(self, categories):
# https://github.com/pandas-dev/pandas/issues/32276
c1 = Categorical.from_codes([-1, 0], categories=categories)
c2 = Categorical.from_codes([0, 1], categories=categories)
result = c1 != c2
assert result.all()
def test_compare_frame(self):
# GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
data = ["a", "b", 2, "a"]
cat = Categorical(data)
df = DataFrame(cat)
result = cat == df.T
expected = DataFrame([[True, True, True, True]])
tm.assert_frame_equal(result, expected)
result = cat[::-1] != df.T
expected = DataFrame([[False, True, True, False]])
tm.assert_frame_equal(result, expected)
def test_compare_frame_raises(self, comparison_op):
# alignment raises unless we transpose
op = comparison_op
cat = Categorical(["a", "b", 2, "a"])
df = DataFrame(cat)
msg = "Unable to coerce to Series, length must be 1: given 4"
with pytest.raises(ValueError, match=msg):
op(cat, df)
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Categorical([1, 2, 3], ordered=True)
msg = "Invalid comparison between dtype=category and int"
with pytest.raises(TypeError, match=msg):
cat < 4
with pytest.raises(TypeError, match=msg):
cat > 4
with pytest.raises(TypeError, match=msg):
4 < cat
with pytest.raises(TypeError, match=msg):
4 > cat
tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))
def test_comparison_with_tuple(self):
cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object))
result = cat == "foo"
expected = np.array([True, False, False, False], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat == (0, 1)
expected = np.array([False, True, False, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat != (0, 1)
tm.assert_numpy_array_equal(result, ~expected)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_comparison_of_ordered_categorical_with_nan_to_scalar(
self, compare_operators_no_eq_ne
):
# https://github.com/pandas-dev/pandas/issues/26504
# BUG: fix ordered categorical comparison with missing values (#26504 )
# and following comparisons with scalars in categories with missing
# values should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
scalar = 2
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
tm.assert_numpy_array_equal(actual, expected)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_comparison_of_ordered_categorical_with_nan_to_listlike(
self, compare_operators_no_eq_ne
):
# https://github.com/pandas-dev/pandas/issues/26504
# and following comparisons of missing values in ordered Categorical
# with listlike should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
actual = getattr(cat, compare_operators_no_eq_ne)(other)
tm.assert_numpy_array_equal(actual, expected)
@pytest.mark.parametrize(
"data,reverse,base",
[(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],
)
def test_comparisons(self, data, reverse, base):
cat_rev = Series(Categorical(data, categories=reverse, ordered=True))
cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))
cat = Series(Categorical(data, ordered=True))
cat_base = Series(
Categorical(base, categories=cat.cat.categories, ordered=True)
)
s = Series(base, dtype=object if base == list("bbb") else None)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
cat > cat_rev
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
with pytest.raises(TypeError, match=msg):
a < cat
with pytest.raises(TypeError, match=msg):
a < cat_rev
@pytest.mark.parametrize("box", [lambda x: x, Series])
def test_unordered_different_order_equal(self, box):
# https://github.com/pandas-dev/pandas/issues/16014
c1 = box(Categorical(["a", "b"], categories=["a", "b"], ordered=False))
c2 = box(Categorical(["a", "b"], categories=["b", "a"], ordered=False))
assert (c1 == c2).all()
c1 = box(Categorical(["a", "b"], categories=["a", "b"], ordered=False))
c2 = box(Categorical(["b", "a"], categories=["b", "a"], ordered=False))
assert (c1 != c2).all()
c1 = box(Categorical(["a", "a"], categories=["a", "b"], ordered=False))
c2 = box(Categorical(["b", "b"], categories=["b", "a"], ordered=False))
assert (c1 != c2).all()
c1 = box(Categorical(["a", "a"], categories=["a", "b"], ordered=False))
c2 = box(Categorical(["a", "b"], categories=["b", "a"], ordered=False))
result = c1 == c2
tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)
c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)
with pytest.raises(TypeError, match=("Categoricals can only be compared")):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=["a", "b"])
c2 = Categorical([], categories=["a"])
msg = "Categoricals can only be compared if 'categories' are the same."
with pytest.raises(TypeError, match=msg):
c1 == c2
def test_compare_unordered_different_order(self):
# https://github.com/pandas-dev/pandas/issues/16603#issuecomment-
# 349290078
a = Categorical(["a"], categories=["a", "b"])
b = Categorical(["b"], categories=["b", "a"])
assert not a.equals(b)
def test_numeric_like_ops(self):
df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
# numeric ops should not succeed
for op, str_rep in [
("__add__", r"\+"),
("__sub__", "-"),
("__mul__", r"\*"),
("__truediv__", "/"),
]:
msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(df, op)(df)
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = df["value_group"]
for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
msg = f"does not support operation '{op}'"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(numeric_only=False)
def test_numeric_like_ops_series(self):
# numpy ops
s = Series(Categorical([1, 2, 3, 4]))
with pytest.raises(TypeError, match="does not support operation 'sum'"):
np.sum(s)
@pytest.mark.parametrize(
"op, str_rep",
[
("__add__", r"\+"),
("__sub__", "-"),
("__mul__", r"\*"),
("__truediv__", "/"),
],
)
def test_numeric_like_ops_series_arith(self, op, str_rep):
# numeric ops on a Series
s = Series(Categorical([1, 2, 3, 4]))
msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(2)
def test_numeric_like_ops_series_invalid(self):
# invalid ufunc
s = Series(Categorical([1, 2, 3, 4]))
msg = "Object with dtype category cannot perform the numpy op log"
with pytest.raises(TypeError, match=msg):
np.log(s)
| TestCategoricalOps |
python | doocs__leetcode | solution/0600-0699/0658.Find K Closest Elements/Solution3.py | {
"start": 0,
"end": 362
} | class ____:
def findClosestElements(self, arr: List[int], k: int, x: int) -> List[int]:
left, right = 0, len(arr) - k
while left < right:
mid = (left + right) >> 1
if x - arr[mid] <= arr[mid + k] - x:
right = mid
else:
left = mid + 1
return arr[left : left + k]
| Solution |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/agents/test_agent.py | {
"start": 1258,
"end": 43655
} | class ____(LLM):
"""Fake LLM for testing that outputs elements of a list."""
responses: list[str]
i: int = -1
@override
def _call(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
"""Increment counter, and then return response in that index."""
self.i += 1
print(f"=== Mock Response #{self.i} ===") # noqa: T201
print(self.responses[self.i]) # noqa: T201
return self.responses[self.i]
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens in text."""
return len(text.split())
async def _acall(self, *args: Any, **kwargs: Any) -> str:
return self._call(*args, **kwargs)
@property
def _identifying_params(self) -> dict[str, Any]:
return {}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake_list"
def _get_agent(**kwargs: Any) -> AgentExecutor:
"""Get agent for testing."""
bad_action_name = "BadAction"
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(cache=False, responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
Tool(
name="Lookup",
func=lambda x: x,
description="Useful for looking up things in a table",
),
]
return initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
**kwargs,
)
def test_agent_bad_action() -> None:
"""Test react chain when bad action given."""
agent = _get_agent()
output = agent.run("when was langchain made")
assert output == "curses foiled again"
def test_agent_stopped_early() -> None:
"""Test react chain when max iterations or max execution time is exceeded."""
# iteration limit
agent = _get_agent(max_iterations=0)
output = agent.run("when was langchain made")
assert output == "Agent stopped due to iteration limit or time limit."
# execution time limit
agent = _get_agent(max_execution_time=0.0)
output = agent.run("when was langchain made")
assert output == "Agent stopped due to iteration limit or time limit."
def test_agent_with_callbacks() -> None:
"""Test react chain with callbacks by setting verbose globally."""
handler1 = FakeCallbackHandler()
handler2 = FakeCallbackHandler()
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
# Only fake LLM gets callbacks for handler2
fake_llm = FakeListLLM(responses=responses, callbacks=[handler2])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = agent.run("when was langchain made", callbacks=[handler1])
assert output == "curses foiled again"
# 1 top level chain run runs, 2 LLMChain runs, 2 LLM runs, 1 tool run
assert handler1.chain_starts == handler1.chain_ends == 3
assert handler1.llm_starts == handler1.llm_ends == 2
assert handler1.tool_starts == 1
assert handler1.tool_ends == 1
# 1 extra agent action
assert handler1.starts == 7
# 1 extra agent end
assert handler1.ends == 7
assert handler1.errors == 0
# during LLMChain
assert handler1.text == 2
assert handler2.llm_starts == 2
assert handler2.llm_ends == 2
assert (
handler2.chain_starts
== handler2.tool_starts
== handler2.tool_ends
== handler2.chain_ends
== 0
)
def test_agent_stream() -> None:
"""Test react chain with callbacks by setting verbose globally."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
f"FooBarBaz\nAction: {tool}\nAction Input: something else",
"Oh well\nFinal Answer: curses foiled again",
]
# Only fake LLM gets callbacks for handler2
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: f"Results for: {x}",
description="Useful for searching",
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = list(agent.stream("when was langchain made"))
assert output == [
{
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
),
],
"messages": [HumanMessage(content="Results for: misalignment")],
},
{
"actions": [
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
],
"messages": [
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else",
),
],
},
{
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
),
],
"messages": [HumanMessage(content="Results for: something else")],
},
{
"output": "curses foiled again",
"messages": [
AIMessage(content="Oh well\nFinal Answer: curses foiled again"),
],
},
]
assert add(output) == {
"actions": [
AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
],
"steps": [
AgentStep(
action=AgentAction(
tool="Search",
tool_input="misalignment",
log="FooBarBaz\nAction: Search\nAction Input: misalignment",
),
observation="Results for: misalignment",
),
AgentStep(
action=AgentAction(
tool="Search",
tool_input="something else",
log="FooBarBaz\nAction: Search\nAction Input: something else",
),
observation="Results for: something else",
),
],
"messages": [
AIMessage(content="FooBarBaz\nAction: Search\nAction Input: misalignment"),
HumanMessage(content="Results for: misalignment"),
AIMessage(
content="FooBarBaz\nAction: Search\nAction Input: something else",
),
HumanMessage(content="Results for: something else"),
AIMessage(content="Oh well\nFinal Answer: curses foiled again"),
],
"output": "curses foiled again",
}
def test_agent_tool_return_direct() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
output = agent.run("when was langchain made")
assert output == "misalignment"
def test_agent_tool_return_direct_in_intermediate_steps() -> None:
"""Test agent using tools that return directly."""
tool = "Search"
responses = [
f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
"Oh well\nFinal Answer: curses foiled again",
]
fake_llm = FakeListLLM(responses=responses)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools,
fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
)
resp = agent("when was langchain made")
assert isinstance(resp, dict)
assert resp["output"] == "misalignment"
assert len(resp["intermediate_steps"]) == 1
action, _action_intput = resp["intermediate_steps"][0]
assert action.tool == "Search"
def test_agent_with_new_prefix_suffix() -> None:
"""Test agent initialization kwargs with new prefix and suffix."""
fake_llm = FakeListLLM(
responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"],
)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
prefix = "FooBarBaz"
suffix = "Begin now!\nInput: {input}\nThought: {agent_scratchpad}"
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
agent_kwargs={"prefix": prefix, "suffix": suffix},
)
# avoids "BasePromptTemplate" has no attribute "template" error
assert hasattr(agent.agent.llm_chain.prompt, "template") # type: ignore[union-attr]
prompt_str = agent.agent.llm_chain.prompt.template # type: ignore[union-attr]
assert prompt_str.startswith(prefix), "Prompt does not start with prefix"
assert prompt_str.endswith(suffix), "Prompt does not end with suffix"
def test_agent_lookup_tool() -> None:
"""Test agent lookup tool."""
fake_llm = FakeListLLM(
responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"],
)
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
assert agent.lookup_tool("Search") == tools[0]
def test_agent_invalid_tool() -> None:
"""Test agent invalid tool and correct suggestions."""
fake_llm = FakeListLLM(responses=["FooBarBaz\nAction: Foo\nAction Input: Bar"])
tools = [
Tool(
name="Search",
func=lambda x: x,
description="Useful for searching",
return_direct=True,
),
]
agent = initialize_agent(
tools=tools,
llm=fake_llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
return_intermediate_steps=True,
max_iterations=1,
)
resp = agent("when was langchain made")
assert (
resp["intermediate_steps"][0][1]
== "Foo is not a valid tool, try one of [Search]."
)
async def test_runnable_agent() -> None:
"""Simple test to verify that an agent built via composition works."""
# Will alternate between responding with hello and goodbye
infinite_cycle = cycle([AIMessage(content="hello world!")])
# When streaming GenericFakeChatModel breaks AIMessage into chunks based on spaces
model = GenericFakeChatModel(messages=infinite_cycle)
template = ChatPromptTemplate.from_messages(
[
("system", "You are Cat Agent 007"),
("human", "{question}"),
],
)
def fake_parse(_: dict) -> AgentFinish | AgentAction:
"""A parser."""
return AgentFinish(return_values={"foo": "meow"}, log="hard-coded-message")
agent = template | model | fake_parse
executor = AgentExecutor(agent=agent, tools=[])
# Invoke
result: Any = await asyncio.to_thread(executor.invoke, {"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# ainvoke
result = await executor.ainvoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# Batch
result = await asyncio.to_thread(
executor.batch,
[{"question": "hello"}, {"question": "hello"}],
)
assert result == [
{"foo": "meow", "question": "hello"},
{"foo": "meow", "question": "hello"},
]
# abatch
result = await executor.abatch([{"question": "hello"}, {"question": "hello"}])
assert result == [
{"foo": "meow", "question": "hello"},
{"foo": "meow", "question": "hello"},
]
# Stream
results = await asyncio.to_thread(list, executor.stream({"question": "hello"}))
assert results == [
{"foo": "meow", "messages": [AIMessage(content="hard-coded-message")]},
]
# astream
results = [r async for r in executor.astream({"question": "hello"})]
assert results == [
{
"foo": "meow",
"messages": [
AIMessage(content="hard-coded-message"),
],
},
]
# stream log
log_results: list[RunLogPatch] = [
r async for r in executor.astream_log({"question": "hello"})
]
# # Let's stream just the llm tokens.
messages = []
for log_record in log_results:
for op in log_record.ops:
if op["op"] == "add" and isinstance(op["value"], AIMessageChunk):
messages.append(op["value"]) # noqa: PERF401
assert messages != []
# Aggregate state
run_log = reduce(operator.add, log_results)
assert isinstance(run_log, RunLog)
assert run_log.state["final_output"] == {
"foo": "meow",
"messages": [AIMessage(content="hard-coded-message")],
}
async def test_runnable_agent_with_function_calls() -> None:
"""Test agent with intermediate agent actions."""
# Will alternate between responding with hello and goodbye
infinite_cycle = cycle(
[
AIMessage(content="looking for pet..."),
AIMessage(content="Found Pet"),
],
)
model = GenericFakeChatModel(messages=infinite_cycle)
template = ChatPromptTemplate.from_messages(
[
("system", "You are Cat Agent 007"),
("human", "{question}"),
],
)
parser_responses = cycle(
[
AgentAction(
tool="find_pet",
tool_input={
"pet": "cat",
},
log="find_pet()",
),
AgentFinish(
return_values={"foo": "meow"},
log="hard-coded-message",
),
],
)
def fake_parse(_: dict) -> AgentFinish | AgentAction:
"""A parser."""
return cast("AgentFinish | AgentAction", next(parser_responses))
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
msg = "Only cats allowed"
raise ValueError(msg)
return "Spying from under the bed."
agent = template | model | fake_parse
executor = AgentExecutor(agent=agent, tools=[find_pet])
# Invoke
result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# ainvoke
result = await executor.ainvoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# astream
results = [r async for r in executor.astream({"question": "hello"})]
assert results == [
{
"actions": [
AgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="find_pet()",
),
],
"messages": [AIMessage(content="find_pet()")],
},
{
"messages": [HumanMessage(content="Spying from under the bed.")],
"steps": [
AgentStep(
action=AgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="find_pet()",
),
observation="Spying from under the bed.",
),
],
},
{"foo": "meow", "messages": [AIMessage(content="hard-coded-message")]},
]
# astream log
messages = []
async for patch in executor.astream_log({"question": "hello"}):
messages.extend(
[
op["value"].content
for op in patch.ops
if op["op"] == "add"
and isinstance(op["value"], AIMessageChunk)
and op["value"].content != ""
]
)
assert messages == ["looking", " ", "for", " ", "pet...", "Found", " ", "Pet"]
async def test_runnable_with_multi_action_per_step() -> None:
"""Test an agent that can make multiple function calls at once."""
# Will alternate between responding with hello and goodbye
infinite_cycle = cycle(
[
AIMessage(content="looking for pet..."),
AIMessage(content="Found Pet"),
],
)
model = GenericFakeChatModel(messages=infinite_cycle)
template = ChatPromptTemplate.from_messages(
[
("system", "You are Cat Agent 007"),
("human", "{question}"),
],
)
parser_responses = cycle(
[
[
AgentAction(
tool="find_pet",
tool_input={
"pet": "cat",
},
log="find_pet()",
),
AgentAction(
tool="pet_pet", # A function that allows you to pet the given pet.
tool_input={
"pet": "cat",
},
log="pet_pet()",
),
],
AgentFinish(
return_values={"foo": "meow"},
log="hard-coded-message",
),
],
)
def fake_parse(_: dict) -> AgentFinish | AgentAction:
"""A parser."""
return cast("AgentFinish | AgentAction", next(parser_responses))
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
msg = "Only cats allowed"
raise ValueError(msg)
return "Spying from under the bed."
@tool
def pet_pet(pet: str) -> str:
"""Pet the given pet."""
if pet != "cat":
msg = "Only cats should be petted."
raise ValueError(msg)
return "purrrr"
agent = template | model | fake_parse
executor = AgentExecutor(agent=agent, tools=[find_pet])
# Invoke
result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# ainvoke
result = await executor.ainvoke({"question": "hello"})
assert result == {"foo": "meow", "question": "hello"}
# astream
results = [r async for r in executor.astream({"question": "hello"})]
assert results == [
{
"actions": [
AgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="find_pet()",
),
],
"messages": [AIMessage(content="find_pet()")],
},
{
"actions": [
AgentAction(tool="pet_pet", tool_input={"pet": "cat"}, log="pet_pet()"),
],
"messages": [AIMessage(content="pet_pet()")],
},
{
# By-default observation gets converted into human message.
"messages": [HumanMessage(content="Spying from under the bed.")],
"steps": [
AgentStep(
action=AgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="find_pet()",
),
observation="Spying from under the bed.",
),
],
},
{
"messages": [
HumanMessage(
content="pet_pet is not a valid tool, try one of [find_pet].",
),
],
"steps": [
AgentStep(
action=AgentAction(
tool="pet_pet",
tool_input={"pet": "cat"},
log="pet_pet()",
),
observation="pet_pet is not a valid tool, try one of [find_pet].",
),
],
},
{"foo": "meow", "messages": [AIMessage(content="hard-coded-message")]},
]
# astream log
messages = []
async for patch in executor.astream_log({"question": "hello"}):
for op in patch.ops:
if op["op"] != "add":
continue
value = op["value"]
if not isinstance(value, AIMessageChunk):
continue
if value.content == "": # Then it's a function invocation message
continue
messages.append(value.content)
assert messages == ["looking", " ", "for", " ", "pet...", "Found", " ", "Pet"]
def _make_func_invocation(name: str, **kwargs: Any) -> AIMessage:
"""Create an AIMessage that represents a function invocation.
Args:
name: Name of the function to invoke.
kwargs: Keyword arguments to pass to the function.
Returns:
AIMessage that represents a request to invoke a function.
"""
return AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": name,
"arguments": json.dumps(kwargs),
},
},
)
def _recursive_dump(obj: Any) -> Any:
"""Recursively dump the object if encountering any pydantic models."""
if isinstance(obj, dict):
return {
k: _recursive_dump(v)
for k, v in obj.items()
if k != "id" # Remove the id field for testing purposes
}
if isinstance(obj, list):
return [_recursive_dump(v) for v in obj]
if hasattr(obj, "dict"):
# if the object contains an ID field, we'll remove it for testing purposes
if hasattr(obj, "id"):
d = obj.model_dump()
d.pop("id")
return _recursive_dump(d)
return _recursive_dump(obj.model_dump())
return obj
async def test_openai_agent_with_streaming() -> None:
"""Test openai agent with streaming."""
infinite_cycle = cycle(
[
_make_func_invocation("find_pet", pet="cat"),
AIMessage(content="The cat is spying from under the bed."),
],
)
model = GenericFakeChatModel(messages=infinite_cycle)
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
msg = "Only cats allowed"
raise ValueError(msg)
return "Spying from under the bed."
template = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful AI bot. Your name is kitty power meow."),
("human", "{question}"),
MessagesPlaceholder(
variable_name="agent_scratchpad",
),
],
)
# type error due to base tool type below -- would need to be adjusted on tool
# decorator.
agent = create_openai_functions_agent(
model,
[find_pet],
template,
)
executor = AgentExecutor(agent=agent, tools=[find_pet])
# Invoke
result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
assert result == {
"output": "The cat is spying from under the bed.",
"question": "hello",
}
# astream
chunks = [chunk async for chunk in executor.astream({"question": "hello"})]
assert _recursive_dump(chunks) == [
{
"actions": [
{
"log": "\nInvoking: `find_pet` with `{'pet': 'cat'}`\n\n\n",
"message_log": [
{
"additional_kwargs": {
"function_call": {
"arguments": '{"pet": "cat"}',
"name": "find_pet",
},
},
"content": "",
"name": None,
"response_metadata": {},
"type": "AIMessageChunk",
},
],
"tool": "find_pet",
"tool_input": {"pet": "cat"},
"type": "AgentActionMessageLog",
},
],
"messages": [
{
"additional_kwargs": {
"function_call": {
"arguments": '{"pet": "cat"}',
"name": "find_pet",
},
},
"chunk_position": "last",
"content": "",
"invalid_tool_calls": [],
"name": None,
"response_metadata": {},
"tool_call_chunks": [],
"tool_calls": [],
"type": "AIMessageChunk",
"usage_metadata": None,
},
],
},
{
"messages": [
{
"additional_kwargs": {},
"content": "Spying from under the bed.",
"name": "find_pet",
"response_metadata": {},
"type": "function",
},
],
"steps": [
{
"action": {
"log": "\nInvoking: `find_pet` with `{'pet': 'cat'}`\n\n\n",
"tool": "find_pet",
"tool_input": {"pet": "cat"},
"type": "AgentActionMessageLog",
},
"observation": "Spying from under the bed.",
},
],
},
{
"messages": [
{
"additional_kwargs": {},
"content": "The cat is spying from under the bed.",
"invalid_tool_calls": [],
"name": None,
"response_metadata": {},
"tool_calls": [],
"type": "ai",
"usage_metadata": None,
},
],
"output": "The cat is spying from under the bed.",
},
]
#
# # astream_log
log_patches = [
log_patch async for log_patch in executor.astream_log({"question": "hello"})
]
messages = []
for log_patch in log_patches:
for op in log_patch.ops:
if op["op"] == "add" and isinstance(op["value"], AIMessageChunk):
value = op["value"]
if value.content: # Filter out function call messages
messages.append(value.content)
assert messages == [
"The",
" ",
"cat",
" ",
"is",
" ",
"spying",
" ",
"from",
" ",
"under",
" ",
"the",
" ",
"bed.",
]
def _make_tools_invocation(name_to_arguments: dict[str, dict[str, Any]]) -> AIMessage:
"""Create an AIMessage that represents a tools invocation.
Args:
name_to_arguments: A dictionary mapping tool names to an invocation.
Returns:
AIMessage that represents a request to invoke a tool.
"""
raw_tool_calls = [
{"function": {"name": name, "arguments": json.dumps(arguments)}, "id": str(idx)}
for idx, (name, arguments) in enumerate(name_to_arguments.items())
]
tool_calls = [
ToolCall(name=name, args=args, id=str(idx), type="tool_call")
for idx, (name, args) in enumerate(name_to_arguments.items())
]
return AIMessage(
content="",
additional_kwargs={
"tool_calls": raw_tool_calls,
},
tool_calls=tool_calls,
)
async def test_openai_agent_tools_agent() -> None:
"""Test OpenAI tools agent."""
infinite_cycle = cycle(
[
_make_tools_invocation(
{
"find_pet": {"pet": "cat"},
"check_time": {},
},
),
AIMessage(content="The cat is spying from under the bed."),
],
)
GenericFakeChatModel.bind_tools = lambda self, _: self # type: ignore[assignment,misc]
model = GenericFakeChatModel(messages=infinite_cycle)
@tool
def find_pet(pet: str) -> str:
"""Find the given pet."""
if pet != "cat":
msg = "Only cats allowed"
raise ValueError(msg)
return "Spying from under the bed."
@tool
def check_time() -> str:
"""Find the given pet."""
return "It's time to pet the cat."
template = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful AI bot. Your name is kitty power meow."),
("human", "{question}"),
MessagesPlaceholder(
variable_name="agent_scratchpad",
),
],
)
# type error due to base tool type below -- would need to be adjusted on tool
# decorator.
openai_agent = create_openai_tools_agent(
model,
[find_pet],
template,
)
tool_calling_agent = create_tool_calling_agent(
model,
[find_pet],
template,
)
for agent in [openai_agent, tool_calling_agent]:
executor = AgentExecutor(agent=agent, tools=[find_pet])
# Invoke
result = await asyncio.to_thread(executor.invoke, {"question": "hello"})
assert result == {
"output": "The cat is spying from under the bed.",
"question": "hello",
}
# astream
chunks = [chunk async for chunk in executor.astream({"question": "hello"})]
assert chunks == [
{
"actions": [
OpenAIToolAgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="\nInvoking: `find_pet` with `{'pet': 'cat'}`\n\n\n",
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
],
},
chunk_position="last",
),
],
tool_call_id="0",
),
],
"messages": [
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
],
},
chunk_position="last",
),
],
},
{
"actions": [
OpenAIToolAgentAction(
tool="check_time",
tool_input={},
log="\nInvoking: `check_time` with `{}`\n\n\n",
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
],
},
chunk_position="last",
),
],
tool_call_id="1",
),
],
"messages": [
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
],
},
chunk_position="last",
),
],
},
{
"messages": [
FunctionMessage(
content="Spying from under the bed.",
name="find_pet",
),
],
"steps": [
AgentStep(
action=OpenAIToolAgentAction(
tool="find_pet",
tool_input={"pet": "cat"},
log="\nInvoking: `find_pet` with `{'pet': 'cat'}`\n\n\n",
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
],
},
chunk_position="last",
),
],
tool_call_id="0",
),
observation="Spying from under the bed.",
),
],
},
{
"messages": [
FunctionMessage(
content="check_time is not a valid tool, "
"try one of [find_pet].",
name="check_time",
),
],
"steps": [
AgentStep(
action=OpenAIToolAgentAction(
tool="check_time",
tool_input={},
log="\nInvoking: `check_time` with `{}`\n\n\n",
message_log=[
_AnyIdAIMessageChunk(
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "find_pet",
"arguments": '{"pet": "cat"}',
},
"id": "0",
},
{
"function": {
"name": "check_time",
"arguments": "{}",
},
"id": "1",
},
],
},
chunk_position="last",
),
],
tool_call_id="1",
),
observation="check_time is not a valid tool, "
"try one of [find_pet].",
),
],
},
{
"messages": [
AIMessage(content="The cat is spying from under the bed."),
],
"output": "The cat is spying from under the bed.",
},
]
# astream_log
log_patches = [
log_patch async for log_patch in executor.astream_log({"question": "hello"})
]
# Get the tokens from the astream log response.
messages = []
for log_patch in log_patches:
for op in log_patch.ops:
if op["op"] == "add" and isinstance(op["value"], AIMessageChunk):
value = op["value"]
if value.content: # Filter out function call messages
messages.append(value.content)
assert messages == [
"The",
" ",
"cat",
" ",
"is",
" ",
"spying",
" ",
"from",
" ",
"under",
" ",
"the",
" ",
"bed.",
]
| FakeListLLM |
python | pydata__xarray | xarray/backends/lru_cache.py | {
"start": 221,
"end": 3661
} | class ____(MutableMapping[K, V]):
"""Thread-safe LRUCache based on an OrderedDict.
All dict operations (__getitem__, __setitem__, __contains__) update the
priority of the relevant key and take O(1) time. The dict is iterated over
in order from the oldest to newest key, which means that a complete pass
over the dict should not affect the order of any entries.
When a new item is set and the maximum size of the cache is exceeded, the
oldest item is dropped and called with ``on_evict(key, value)``.
The ``maxsize`` property can be used to view or adjust the capacity of
the cache, e.g., ``cache.maxsize = new_size``.
"""
_cache: OrderedDict[K, V]
_maxsize: int
_lock: threading.RLock
_on_evict: Callable[[K, V], Any] | None
__slots__ = ("_cache", "_lock", "_maxsize", "_on_evict")
def __init__(self, maxsize: int, on_evict: Callable[[K, V], Any] | None = None):
"""
Parameters
----------
maxsize : int
Integer maximum number of items to hold in the cache.
on_evict : callable, optional
Function to call like ``on_evict(key, value)`` when items are
evicted.
"""
if not isinstance(maxsize, int):
raise TypeError("maxsize must be an integer")
if maxsize < 0:
raise ValueError("maxsize must be non-negative")
self._maxsize = maxsize
self._cache = OrderedDict()
self._lock = threading.RLock()
self._on_evict = on_evict
def __getitem__(self, key: K) -> V:
# record recent use of the key by moving it to the front of the list
with self._lock:
value = self._cache[key]
self._cache.move_to_end(key)
return value
def _enforce_size_limit(self, capacity: int) -> None:
"""Shrink the cache if necessary, evicting the oldest items."""
while len(self._cache) > capacity:
key, value = self._cache.popitem(last=False)
if self._on_evict is not None:
self._on_evict(key, value)
def __setitem__(self, key: K, value: V) -> None:
with self._lock:
if key in self._cache:
# insert the new value at the end
del self._cache[key]
self._cache[key] = value
elif self._maxsize:
# make room if necessary
self._enforce_size_limit(self._maxsize - 1)
self._cache[key] = value
elif self._on_evict is not None:
# not saving, immediately evict
self._on_evict(key, value)
def __delitem__(self, key: K) -> None:
del self._cache[key]
def __iter__(self) -> Iterator[K]:
# create a list, so accessing the cache during iteration cannot change
# the iteration order
return iter(list(self._cache))
def __len__(self) -> int:
return len(self._cache)
@property
def maxsize(self) -> int:
"""Maximum number of items can be held in the cache."""
return self._maxsize
@maxsize.setter
def maxsize(self, size: int) -> None:
"""Resize the cache, evicting the oldest items if necessary."""
if size < 0:
raise ValueError("maxsize must be non-negative")
with self._lock:
self._enforce_size_limit(size)
self._maxsize = size
| LRUCache |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/pandas_datasource.py | {
"start": 13193,
"end": 18408
} | class ____(_PandasDataAsset):
# instance attributes
type: Literal["dataframe"] = "dataframe"
class Config:
extra = pydantic.Extra.forbid
@override
def _get_reader_method(self) -> str:
raise NotImplementedError(
"""Pandas DataFrameAsset does not implement "_get_reader_method()" method, because DataFrame is already available.""" # noqa: E501 # FIXME CoP
)
def _get_reader_options_include(self) -> set[str]:
raise NotImplementedError(
"""Pandas DataFrameAsset does not implement "_get_reader_options_include()" method, because DataFrame is already available.""" # noqa: E501 # FIXME CoP
)
@override
def build_batch_request(
self,
options: Optional[BatchParameters] = None,
batch_slice: Optional[BatchSlice] = None,
partitioner: Optional[ColumnPartitioner] = None,
) -> BatchRequest:
"""A batch request that can be used to obtain batches for this DataAsset.
Args:
options: This should have 1 key, 'dataframe', whose value is the datafame to validate.
batch_slice: This is not currently supported and must be None for this data asset.
partitioner: This is not currently supported and must be None for this data asset.
Returns:
A BatchRequest object that can be used to obtain a batch from an Asset by calling the
get_batch method.
"""
if batch_slice is not None:
raise BuildBatchRequestError(
message="batch_slice is not currently supported for this DataAsset "
"and must be None."
)
if partitioner is not None:
raise BuildBatchRequestError(
message="partitioner is not currently supported for this DataAsset"
"and must be None."
)
if not (options is not None and "dataframe" in options and len(options) == 1):
raise BuildBatchRequestError(message="options must contain exactly 1 key, 'dataframe'.")
if not isinstance(options["dataframe"], pd.DataFrame):
raise BuildBatchRequestError(
message="Cannot build batch request for dataframe asset without a dataframe"
)
return BatchRequest(
datasource_name=self.datasource.name,
data_asset_name=self.name,
options=options,
)
@override
def _validate_batch_request(self, batch_request: BatchRequest) -> None:
"""Validates the batch_request has the correct form.
Args:
batch_request: A batch request object to be validated.
"""
if not (
batch_request.datasource_name == self.datasource.name
and batch_request.data_asset_name == self.name
and batch_request.options
and len(batch_request.options) == 1
and "dataframe" in batch_request.options
and isinstance(batch_request.options["dataframe"], pd.DataFrame)
):
expect_batch_request_form = BatchRequest[None](
datasource_name=self.datasource.name,
data_asset_name=self.name,
options={"dataframe": pd.DataFrame()},
batch_slice=batch_request._batch_slice_input,
)
raise gx_exceptions.InvalidBatchRequestError( # noqa: TRY003 # FIXME CoP
"BatchRequest should have form:\n"
f"{pf(expect_batch_request_form.dict())}\n"
f"but actually has form:\n{pf(batch_request.dict())}\n"
)
@override
def get_batch_identifiers_list(self, batch_request: BatchRequest) -> List[dict]:
return [IDDict(batch_request.options)]
@override
def get_batch(self, batch_request: BatchRequest) -> Batch:
self._validate_batch_request(batch_request)
batch_spec = RuntimeDataBatchSpec(batch_data=batch_request.options["dataframe"])
execution_engine: PandasExecutionEngine = self.datasource.get_execution_engine()
data, markers = execution_engine.get_batch_data_and_markers(batch_spec=batch_spec)
# batch_definition (along with batch_spec and markers) is only here to satisfy a
# legacy constraint when computing usage statistics in a validator. We hope to remove
# it in the future.
batch_definition = LegacyBatchDefinition(
datasource_name=self.datasource.name,
data_connector_name=_DATA_CONNECTOR_NAME,
data_asset_name=self.name,
batch_identifiers=make_batch_identifier(batch_request.options),
batch_spec_passthrough=None,
)
batch_metadata: BatchMetadata = self._get_batch_metadata_from_batch_request(
batch_request=batch_request, ignore_options=("dataframe",)
)
return Batch(
datasource=self.datasource,
data_asset=self,
batch_request=batch_request,
data=data,
metadata=batch_metadata,
batch_markers=markers,
batch_spec=batch_spec,
batch_definition=batch_definition,
)
| DataFrameAsset |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_core_utils.py | {
"start": 768,
"end": 8540
} | class ____(TestCase):
def setUp(self):
self.project = get(
Project, container_time_limit=None, main_language_project=None
)
self.version = get(Version, project=self.project)
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_skipped_project(self, update_docs_task):
self.project.skip = True
self.project.save()
result = trigger_build(
project=self.project,
version=self.version,
)
self.assertEqual(result, (None, None))
self.assertFalse(update_docs_task.signature.called)
self.assertFalse(update_docs_task.signature().apply_async.called)
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_build_when_version_not_provided_default_version_exist(
self, update_docs_task
):
self.assertFalse(Version.objects.filter(slug="test-default-version").exists())
project_1 = get(Project)
version_1 = get(
Version, project=project_1, slug="test-default-version", active=True
)
project_1.default_version = "test-default-version"
project_1.save()
default_version = project_1.get_default_version()
self.assertEqual(default_version, "test-default-version")
trigger_build(project=project_1)
update_docs_task.signature.assert_called_with(
args=(
version_1.pk,
mock.ANY,
),
kwargs={
"build_commit": None,
"build_api_key": mock.ANY,
},
options=mock.ANY,
immutable=True,
)
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_build_when_version_not_provided_default_version_doesnt_exist(
self, update_docs_task
):
trigger_build(project=self.project)
default_version = self.project.get_default_version()
version = self.project.versions.get(slug=default_version)
self.assertEqual(version.slug, LATEST)
update_docs_task.signature.assert_called_with(
args=(
version.pk,
mock.ANY,
),
kwargs={
"build_commit": None,
"build_api_key": mock.ANY,
},
options=mock.ANY,
immutable=True,
)
@pytest.mark.xfail(reason="Fails while we work out Docker time limits", strict=True)
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_custom_queue(self, update_docs):
"""Use a custom queue when routing the task."""
self.project.build_queue = "build03"
trigger_build(project=self.project, version=self.version)
kwargs = {"build_pk": mock.ANY, "commit": None}
options = {
"queue": "build03",
"time_limit": 720,
"soft_time_limit": 600,
}
update_docs.signature.assert_called_with(
args=(self.version.pk,),
kwargs=kwargs,
options=options,
immutable=True,
)
@pytest.mark.xfail(reason="Fails while we work out Docker time limits", strict=True)
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_build_time_limit(self, update_docs):
"""Pass of time limit."""
trigger_build(project=self.project, version=self.version)
kwargs = {"build_pk": mock.ANY, "commit": None}
options = {
"queue": mock.ANY,
"time_limit": 720,
"soft_time_limit": 600,
}
update_docs.signature.assert_called_with(
args=(self.version.pk,),
kwargs=kwargs,
options=options,
immutable=True,
)
@pytest.mark.xfail(reason="Fails while we work out Docker time limits", strict=True)
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_build_invalid_time_limit(self, update_docs):
"""Time limit as string."""
self.project.container_time_limit = "200s"
trigger_build(project=self.project, version=self.version)
kwargs = {"build_pk": mock.ANY, "commit": None}
options = {
"queue": mock.ANY,
"time_limit": 720,
"soft_time_limit": 600,
}
update_docs.signature.assert_called_with(
args=(self.version.pk,),
kwargs=kwargs,
options=options,
immutable=True,
)
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_build_rounded_time_limit(self, update_docs):
"""Time limit should round down."""
self.project.container_time_limit = 3
trigger_build(project=self.project, version=self.version)
options = {
"time_limit": 3,
"soft_time_limit": 3,
}
update_docs.signature.assert_called_with(
args=(
self.version.pk,
mock.ANY,
),
kwargs={
"build_commit": None,
"build_api_key": mock.ANY,
},
options=options,
immutable=True,
)
@mock.patch("readthedocs.core.utils.app")
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task")
def test_trigger_max_concurrency_reached(self, update_docs, app):
max_concurrent_builds = 2
for i in range(max_concurrent_builds):
get(
Build,
state=BUILD_STATE_BUILDING,
project=self.project,
version=self.version,
task_id=str(i),
)
self.project.max_concurrent_builds = max_concurrent_builds
self.project.save()
trigger_build(project=self.project, version=self.version)
kwargs = {"build_commit": None, "build_api_key": mock.ANY}
options = {
"time_limit": settings.BUILD_TIME_LIMIT * 1.2,
"soft_time_limit": settings.BUILD_TIME_LIMIT,
"countdown": 5 * 60,
"max_retries": 25,
}
update_docs.signature.assert_called_with(
args=(self.version.pk, mock.ANY),
kwargs=kwargs,
options=options,
immutable=True,
)
build = self.project.builds.first()
notification = build.notifications.first()
self.assertEqual(
notification.message_id,
BuildMaxConcurrencyError.LIMIT_REACHED,
)
app.control.revoke.assert_has_calls(
[
mock.call("1", signal="SIGINT", terminate=True),
mock.call("0", signal="SIGINT", terminate=True),
]
)
def test_slugify(self):
"""Test additional slugify."""
self.assertEqual(
slugify("This is a test"),
"this-is-a-test",
)
self.assertEqual(
slugify("project_with_underscores-v.1.0"),
"project-with-underscores-v10",
)
self.assertEqual(
slugify("__project_with_trailing-underscores---"),
"project-with-trailing-underscores",
)
self.assertEqual(
slugify("project_with_underscores-v.1.0", dns_safe=False),
"project_with_underscores-v10",
)
self.assertEqual(
slugify("A title_-_with separated parts"),
"a-title-with-separated-parts",
)
self.assertEqual(
slugify("A title_-_with separated parts", dns_safe=False),
"a-title_-_with-separated-parts",
)
| CoreUtilTests |
python | python-poetry__poetry | src/poetry/utils/env/python/installer.py | {
"start": 1022,
"end": 1134
} | class ____(PythonInstallerError, ValueError):
pass
@dataclasses.dataclass(frozen=True)
| PythonInstallationError |
python | pytorch__pytorch | torch/_inductor/memory.py | {
"start": 15458,
"end": 38862
} | class ____:
size_alloc: int
size_free: int
def estimate_peak_memory_allocfree(
nodes: list[BaseSchedulerNode],
name_to_freeable_input_buf: dict[str, FreeableInputBuffer],
graph_outputs: OrderedSet[str],
) -> tuple[
int,
list[tuple[int, int]],
dict[BaseSchedulerNode, SNodeMemory],
dict[Union[FreeableInputBuffer, SchedulerBuffer], BaseSchedulerNode],
]:
"""
Alternative version of estimate_peak_memory, that respects the fact,
that every SchedulerNode has multiple phases:
1. alloc ( outputs )
2. run_kernel
3. dealloc last_use buffers
estimate_peak_memory collapses memory into one value: size_alloc - size_free
While peak memory happens after alloc.
Duplicating the code to not migrate all callsites at once,
In future usages of estimate_peak_memory will migrate to this version.
"""
buf_info_list, _, buf_to_snode_last_use = compute_memory_timeline(
nodes, name_to_freeable_input_buf, graph_outputs
)
# incremental memory changes at each step
step_idx_allocfree = [SNodeMemory(0, 0) for _ in range(len(nodes))]
# for each buffer, update memory when created and when freed
for buf_info in buf_info_list:
step_idx_allocfree[buf_info.start_step].size_alloc += buf_info.size_alloc
if buf_info.end_step != -1:
step_idx_allocfree[buf_info.end_step].size_free += buf_info.size_free
snodes_allocfree = {}
for i, node in enumerate(nodes):
snodes_allocfree[node] = step_idx_allocfree[i]
max_memory = 0
cur_memory = 0
snodes_curr_memory = []
for t in range(len(nodes)):
alloc = step_idx_allocfree[t].size_alloc
free = step_idx_allocfree[t].size_free
cur_memory += alloc
post_alloc = cur_memory
max_memory = max(max_memory, cur_memory)
cur_memory -= free
post_free = cur_memory
snodes_curr_memory.append((post_alloc, post_free))
return (
max_memory,
snodes_curr_memory,
snodes_allocfree,
buf_to_snode_last_use,
)
def topological_sort_lpmf(
nodes: list[BaseSchedulerNode],
name_to_freeable_input_buf: dict[str, FreeableInputBuffer],
name_to_buf: dict[str, SchedulerBuffer],
graph_outputs: OrderedSet[str],
) -> list[BaseSchedulerNode]:
"""
A bfs-based greedy topological order. LPMF stands for "Least Peak Memory First".
The idea is from this paper:
Buffer memory optimization for video codec application modeled in Simulink
https://www.cs.york.ac.uk/rts/docs/DAC-1964-2006/PAPERS/2006/DAC06/PDFFILES/P0689.PDF
The algorithm maintains the max memory so far.
At every iteration, for each scheduleable node, it computes:
- how much memory needs to be allocated for the output buffers of this node;
- how much memory can be freed as a result of executing this node.
This gives us two values for each node:
(1) mem1: memory during the execution of the node;
(2) mem2: memory after executing the node, after some input buffers are freed.
The greedy approach select as follows:
(i) if there are nodes whose mem1 values are below the max memory so far,
then pick the node with the lowest mem2 value;
(ii) otherwise, pick the one with the lowest mem1 value.
"""
class NodeInfo(TypedDict):
indegree: int
memory_to_free: int
class BufferInfo(TypedDict):
outdegree: int
node_info: dict[BaseSchedulerNode, NodeInfo] = dict()
buf_info: dict[Union[SchedulerBuffer, FreeableInputBuffer], BufferInfo] = dict()
# compute nodes' number of unmet dependencies (for schedulability)
# initialize the list of nodes ready to be scheduled
nodes_to_schedule: OrderedSet[BaseSchedulerNode] = OrderedSet()
for node in nodes:
node_info[node] = {
"indegree": len(node.mpi_node.pred_nodes),
"memory_to_free": 0,
}
if node_info[node]["indegree"] == 0:
nodes_to_schedule.add(node)
# compute buffers' number of unmet successors (used to decide when to free)
for buf in list(name_to_buf.values()) + list(name_to_freeable_input_buf.values()):
buf_info[buf] = {
"outdegree": len(buf.mpi_buffer.succ_nodes)
+ (1 if buf.get_name() in graph_outputs else 0)
}
# initialize memory estimations
live_memory = sum(
input_buf.mpi_buffer.size_free
for input_buf in name_to_freeable_input_buf.values()
)
# this is the total output memory, which is a lower bound for peak memory
# we do not include the memory of non freeable input buffers
output_memory = 0
for buf_name in graph_outputs:
if buf_name in name_to_buf:
output_memory += name_to_buf[buf_name].mpi_buffer.size_free
elif buf_name in name_to_freeable_input_buf:
output_memory += name_to_freeable_input_buf[buf_name].mpi_buffer.size_free
max_memory = max(live_memory, output_memory)
memory_gap = max_memory - live_memory
# compute the amount of memory that is allocated when a node is scheduled
# and the amount of memory that can be freed when a node is scheduled
for node in nodes:
# 1. if a buffer read by this node is last used by this node
for buf in node.mpi_node.pred_buffers:
if buf_info[buf]["outdegree"] == 1:
node_info[node]["memory_to_free"] += buf.mpi_buffer.size_free
# 2. if a buffer written by this node is used internally and not used later
for buf in node.get_outputs():
if buf_info[buf]["outdegree"] == 0:
node_info[node]["memory_to_free"] += buf.mpi_buffer.size_free
# schedule nodes one at a time
schedule: list[BaseSchedulerNode] = []
size_threshold = config.size_threshold_for_succ_based_strategy
num_iters: int = 0
while num_iters < len(nodes) and nodes_to_schedule:
# select a node to schedule:
if (
size_threshold > 0
and min(node.mpi_node.size for node in nodes_to_schedule) > size_threshold
):
selected_node = min(
nodes_to_schedule,
key=lambda node: min(
(
succ_node.mpi_node.index
for succ_node in node.mpi_node.succ_nodes
),
default=len(nodes),
),
)
else:
selected_node = min(
nodes_to_schedule,
key=lambda node: (
node.mpi_node.size if node.mpi_node.size > memory_gap else 0,
node.mpi_node.size - node_info[node]["memory_to_free"],
node.mpi_node.index,
),
)
nodes_to_schedule.remove(selected_node)
schedule.append(selected_node)
num_iters += 1
# update memory usage
live_memory += selected_node.mpi_node.size
max_memory = max(max_memory, live_memory)
live_memory -= node_info[selected_node]["memory_to_free"]
memory_gap = max_memory - live_memory
# update successor nodes and nodes_to_schedule
for succ_node in selected_node.mpi_node.succ_nodes:
assert node_info[succ_node]["indegree"] > 0
node_info[succ_node]["indegree"] -= 1
if node_info[succ_node]["indegree"] == 0:
nodes_to_schedule.add(succ_node)
# update predecessor nodes
for buf in selected_node.mpi_node.pred_buffers:
assert buf_info[buf]["outdegree"] > 0
buf_info[buf]["outdegree"] -= 1
if buf_info[buf]["outdegree"] == 1:
for succ_node in buf.mpi_buffer.succ_nodes:
node_info[succ_node]["memory_to_free"] += buf.mpi_buffer.size_free
if num_iters > len(nodes):
raise RuntimeError("Failed to schedule, while loop ran too long for lpmf")
return schedule
def topological_sort_bfs(nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:
"""
A BFS topological sort that selects nodes whose dependencies are executed the
earliest. This follows a FIFO idea. Specifically, at every iteration, for each node
that is schedulable, we gather the order in which its predecessor nodes are executed,
and this sorted list of execution orders of predecessor nodes defines the priority.
We select the node whose predecessors nodes are executed the earliest. The FIFO
idea aims to reduce the liveness duration of buffers created.
"""
class NodeInfo(TypedDict):
indegree: int
order: int
node_info: dict[BaseSchedulerNode, NodeInfo] = dict()
@dataclasses.dataclass
class NodeWithPriority:
priority: list[int]
node: BaseSchedulerNode
def __lt__(self, other: NodeWithPriority) -> bool:
if self.priority == other.priority:
return self.node.mpi_node.index < other.node.mpi_node.index
return self.priority < other.priority
def _node_priority(node: BaseSchedulerNode) -> list[int]:
# priority is the order in which predecessor nodes are executed
assert node_info[node]["indegree"] == 0
exec_orders = sorted(
OrderedSet(
node_info[pred_node]["order"] for pred_node in node.mpi_node.pred_nodes
)
)
return exec_orders
# compute nodes' number of unmet dependencies (for schedulability)
# initialize the list of nodes ready to be scheduled
nodes_to_schedule: list[NodeWithPriority] = []
for node in nodes:
node_info[node] = {"indegree": len(node.mpi_node.pred_nodes), "order": -1}
if node_info[node]["indegree"] == 0:
heapq.heappush(
nodes_to_schedule, NodeWithPriority(_node_priority(node), node)
)
# schedule nodes one at a time
schedule: list[BaseSchedulerNode] = []
num_iters: int = 0
while num_iters < len(nodes) and nodes_to_schedule:
# select a node to schedule
selected_node = heapq.heappop(nodes_to_schedule).node
node_info[selected_node]["order"] = len(schedule)
schedule.append(selected_node)
num_iters += 1
# update successor nodes and nodes_to_schedule
for succ_node in selected_node.mpi_node.succ_nodes:
assert node_info[succ_node]["indegree"] > 0
node_info[succ_node]["indegree"] -= 1
if node_info[succ_node]["indegree"] == 0:
heapq.heappush(
nodes_to_schedule,
NodeWithPriority(_node_priority(succ_node), succ_node),
)
if num_iters > len(nodes):
raise RuntimeError("Failed to schedule, while loop ran too long for bfs")
return schedule
def topological_sort_dfs(nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:
"""
This is a DFS topological sort. The setup is similar to `topological_sort_schedule`
in scheduler.py. The difference is the order nodes are visited in the outer loop.
In `topological_sort_schedule`, nodes are visited in their original order.
In this function, nodes are visited based on their priority -- for each node, we
compute the total memory of all buffers it reads from or writes to, and we visit
the nodes in ascending order of this priority.
"""
seen: OrderedSet[BaseSchedulerNode] = OrderedSet()
name_to_node: dict[str, BaseSchedulerNode] = dict()
result: list[BaseSchedulerNode] = []
size_with_reads: dict[BaseSchedulerNode, int] = dict()
def visit(n: BaseSchedulerNode) -> None:
if n not in seen:
seen.add(n)
dep_nodes = [
name_to_node[dep.name]
for dep in n.unmet_dependencies
if dep.name in name_to_node
]
for node in sorted(
dep_nodes, key=lambda n: (size_with_reads[n], n.mpi_node.index)
):
visit(node)
result.append(n)
for node in nodes:
for name in node.get_buffer_names():
name_to_node[name] = node
for node in nodes:
size_with_reads[node] = node.mpi_node.size + sum(
pred_buf.mpi_buffer.size_free for pred_buf in node.mpi_node.pred_buffers
)
for node in sorted(nodes, key=lambda n: (size_with_reads[n], n.mpi_node.index)):
visit(node)
return result
def validate_graph_acyclic(nodes: list[BaseSchedulerNode]) -> None:
    """
    Validate that the graph is acyclic by checking predecessor relationships.

    Runs an iterative-deepening-free, recursive three-color DFS over each
    node's ``mpi_node.pred_nodes`` edges.

    Raises:
        RuntimeError: If a cycle is detected in the graph; the message lists
            the nodes on the offending path.
    """
    # DFS coloring scheme for cycle detection:
    # WHITE (0): Node has not been visited yet
    # GRAY (1): Node is currently being processed (in the recursion stack)
    # BLACK (2): Node has been completely processed (finished exploring all its predecessors)
    # A back edge (cycle) is detected when we encounter a GRAY node during DFS traversal
    WHITE, GRAY, BLACK = 0, 1, 2
    color = dict.fromkeys(nodes, WHITE)
    path: list[BaseSchedulerNode] = []  # Track current DFS path for error reporting

    def dfs_visit(node: BaseSchedulerNode) -> None:
        if color[node] == BLACK:
            return
        if color[node] == GRAY:
            path.append(node)
            path_info = " -> ".join([node.get_name() for node in path])
            # Keep the sentences separated: the original message fused
            # "graph" + "Path" and ran the path into the trailing sentence.
            raise RuntimeError(
                f"Cycle detected in memory planning graph. "
                f"Path containing cycle (i -> j: j is a dependency of i): {path_info}. "
                f"This indicates invalid dependency relationships in the scheduler graph"
            )
        color[node] = GRAY
        path.append(node)
        for pred_node in node.mpi_node.pred_nodes:
            assert pred_node != node
            dfs_visit(pred_node)
        path.pop()
        color[node] = BLACK

    # Start DFS from all unvisited nodes
    for node in nodes:
        if color[node] == WHITE:
            dfs_visit(node)
def validate_unique_buffer_names(
    nodes: list[BaseSchedulerNode],
    name_to_buf: dict[str, SchedulerBuffer],
    name_to_freeable_input_buf: dict[str, FreeableInputBuffer],
) -> None:
    """
    Validate that for each node's output buffer, the name_to_buf mapping is correct.

    For each output buffer buf, we should have name_to_buf[buf.get_name()] == buf.
    Also validate that no buffer names overlap with freeable input buffer names.

    Raises:
        RuntimeError: If a buffer name is missing from ``name_to_buf``, maps to
            a different buffer object, or collides with a freeable input
            buffer name.
    """
    for node in nodes:
        for buf in node.get_outputs():
            buf_name = buf.get_name()
            # Check if buffer name exists in the mapping
            if buf_name not in name_to_buf:
                raise RuntimeError(
                    f"{buf_name} from {node.get_name()} is not found in name_to_buf mapping."
                    f" This indicates a missing buffer mapping."
                )
            # Check if the mapping points to the correct buffer object.
            # (The original message fragments were fused together without
            # separators; keep an explicit trailing space/period per fragment.)
            if name_to_buf[buf_name] != buf:
                raise RuntimeError(
                    f"Buffer name mapping is incorrect for '{buf_name}'. "
                    f"Expected name_to_buf['{buf_name}'] to be {buf.debug_str()} "
                    f"but got {name_to_buf[buf_name].debug_str()}. "
                    f"This indicates some buffers share the same name."
                )
            # Check if buffer name conflicts with freeable input buffer names
            if buf_name in name_to_freeable_input_buf:
                raise RuntimeError(
                    f"Buffer name conflict detected: '{buf_name}' from node {node.get_name()} "
                    f"is also used as a freeable input buffer name."
                )
def prepare_planning_info(
    nodes: list[BaseSchedulerNode],
    name_to_buf: dict[str, SchedulerBuffer],
    name_to_fused_node: dict[str, BaseSchedulerNode],
    graph_inputs: OrderedSet[str],
    graph_outputs: OrderedSet[str],
) -> tuple[int, dict[str, FreeableInputBuffer]]:
    """
    Compute the bookkeeping needed before any reordering takes place.

    As nodes are scheduled one at a time, this info helps track when a buffer
    can be freed and when a node becomes schedulable.

    Returns:
        A pair of:
          * the estimated peak memory of ``nodes`` in their current order, and
          * a map from graph-input buffer name to its FreeableInputBuffer.
    """
    # Graph inputs whose memory can be reclaimed during the schedule.
    freeable_input_bufs = get_freeable_input_buf(nodes, graph_inputs)
    # Attach planning metadata: buffers first, then nodes (original call order
    # preserved -- both calls mutate their arguments).
    assign_memory_planning_info_for_scheduler_buffers(nodes, name_to_buf)
    assign_memory_planning_info_for_scheduler_nodes(
        nodes, name_to_fused_node, name_to_buf, freeable_input_bufs
    )
    # Peak memory of the unmodified order; serves as the baseline estimate.
    baseline_peak, _ = estimate_peak_memory(
        nodes, freeable_input_bufs, graph_outputs
    )
    return baseline_peak, freeable_input_bufs
def reorder_for_peak_memory(
    nodes: list[BaseSchedulerNode],
    name_to_buf: dict[str, SchedulerBuffer],
    name_to_fused_node: dict[str, BaseSchedulerNode],
    graph_inputs: OrderedSet[str],
    graph_outputs: OrderedSet[str],
    methods: list[Callable[..., list[BaseSchedulerNode]]] = [  # noqa: B006
        topological_sort_lpmf,
        topological_sort_bfs,
        topological_sort_dfs,
    ],
) -> list[BaseSchedulerNode]:
    """
    Try a few heuristics based topological sort algorithms, and pick the one
    whose resulting topological order has the lowest peak memory estimation.
    The original order acts as the baseline and can win as well.
    """
    torch_log.info("Reordering for peak memory -- %d nodes", len(nodes))

    baseline_peak_memory, name_to_freeable_input_buf = prepare_planning_info(
        nodes,
        name_to_buf,
        name_to_fused_node,
        graph_inputs,
        graph_outputs,
    )

    # Optionally export the graph so it can be replayed in a simulator.
    if config.reorder_for_peak_memory_debug:
        export_graph_for_simulator(
            nodes,
            name_to_freeable_input_buf,
            name_to_fused_node,
            graph_inputs,
            graph_outputs,
        )

    # Sanity-check the planning info before reordering; only fatal outside fbcode.
    try:
        validate_graph_acyclic(nodes)
        validate_unique_buffer_names(nodes, name_to_buf, name_to_freeable_input_buf)
    except RuntimeError:
        torch_log.exception("Memory planning validation failed")
        if not is_fbcode():  # TODO: remove after ensuring OSS side is safe
            raise

    # Collect candidate orders, seeded with the original order as baseline.
    candidates: list[PeakMemoryResult] = [
        PeakMemoryResult(nodes, baseline_peak_memory, "baseline")
    ]
    torch_log.info("Baseline peak memory: %d", baseline_peak_memory)

    for method in methods:
        try:
            # topological_sort_lpmf takes extra planning arguments.
            if method is topological_sort_lpmf:
                candidate_order = method(
                    nodes, name_to_freeable_input_buf, name_to_buf, graph_outputs
                )
            else:
                candidate_order = method(nodes)
            assert len(candidate_order) == len(nodes)
            candidate_peak, _ = estimate_peak_memory(
                candidate_order, name_to_freeable_input_buf, graph_outputs
            )
            candidates.append(
                PeakMemoryResult(candidate_order, candidate_peak, method.__name__)
            )
            torch_log.info("%s peak memory: %d", method.__name__, candidate_peak)
        except Exception:
            torch_log.exception("Failed to reorder for %s", method.__name__)
            if not is_fbcode():  # TODO: remove after ensuring OSS side is safe
                raise

    signpost_event(
        category="inductor",
        name="memory",
        parameters={
            "orm": {elem.method: elem.peak_memory for elem in candidates},
        },
    )

    # Keep whichever candidate (baseline included) estimates the lowest peak.
    best = min(candidates, key=lambda result: result.peak_memory)
    return best.order
def export_graph_for_simulator(
    nodes: list[BaseSchedulerNode],
    name_to_freeable_input_buf: dict[str, FreeableInputBuffer],
    name_to_fused_node: dict[str, BaseSchedulerNode],
    graph_inputs: OrderedSet[str],
    graph_outputs: OrderedSet[str],
) -> None:
    """
    This is for debugging purposes. It will dump a json file that records graph
    information (every buffer, its sizes and dependencies, and every node with
    the buffers it defines) through torch's structured-trace logging.
    The graph can then be used in a simulator: https://fburl.com/code/3l3d3qi4
    """
    # Schema of the exported JSON ("ORM" is the simulator's object model).
    class ORMBuffer(TypedDict):
        name: str
        size_alloc: int
        size_free: int
        size: int  # for backward compatibility
        is_input: bool
        is_output: bool
        deps: list[str]
        unmet_deps: list[str]
    class ORMNode(TypedDict):
        name: str
        buffer_names: list[str]
    class ORMGraph(TypedDict):
        nodes: list[ORMNode]
        buffers: list[ORMBuffer]
    orm_buffers: list[ORMBuffer] = []
    orm_nodes: list[ORMNode] = []
    # get orm buffers for freeable input buffers
    # (inputs have no producing node, so alloc/free/size are all size_free
    # and they carry no dependencies)
    for buf_name, input_buf in name_to_freeable_input_buf.items():
        orm_buf_input_buffer: ORMBuffer = {
            "name": buf_name,
            "size_alloc": input_buf.mpi_buffer.size_free,
            "size_free": input_buf.mpi_buffer.size_free,
            "size": input_buf.mpi_buffer.size_free,
            "is_input": True,
            "is_output": buf_name in graph_outputs,
            "deps": [],
            "unmet_deps": [],
        }
        orm_buffers.append(orm_buf_input_buffer)
    # get orm buffers for scheduler buffers
    name_to_buf: dict[str, SchedulerBuffer] = {
        buf.get_name(): buf for node in nodes for buf in node.get_outputs()
    }  # need to reassign due to probably node pruning
    for buf_name, sched_buf in name_to_buf.items():
        # Skip buffers without a defining op; they cannot be attributed to a node.
        if sched_buf.defining_op is None:
            continue
        # Dependencies are the predecessor buffers of the (possibly fused)
        # node that defines this buffer.
        deps = [
            pred_buf.get_name()
            for pred_buf in name_to_fused_node[
                sched_buf.defining_op.get_name()
            ].mpi_node.pred_buffers
        ]
        orm_buf_scheduler_buffer: ORMBuffer = {
            "name": buf_name,
            "size_alloc": sched_buf.mpi_buffer.size_alloc,
            "size_free": sched_buf.mpi_buffer.size_free,
            "size": sched_buf.mpi_buffer.size_free,
            "is_input": False,
            "is_output": buf_name in graph_outputs,
            "deps": deps,
            # Graph inputs are live from the start, so only non-input deps
            # are reported as unmet.
            "unmet_deps": [
                buf_name for buf_name in deps if buf_name not in graph_inputs
            ],
        }
        orm_buffers.append(orm_buf_scheduler_buffer)
    # get orm nodes
    for node in nodes:
        orm_node: ORMNode = {
            "name": node.get_name(),
            "buffer_names": list(node.get_buffer_names()),
        }
        orm_nodes.append(orm_node)
    # create the graph object
    g: ORMGraph = {
        "nodes": orm_nodes,
        "buffers": orm_buffers,
    }
    # dump the graph
    # (imports are local so this debug-only path costs nothing otherwise)
    import json
    import os
    import torch
    from functorch.compile import get_graph_being_compiled
    # Name the artifact after the graph being compiled, with a "_fused" suffix.
    name = os.path.splitext(get_graph_being_compiled())[0] + "_fused"
    g_str = json.dumps(g, indent=2)
    torch._logging.trace_structured(
        "artifact",
        metadata_fn=lambda: {
            "name": name,
            "encoding": "string",
        },
        payload_fn=lambda: g_str,
    )
| SNodeMemory |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 29925,
"end": 31594
} | class ____(Request):
"""
Delete metadata from queue
:param queue: ID of the queue
:type queue: str
:param keys: The list of metadata keys to delete
:type keys: Sequence[str]
"""
_service = "queues"
_action = "delete_metadata"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"keys": {
"description": "The list of metadata keys to delete",
"items": {"type": "string"},
"type": "array",
},
"queue": {"description": "ID of the queue", "type": "string"},
},
"required": ["queue", "keys"],
"type": "object",
}
def __init__(self, queue: str, keys: List[str], **kwargs: Any) -> None:
super(DeleteMetadataRequest, self).__init__(**kwargs)
self.queue = queue
self.keys = keys
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("keys")
def keys(self) -> List[str]:
return self._property_keys
@keys.setter
def keys(self, value: List[str]) -> None:
if value is None:
self._property_keys = None
return
self.assert_isinstance(value, "keys", (list, tuple))
self.assert_isinstance(value, "keys", six.string_types, is_array=True)
self._property_keys = value
| DeleteMetadataRequest |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_emoji.py | {
"start": 1895,
"end": 3241
} | class ____(util.MdCase):
"""Test strict mode."""
def test_strict(self):
"""Test strict mode."""
MD1 = ":apple:"
MD2 = ":apple:\n:bad:\n:whatever:\n\n:yep:\n"
extension_configs = {
'pymdownx.emoji': {
'strict': True
}
}
extensions = ['pymdownx.emoji']
md = markdown.Markdown(extensions=extensions, extension_configs=extension_configs)
self.assertEqual(
md.convert(MD1),
f"""<p><img alt="🍎" class="emojione" src="{EMOJIONE_PNG_CDN}1f34e.png" title=":apple:" /></p>"""
)
md.reset()
error = ""
try:
md.convert(MD2)
except RuntimeError as e:
error = str(e)
self.assertEqual(
error,
textwrap.dedent(
"""
Emoji Extension (strict mode): The following emoji were detected and either had
their name change, were removed, or have never existed.
- :bad:
- :whatever:
- :yep:
"""
)
)
md.reset()
self.assertEqual(
md.convert(MD1),
f"""<p><img alt="🍎" class="emojione" src="{EMOJIONE_PNG_CDN}1f34e.png" title=":apple:" /></p>"""
)
| TestEmojiStrict |
python | doocs__leetcode | solution/0400-0499/0472.Concatenated Words/Solution.py | {
"start": 0,
"end": 356
} | class ____:
def __init__(self):
self.children = [None] * 26
self.is_end = False
def insert(self, w):
node = self
for c in w:
idx = ord(c) - ord('a')
if node.children[idx] is None:
node.children[idx] = Trie()
node = node.children[idx]
node.is_end = True
| Trie |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/units.py | {
"start": 2853,
"end": 3011
} | class ____:
"""
Represents a placeholder object for the unit metadata of a given QueryExpression.
"""
pass
@dataclass(frozen=True)
| UnitMetadata |
python | neetcode-gh__leetcode | python/0912-sort-an-array.py | {
"start": 0,
"end": 976
} | class ____:
def sortArray(self, nums: List[int]) -> List[int]:
def merge(arr, L, M, R):
left, right = arr[L:M+1], arr[M+1:R+1]
i, j, k = L, 0, 0
while j < len(left) and k < len(right):
if left[j] <= right[k]:
arr[i] = left[j]
j += 1
else:
arr[i] = right[k]
k += 1
i += 1
while j < len(left):
nums[i] = left[j]
j += 1
i += 1
while k < len(right):
nums[i] = right[k]
k += 1
i += 1
def mergeSort(arr, l, r):
if l == r:
return arr
m = (l + r) // 2
mergeSort(arr, l, m)
mergeSort(arr, m + 1, r)
merge(arr, l, m, r)
return arr
return mergeSort(nums, 0, len(nums) - 1)
| Solution |
python | getsentry__sentry | tests/sentry/sentry_apps/api/parsers/test_video.py | {
"start": 142,
"end": 659
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self.schema = {"type": "video", "url": "https://example.com/video.mov"}
def test_valid_schema(self) -> None:
validate_component(self.schema)
@invalid_schema
def test_missing_url(self) -> None:
del self.schema["url"]
validate_component(self.schema)
@invalid_schema
def test_invalid_url(self) -> None:
self.schema["url"] = "not-a-url"
validate_component(self.schema)
| TestVideoSchemaValidation |
python | getlogbook__logbook | src/logbook/handlers.py | {
"start": 3565,
"end": 11090
} | class ____(ContextObject, metaclass=_HandlerType):
"""Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
To bind a handler you can use the :meth:`push_application`,
:meth:`push_thread` or :meth:`push_greenlet` methods.
This will push the handler on a stack of handlers.
To undo this, use the :meth:`pop_application`,
:meth:`pop_thread` methods and :meth:`pop_greenlet`::
handler = MyHandler()
handler.push_application()
# all here goes to that handler
handler.pop_application()
By default messages sent to that handler will not go to a handler on
an outer level on the stack, if handled. This can be changed by
setting bubbling to `True`.
There are also context managers to setup the handler for the duration
of a `with`-block::
with handler.applicationbound():
...
with handler.contextbound():
...
Because `contextbound` is a common operation, it is the default::
with handler:
...
"""
stack_manager = ContextStackManager()
#: a flag for this handler that can be set to `True` for handlers that
#: are consuming log records but are not actually displaying it. This
#: flag is set for the :class:`NullHandler` for instance.
blackhole = False
def __init__(self, level=NOTSET, filter=None, bubble=False):
#: the level for the handler. Defaults to `NOTSET` which
#: consumes all entries.
self.level = lookup_level(level)
#: the formatter to be used on records. This is a function
#: that is passed a log record as first argument and the
#: handler as second and returns something formatted
#: (usually a unicode string)
self.formatter = None
#: the filter to be used with this handler
self.filter = filter
#: the bubble flag of this handler
self.bubble = bubble
level_name = level_name_property()
def format(self, record):
"""Formats a record with the given formatter. If no formatter
is set, the record message is returned. Generally speaking the
return value is most likely a unicode string, but nothing in
the handler interface requires a formatter to return a unicode
string.
The combination of a handler and formatter might have the
formatter return an XML element tree for example.
"""
if self.formatter is None:
return record.message
return self.formatter(record, self)
def should_handle(self, record):
"""Returns `True` if this handler wants to handle the record. The
default implementation checks the level.
"""
return record.level >= self.level
def handle(self, record):
"""Emits the record and falls back. It tries to :meth:`emit` the
record and if that fails, it will call into :meth:`handle_error` with
the record and traceback. This function itself will always emit
when called, even if the logger level is higher than the record's
level.
If this method returns `False` it signals to the calling function that
no recording took place in which case it will automatically bubble.
This should not be used to signal error situations. The default
implementation always returns `True`.
"""
try:
self.emit(record)
except Exception:
self.handle_error(record, sys.exc_info())
return True
def emit(self, record):
"""Emit the specified logging record. This should take the
record and deliver it to whereever the handler sends formatted
log records.
"""
def emit_batch(self, records, reason):
"""Some handlers may internally queue up records and want to forward
them at once to another handler. For example the
:class:`~logbook.FingersCrossedHandler` internally buffers
records until a level threshold is reached in which case the buffer
is sent to this method and not :meth:`emit` for each record.
The default behaviour is to call :meth:`emit` for each record in
the buffer, but handlers can use this to optimize log handling. For
instance the mail handler will try to batch up items into one mail
and not to emit mails for each record in the buffer.
Note that unlike :meth:`emit` there is no wrapper method like
:meth:`handle` that does error handling. The reason is that this
is intended to be used by other handlers which are already protected
against internal breakage.
`reason` is a string that specifies the rason why :meth:`emit_batch`
was called, and not :meth:`emit`. The following are valid values:
``'buffer'``
Records were buffered for performance reasons or because the
records were sent to another process and buffering was the only
possible way. For most handlers this should be equivalent to
calling :meth:`emit` for each record.
``'escalation'``
Escalation means that records were buffered in case the threshold
was exceeded. In this case, the last record in the iterable is the
record that triggered the call.
``'group'``
All the records in the iterable belong to the same logical
component and happened in the same process. For example there was
a long running computation and the handler is invoked with a bunch
of records that happened there. This is similar to the escalation
reason, just that the first one is the significant one, not the
last.
If a subclass overrides this and does not want to handle a specific
reason it must call into the superclass because more reasons might
appear in future releases.
Example implementation::
def emit_batch(self, records, reason):
if reason not in ("escalation", "group"):
Handler.emit_batch(self, records, reason)
...
"""
for record in records:
self.emit(record)
def close(self):
"""Tidy up any resources used by the handler. This is automatically
called by the destructor of the class as well, but explicit calls are
encouraged. Make sure that multiple calls to close are possible.
"""
def handle_error(self, record, exc_info):
"""Handle errors which occur during an emit() call. The behaviour of
this function depends on the current `errors` setting.
Check :class:`Flags` for more information.
"""
try:
behaviour = Flags.get_flag("errors", "print")
if behaviour == "raise":
raise exc_info[1]
elif behaviour == "print":
traceback.print_exception(*exc_info, file=sys.stderr)
sys.stderr.write(
f"Logged from file {record.filename}, line {record.lineno}\n"
)
except OSError:
pass
| Handler |
python | TheAlgorithms__Python | data_structures/linked_list/print_reverse.py | {
"start": 192,
"end": 3611
} | class ____:
"""A class to represent a Linked List.
Use a tail pointer to speed up the append() operation.
"""
def __init__(self) -> None:
"""Initialize a LinkedList with the head node set to None.
>>> linked_list = LinkedList()
>>> (linked_list.head, linked_list.tail)
(None, None)
"""
self.head: Node | None = None
self.tail: Node | None = None # Speeds up the append() operation
def __iter__(self) -> Iterator[int]:
"""Iterate the LinkedList yielding each Node's data.
>>> linked_list = LinkedList()
>>> items = (1, 2, 3, 4, 5)
>>> linked_list.extend(items)
>>> tuple(linked_list) == items
True
"""
node = self.head
while node:
yield node.data
node = node.next_node
def __repr__(self) -> str:
"""Returns a string representation of the LinkedList.
>>> linked_list = LinkedList()
>>> str(linked_list)
''
>>> linked_list.append(1)
>>> str(linked_list)
'1'
>>> linked_list.extend([2, 3, 4, 5])
>>> str(linked_list)
'1 -> 2 -> 3 -> 4 -> 5'
"""
return " -> ".join([str(data) for data in self])
def append(self, data: int) -> None:
"""Appends a new node with the given data to the end of the LinkedList.
>>> linked_list = LinkedList()
>>> str(linked_list)
''
>>> linked_list.append(1)
>>> str(linked_list)
'1'
>>> linked_list.append(2)
>>> str(linked_list)
'1 -> 2'
"""
if self.tail:
self.tail.next_node = self.tail = Node(data)
else:
self.head = self.tail = Node(data)
def extend(self, items: Iterable[int]) -> None:
"""Appends each item to the end of the LinkedList.
>>> linked_list = LinkedList()
>>> linked_list.extend([])
>>> str(linked_list)
''
>>> linked_list.extend([1, 2])
>>> str(linked_list)
'1 -> 2'
>>> linked_list.extend([3,4])
>>> str(linked_list)
'1 -> 2 -> 3 -> 4'
"""
for item in items:
self.append(item)
def make_linked_list(elements_list: Iterable[int]) -> LinkedList:
"""Creates a Linked List from the elements of the given sequence
(list/tuple) and returns the head of the Linked List.
>>> make_linked_list([])
Traceback (most recent call last):
...
Exception: The Elements List is empty
>>> make_linked_list([7])
7
>>> make_linked_list(['abc'])
abc
>>> make_linked_list([7, 25])
7 -> 25
"""
if not elements_list:
raise Exception("The Elements List is empty")
linked_list = LinkedList()
linked_list.extend(elements_list)
return linked_list
def in_reverse(linked_list: LinkedList) -> str:
"""Prints the elements of the given Linked List in reverse order
>>> in_reverse(LinkedList())
''
>>> in_reverse(make_linked_list([69, 88, 73]))
'73 <- 88 <- 69'
"""
return " <- ".join(str(line) for line in reversed(tuple(linked_list)))
if __name__ == "__main__":
from doctest import testmod
testmod()
linked_list = make_linked_list((14, 52, 14, 12, 43))
print(f"Linked List: {linked_list}")
print(f"Reverse List: {in_reverse(linked_list)}")
| LinkedList |
python | getsentry__sentry | src/sentry/issues/endpoints/group_details.py | {
"start": 2467,
"end": 17874
} | class ____(GroupEndpoint):
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=5, window=1),
RateLimitCategory.USER: RateLimit(limit=5, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=5, window=1),
},
"PUT": {
RateLimitCategory.IP: RateLimit(limit=5, window=1),
RateLimitCategory.USER: RateLimit(limit=5, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=5, window=1),
},
"DELETE": {
RateLimitCategory.IP: RateLimit(limit=5, window=5),
RateLimitCategory.USER: RateLimit(limit=5, window=5),
RateLimitCategory.ORGANIZATION: RateLimit(limit=5, window=5),
},
}
)
def _get_seen_by(self, request: Request, group: Group) -> list[dict[str, Any]]:
seen_by = list(GroupSeen.objects.filter(group=group).order_by("-last_seen"))
return [seen for seen in serialize(seen_by, request.user) if seen is not None]
def _get_context_plugins(self, request: Request, group: Group) -> list[dict[str, Any]]:
project = group.project
return serialize(
[
plugin
for plugin in plugins.for_project(project, version=None)
if plugin.has_project_conf()
and hasattr(plugin, "get_custom_contexts")
and plugin.get_custom_contexts()
],
request.user,
PluginSerializer(project),
)
@staticmethod
def __group_hourly_daily_stats(
group: Group, environment_ids: Sequence[int]
) -> tuple[list[list[float]], list[list[float]]]:
model = get_issue_tsdb_group_model(group.issue_category)
now = timezone.now()
hourly_stats = tsdb.backend.rollup(
tsdb.backend.get_range(
model=model,
keys=[group.id],
end=now,
start=now - timedelta(days=1),
environment_ids=environment_ids,
tenant_ids={"organization_id": group.project.organization_id},
),
3600,
)[group.id]
daily_stats = tsdb.backend.rollup(
tsdb.backend.get_range(
model=model,
keys=[group.id],
end=now,
start=now - timedelta(days=30),
environment_ids=environment_ids,
tenant_ids={"organization_id": group.project.organization_id},
),
3600 * 24,
)[group.id]
return hourly_stats, daily_stats
def get(self, request: Request, group: Group) -> Response:
"""
Retrieve an Issue
`````````````````
Return details on an individual issue. This returns the basic stats for
the issue (title, last seen, first seen), some overall numbers (number
of comments, user reports) as well as the summarized event data.
:pparam string organization_id_or_slug: the id or slug of the organization.
:pparam string issue_id: the ID of the issue to retrieve.
:auth: required
"""
from sentry.utils import snuba
try:
# TODO(dcramer): handle unauthenticated/public response
organization = group.project.organization
environments = get_environments(request, organization)
environment_ids = [e.id for e in environments]
expand = request.GET.getlist("expand", [])
collapse = request.GET.getlist("collapse", [])
# WARNING: the rest of this endpoint relies on this serializer
# populating the cache SO don't move this :)
data = serialize(
group, request.user, GroupSerializerSnuba(environment_ids=environment_ids)
)
# TODO: these probably should be another endpoint
activity = Activity.objects.get_activities_for_group(group, 100)
seen_by = self._get_seen_by(request, group)
if "release" not in collapse:
first_release, last_release = get_first_last_release(request, group)
data.update(
{
"firstRelease": first_release,
"lastRelease": last_release,
}
)
if "tags" not in collapse:
tags = tagstore.backend.get_group_tag_keys(
group,
environment_ids,
limit=100,
tenant_ids={"organization_id": group.project.organization_id},
)
data.update(
{
"tags": sorted(serialize(tags, request.user), key=lambda x: x["name"]),
}
)
user_reports = (
UserReport.objects.filter(group_id=group.id)
if not environment_ids
else UserReport.objects.filter(
group_id=group.id, environment_id__in=environment_ids
)
)
hourly_stats, daily_stats = self.__group_hourly_daily_stats(group, environment_ids)
if "inbox" in expand:
inbox_map = get_inbox_details([group])
inbox_reason = inbox_map.get(group.id)
data.update({"inbox": inbox_reason})
if "owners" in expand:
owner_details = get_owner_details([group])
owners = owner_details.get(group.id)
data.update({"owners": owners})
if "forecast" in expand:
fetched_forecast = EscalatingGroupForecast.fetch(group.project_id, group.id)
if fetched_forecast:
fetched_forecast_dict = fetched_forecast.to_dict()
data.update(
{
"forecast": {
"data": fetched_forecast_dict.get("forecast"),
"date_added": fetched_forecast_dict.get("date_added"),
}
}
)
if "integrationIssues" in expand:
external_issues = ExternalIssue.objects.filter(
id__in=GroupLink.objects.filter(group_id__in=[group.id]).values_list(
"linked_id", flat=True
),
)
integration_issues = serialize(
external_issues,
request.user,
serializer=ExternalIssueSerializer(),
)
data.update({"integrationIssues": integration_issues})
if "sentryAppIssues" in expand:
platform_external_issues = PlatformExternalIssue.objects.filter(group_id=group.id)
sentry_app_issues = serialize(
list(platform_external_issues),
request.user,
serializer=PlatformExternalIssueSerializer(),
)
data.update({"sentryAppIssues": sentry_app_issues})
if "latestEventHasAttachments" in expand:
if not features.has(
"organizations:event-attachments",
group.project.organization,
actor=request.user,
):
metrics.incr(
"group.get.http_response",
sample_rate=1.0,
tags={
"status": 404,
"detail": "group_details:get:no_attachments_feature_flag",
},
)
return self.respond(status=404)
latest_event = group.get_latest_event()
if latest_event is not None:
num_attachments = EventAttachment.objects.filter(
project_id=latest_event.project_id, event_id=latest_event.event_id
).count()
data.update({"latestEventHasAttachments": num_attachments > 0})
data.update(
{
"activity": serialize(activity, request.user),
"seenBy": seen_by,
"pluginActions": get_actions(group),
"pluginIssues": get_available_issue_plugins(group),
"pluginContexts": self._get_context_plugins(request, group),
"userReportCount": user_reports.count(),
"stats": {"24h": hourly_stats, "30d": daily_stats},
"count": get_group_global_count(group),
}
)
participants = user_service.serialize_many(
filter={"user_ids": GroupSubscriptionManager.get_participating_user_ids(group)},
as_user=request.user,
)
for participant in participants:
participant["type"] = "user"
data.update({"participants": participants})
metrics.incr(
"group.get.http_response",
sample_rate=1.0,
tags={"status": 200, "detail": "group_details:get:response"},
)
return Response(data)
except snuba.RateLimitExceeded:
metrics.incr(
"group.get.http_response",
sample_rate=1.0,
tags={"status": 429, "detail": "group_details:get:snuba.RateLimitExceeded"},
)
raise
except Exception:
metrics.incr(
"group.get.http_response",
sample_rate=1.0,
tags={"status": 500, "detail": "group_details:get:Exception"},
)
raise
def put(self, request: Request, group: Group) -> Response:
"""
Update an Issue
```````````````
Updates an individual issue's attributes. Only the attributes submitted
are modified.
:pparam string issue_id: the ID of the group to retrieve.
:param string status: the new status for the issue. Valid values
are ``"resolved"``, ``resolvedInNextRelease``,
``"unresolved"``, and ``"ignored"``.
:param map statusDetails: additional details about the resolution.
Valid values are ``"inRelease"``, ``"inNextRelease"``,
``"inCommit"``, ``"ignoreDuration"``, ``"ignoreCount"``,
``"ignoreWindow"``, ``"ignoreUserCount"``, and
``"ignoreUserWindow"``.
:param string assignedTo: the user or team that should be assigned to
this issue. Can be of the form ``"<user_id>"``,
``"user:<user_id>"``, ``"<username>"``,
``"<user_primary_email>"``, or ``"team:<team_id>"``.
:param string assignedBy: ``"suggested_assignee"`` | ``"assignee_selector"``
:param boolean hasSeen: in case this API call is invoked with a user
context this allows changing of the flag
that indicates if the user has seen the
event.
:param boolean isBookmarked: in case this API call is invoked with a
user context this allows changing of
the bookmark flag.
:param boolean isSubscribed:
:param boolean isPublic: sets the issue to public or private.
:param string substatus: the new substatus for the issues. Valid values
defined in GroupSubStatus.
:auth: required
"""
try:
discard = request.data.get("discard")
project = group.project
search_fn = functools.partial(prep_search, request, project)
response = update_groups_with_search_fn(
request, [group.id], [project], project.organization_id, search_fn
)
# if action was discard, there isn't a group to serialize anymore
# if response isn't 200, return the response update_groups gave us (i.e. helpful error)
# instead of serializing the updated group
if discard or response.status_code != 200:
return response
# we need to fetch the object against as the bulk mutation endpoint
# only returns a delta, and object mutation returns a complete updated
# entity.
# TODO(dcramer): we should update the API and have this be an explicit
# flag (or remove it entirely) so that delta's are the primary response
# for mutation.
group = Group.objects.get(id=group.id)
serialized = serialize(
group,
request.user,
GroupSerializer(
environment_func=get_environment_func(request, group.project.organization_id)
),
)
metrics.incr(
"group.update.http_response",
sample_rate=1.0,
tags={"status": 200, "detail": "group_details:update:Response"},
)
return Response(serialized, status=response.status_code)
except client.ApiError as e:
metrics.incr(
"group.update.http_response",
sample_rate=1.0,
tags={"status": e.status_code, "detail": "group_details:update:Response"},
)
logging.exception(
"group_details:put client.ApiError",
)
return Response(e.body, status=e.status_code)
def delete(self, request: Request, group: Group) -> Response:
"""
Remove an Issue
```````````````
Removes an individual issue.
:pparam string issue_id: the ID of the issue to delete.
:auth: required
"""
from sentry.utils import snuba
try:
delete_group_list(request, group.project, [group], "delete")
metrics.incr(
"group.delete.http_response",
sample_rate=1.0,
tags={"status": 200, "detail": "group_details:delete:Response"},
)
return Response(status=202)
except snuba.RateLimitExceeded:
metrics.incr(
"group.delete.http_response",
sample_rate=1.0,
tags={"status": 429, "detail": "group_details:delete:snuba.RateLimitExceeded"},
)
raise
except Exception:
metrics.incr(
"group.delete.http_response",
sample_rate=1.0,
tags={"status": 500, "detail": "group_details:delete:Exception"},
)
raise
| GroupDetailsEndpoint |
python | django__django | django/forms/models.py | {
"start": 21125,
"end": 24405
} | class ____(BaseModelForm, metaclass=ModelFormMetaclass):
pass
def modelform_factory(
model,
form=ModelForm,
fields=None,
exclude=None,
formfield_callback=None,
widgets=None,
localized_fields=None,
labels=None,
help_texts=None,
error_messages=None,
field_classes=None,
):
"""
Return a ModelForm containing form fields for the given model. You can
optionally pass a `form` argument to use as a starting point for
constructing the ModelForm.
``fields`` is an optional list of field names. If provided, include only
the named fields in the returned fields. If omitted or '__all__', use all
fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be
localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {"model": model}
if fields is not None:
attrs["fields"] = fields
if exclude is not None:
attrs["exclude"] = exclude
if widgets is not None:
attrs["widgets"] = widgets
if localized_fields is not None:
attrs["localized_fields"] = localized_fields
if labels is not None:
attrs["labels"] = labels
if help_texts is not None:
attrs["help_texts"] = help_texts
if error_messages is not None:
attrs["error_messages"] = error_messages
if field_classes is not None:
attrs["field_classes"] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
bases = (form.Meta,) if hasattr(form, "Meta") else ()
Meta = type("Meta", bases, attrs)
if formfield_callback:
Meta.formfield_callback = staticmethod(formfield_callback)
# Give this new form class a reasonable name.
class_name = model.__name__ + "Form"
# Class attributes for the new form class.
form_class_attrs = {"Meta": Meta}
if getattr(Meta, "fields", None) is None and getattr(Meta, "exclude", None) is None:
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
# ModelFormSets ##############################################################
| ModelForm |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_cluster_trust_bundle_spec.py | {
"start": 383,
"end": 7385
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'signer_name': 'str',
'trust_bundle': 'str'
}
attribute_map = {
'signer_name': 'signerName',
'trust_bundle': 'trustBundle'
}
def __init__(self, signer_name=None, trust_bundle=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ClusterTrustBundleSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._signer_name = None
self._trust_bundle = None
self.discriminator = None
if signer_name is not None:
self.signer_name = signer_name
self.trust_bundle = trust_bundle
@property
def signer_name(self):
"""Gets the signer_name of this V1beta1ClusterTrustBundleSpec. # noqa: E501
signerName indicates the associated signer, if any. In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest. If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`. If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix. List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector. # noqa: E501
:return: The signer_name of this V1beta1ClusterTrustBundleSpec. # noqa: E501
:rtype: str
"""
return self._signer_name
@signer_name.setter
def signer_name(self, signer_name):
"""Sets the signer_name of this V1beta1ClusterTrustBundleSpec.
signerName indicates the associated signer, if any. In order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest. If signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`. If signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix. List/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector. # noqa: E501
:param signer_name: The signer_name of this V1beta1ClusterTrustBundleSpec. # noqa: E501
:type: str
"""
self._signer_name = signer_name
@property
def trust_bundle(self):
"""Gets the trust_bundle of this V1beta1ClusterTrustBundleSpec. # noqa: E501
trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers. Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data. # noqa: E501
:return: The trust_bundle of this V1beta1ClusterTrustBundleSpec. # noqa: E501
:rtype: str
"""
return self._trust_bundle
@trust_bundle.setter
def trust_bundle(self, trust_bundle):
"""Sets the trust_bundle of this V1beta1ClusterTrustBundleSpec.
trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. The data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers. Users of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data. # noqa: E501
:param trust_bundle: The trust_bundle of this V1beta1ClusterTrustBundleSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and trust_bundle is None: # noqa: E501
raise ValueError("Invalid value for `trust_bundle`, must not be `None`") # noqa: E501
self._trust_bundle = trust_bundle
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ClusterTrustBundleSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ClusterTrustBundleSpec):
return True
return self.to_dict() != other.to_dict()
| V1beta1ClusterTrustBundleSpec |
python | wandb__wandb | tests/unit_tests/test_automations/test_automations.py | {
"start": 450,
"end": 9914
} | class ____:
"""Checks on the internal helper that prepares the GraphQL input for CreateFilterTrigger mutations."""
def test_same_results_from_equivalent_args(self, input_event, input_action):
"""Check that preparing the CreateFilterTrigger input object by passing the same values in different ways produces identical results."""
name = "test-name"
description = "test-description"
enabled = True
attrs = dict(name=name, description=description, enabled=enabled)
# REFERENCE: via NewAutomation instance as only positional arg
expected_gql_input = prepare_to_create(
NewAutomation(event=input_event, action=input_action, **attrs)
)
# ------------------------------------------------------------------------------
# via only keyword args
gql_input_via_kws = prepare_to_create(
event=input_event, action=input_action, **attrs
)
assert expected_gql_input == gql_input_via_kws
# ------------------------------------------------------------------------------
# via `event >> action` and keyword args
gql_input_via_event_action_and_kws = prepare_to_create(
input_event >> input_action,
**attrs,
)
assert expected_gql_input == gql_input_via_event_action_and_kws
# ------------------------------------------------------------------------------
# via NewAutomation instance and keyword args as overrides
# Orig values deliberately different to check that they're overridden
orig_obj = NewAutomation(
event=input_event,
action=input_action,
name=f"REPLACED-{name}",
description=f"REPLACED-{description}",
enabled=not enabled,
)
gql_input_via_obj_and_kwargs = prepare_to_create(orig_obj, **attrs)
assert expected_gql_input == gql_input_via_obj_and_kwargs
@mark.parametrize("invalid_event_type", INVALID_INPUT_EVENTS)
def test_prepare_to_create_rejects_excluded_event_types(
self,
input_event,
input_action,
invalid_event_type,
):
"""Check that prepare_to_create() fails if we try to assign a disallowed event type.
Event types may be disallowed if e.g. the event type is deprecated or should otherwise
be blocked on new/edited automations.
"""
with raises(ValidationError):
automation_to_create = NewAutomation(
event=input_event,
action=input_action,
name="test-name",
)
automation_to_create.event.event_type = invalid_event_type
prepare_to_create(automation_to_create)
@mark.parametrize("invalid_action_type", INVALID_INPUT_ACTIONS)
def test_prepare_to_create_rejects_excluded_action_types(
self,
input_event,
input_action,
invalid_action_type,
):
"""Check that prepare_to_create() fails if we try to assign a disallowed action type.
Action types may be disallowed if e.g. the action type is deprecated or should otherwise
be blocked on new/edited automations.
"""
with raises(ValidationError):
automation_to_create = NewAutomation(
event=input_event,
action=input_action,
name="test-name",
)
automation_to_create.action.action_type = invalid_action_type
prepare_to_create(automation_to_create)
@fixture(
params=[
{"name": "test-name", "description": "test-description", "enabled": True},
{"name": "test-name", "enabled": False},
{"name": "test-name"},
]
)
def input_kwargs(self, request: FixtureRequest) -> dict[str, Any]:
return request.param
@fixture
def prepared_vars(
self,
input_event: InputEvent,
input_action: InputAction,
input_kwargs: dict[str, Any],
) -> dict[str, Any]:
# If we were to actually send this new Automation to the server,
# these the input variables for the GraphQL mutation.
prepared = prepare_to_create(input_event >> input_action, **input_kwargs)
return prepared.model_dump()
def test_prepared_dict_values(
self,
scope,
scope_type,
event_type,
action_type,
input_kwargs,
prepared_vars,
):
"""Check that preparing the GraphQL variables for creating a new Automation exports GraphQL variables with the expected keys and values."""
# This only checks the simpler key-value pairs (without nested payloads).
# We've omitted the more complicated event/action payloads, which will be checked separately.
expected_subset = {
"scopeType": scope_type,
"scopeID": scope.id,
"triggeringEventType": event_type,
"triggeredActionType": action_type,
**input_kwargs,
}
# Expected defaults, if they weren't provided
if "enabled" not in input_kwargs:
expected_subset["enabled"] = True
assert expected_subset.keys() <= prepared_vars.keys()
for k, expected_val in expected_subset.items():
prepared_val = prepared_vars[k]
assert prepared_val == expected_val
# Check all expected keys are present
other_expected_keys = {"eventFilter", "triggeredActionConfig"}
assert {*expected_subset.keys(), *other_expected_keys} == prepared_vars.keys()
# ----------------------------------------------------------------------------
# Check prepared event payloads
@fixture
def event_filter_dict(self, prepared_vars) -> dict[str, Any]:
"""The prepared and DESERIALIZED `CreateFilterTriggerInput.eventFilter` payload."""
return json.loads(prepared_vars["eventFilter"])
@mark.parametrize("event_type", [EventType.RUN_METRIC_THRESHOLD], indirect=True)
def test_event_payload_for_run_metric_threshold_events(
self, input_event, event_filter_dict
):
# Check the run filter
orig_run_filter = input_event.filter.run
run_filter_dict = json.loads(event_filter_dict["run_filter"])
# Check that the filter is nested/wrapped as required by current backend/frontend logic
assert run_filter_dict == orig_run_filter.model_dump()
# Check the metric threshold condition
orig_threshold_filter = input_event.filter.metric.threshold_filter
threshold_dict = event_filter_dict["run_metric_filter"]["threshold_filter"]
assert threshold_dict == {
"agg_op": orig_threshold_filter.agg,
"cmp_op": orig_threshold_filter.cmp,
"threshold": orig_threshold_filter.threshold,
"name": orig_threshold_filter.name,
"window_size": orig_threshold_filter.window,
}
@mark.parametrize(
"event_type",
[
EventType.CREATE_ARTIFACT,
EventType.ADD_ARTIFACT_ALIAS,
EventType.LINK_ARTIFACT,
],
indirect=True,
)
def test_event_payload_for_artifact_mutation_events(
self, input_event, event_filter_dict
):
# Check that the filter is nested/wrapped as required by current backend/frontend logic
#
# Besides that, check the event payload: event filter should
# otherwise match what was set on the event instance.
assert event_filter_dict == input_event.filter.model_dump()
# Current frontend logic needs these event filters to be wrapped like:
# {"$or": [{"$and": [...]}]}
assert event_filter_dict.keys() == {"$or"}
assert len(event_filter_dict["$or"]) == 1
assert event_filter_dict["$or"][0].keys() == {"$and"}
# ----------------------------------------------------------------------------
# Check prepared action payloads
@fixture
def action_config_dict(self, prepared_vars) -> dict[str, Any]:
"""The prepared `CreateFilterTriggerInput.triggeredActionConfig` payload."""
return prepared_vars["triggeredActionConfig"]
@mark.parametrize("action_type", [ActionType.NO_OP], indirect=True)
def test_action_payload_for_no_op_actions(self, action_config_dict):
assert action_config_dict == {"noOpActionInput": {"noOp": True}}
@mark.parametrize("action_type", [ActionType.NOTIFICATION], indirect=True)
def test_action_payload_for_notification_actions(
self, input_action, action_config_dict
):
assert action_config_dict == {
"notificationActionInput": {
"integrationID": input_action.integration_id,
"title": input_action.title,
"message": input_action.message,
"severity": input_action.severity,
},
}
@mark.parametrize("action_type", [ActionType.GENERIC_WEBHOOK], indirect=True)
def test_action_payload_for_webhook_actions(self, input_action, action_config_dict):
assert action_config_dict == {
"genericWebhookActionInput": {
"integrationID": input_action.integration_id,
"requestPayload": json.dumps(
input_action.request_payload, separators=(",", ":")
),
},
}
| TestPrepareToCreate |
python | RaRe-Technologies__gensim | gensim/models/atmodel.py | {
"start": 3119,
"end": 5177
} | class ____(LdaState):
"""Encapsulate information for computation of :class:`~gensim.models.atmodel.AuthorTopicModel`."""
def __init__(self, eta, lambda_shape, gamma_shape):
"""
Parameters
----------
eta: numpy.ndarray
Dirichlet topic parameter for sparsity.
lambda_shape: (int, int)
Initialize topic parameters.
gamma_shape: int
Initialize topic parameters.
"""
self.eta = eta
self.sstats = np.zeros(lambda_shape)
self.gamma = np.zeros(gamma_shape)
self.numdocs = 0
self.dtype = np.float64 # To be compatible with LdaState
def construct_doc2author(corpus, author2doc):
"""Create a mapping from document IDs to author IDs.
Parameters
----------
corpus: iterable of list of (int, float)
Corpus in BoW format.
author2doc: dict of (str, list of int)
Mapping of authors to documents.
Returns
-------
dict of (int, list of str)
Document to Author mapping.
"""
doc2author = {}
for d, _ in enumerate(corpus):
author_ids = []
for a, a_doc_ids in author2doc.items():
if d in a_doc_ids:
author_ids.append(a)
doc2author[d] = author_ids
return doc2author
def construct_author2doc(doc2author):
"""Make a mapping from author IDs to document IDs.
Parameters
----------
doc2author: dict of (int, list of str)
Mapping of document id to authors.
Returns
-------
dict of (str, list of int)
Mapping of authors to document ids.
"""
# First get a set of all authors.
authors_ids = set()
for d, a_doc_ids in doc2author.items():
for a in a_doc_ids:
authors_ids.add(a)
# Now construct the dictionary.
author2doc = {}
for a in authors_ids:
author2doc[a] = []
for d, a_ids in doc2author.items():
if a in a_ids:
author2doc[a].append(d)
return author2doc
| AuthorTopicState |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/decorators/kubernetes.py | {
"start": 1923,
"end": 6203
} | class ____(DecoratedOperator, KubernetesPodOperator):
custom_operator_name = "@task.kubernetes"
# `cmds` and `arguments` are used internally by the operator
template_fields: Sequence[str] = tuple(
{"op_args", "op_kwargs", *KubernetesPodOperator.template_fields} - {"cmds", "arguments"}
)
# Since we won't mutate the arguments, we should just do the shallow copy
# there are some cases we can't deepcopy the objects (e.g protobuf).
shallow_copy_attrs: Sequence[str] = ("python_callable",)
def __init__(self, namespace: str | None = None, use_dill: bool = False, **kwargs) -> None:
self.use_dill = use_dill
# If the name was not provided, we generate operator name from the python_callable
# we also instruct operator to add a random suffix to avoid collisions by default
op_name = kwargs.pop("name", f"k8s-airflow-pod-{kwargs['python_callable'].__name__}")
random_name_suffix = kwargs.pop("random_name_suffix", True)
super().__init__(
namespace=namespace,
name=op_name,
random_name_suffix=random_name_suffix,
cmds=["placeholder-command"],
**kwargs,
)
def _generate_cmds(self) -> list[str]:
script_filename = "/tmp/script.py"
input_filename = "/tmp/script.in"
output_filename = "/airflow/xcom/return.json"
write_local_script_file_cmd = (
f"{_generate_decoded_command(quote(_PYTHON_SCRIPT_ENV), quote(script_filename))}"
)
write_local_input_file_cmd = (
f"{_generate_decoded_command(quote(_PYTHON_INPUT_ENV), quote(input_filename))}"
)
make_xcom_dir_cmd = "mkdir -p /airflow/xcom"
exec_python_cmd = f"python {script_filename} {input_filename} {output_filename}"
return [
"bash",
"-cx",
(
f"{write_local_script_file_cmd} && "
f"{write_local_input_file_cmd} && "
f"{make_xcom_dir_cmd} && "
f"{exec_python_cmd}"
),
]
def execute(self, context: Context):
with TemporaryDirectory(prefix="venv") as tmp_dir:
pickling_library = dill if self.use_dill else pickle
script_filename = os.path.join(tmp_dir, "script.py")
input_filename = os.path.join(tmp_dir, "script.in")
with open(input_filename, "wb") as file:
pickling_library.dump({"args": self.op_args, "kwargs": self.op_kwargs}, file)
py_source = self.get_python_source()
jinja_context = {
"op_args": self.op_args,
"op_kwargs": self.op_kwargs,
"pickling_library": pickling_library.__name__,
"python_callable": self.python_callable.__name__,
"python_callable_source": py_source,
"string_args_global": False,
}
write_python_script(jinja_context=jinja_context, filename=script_filename)
self.env_vars: list[k8s.V1EnvVar] = [
*self.env_vars,
k8s.V1EnvVar(name=_PYTHON_SCRIPT_ENV, value=_read_file_contents(script_filename)),
k8s.V1EnvVar(name=_PYTHON_INPUT_ENV, value=_read_file_contents(input_filename)),
]
self.cmds = self._generate_cmds()
return super().execute(context)
def kubernetes_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""
Kubernetes operator decorator.
This wraps a function to be executed in K8s using KubernetesPodOperator.
Also accepts any argument that KubernetesPodOperator will via ``kwargs``. Can be
reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with
keys as XCom keys. Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_KubernetesDecoratedOperator,
**kwargs,
)
| _KubernetesDecoratedOperator |
python | streamlit__streamlit | lib/streamlit/runtime/state/query_params.py | {
"start": 1474,
"end": 9416
} | class ____(MutableMapping[str, str]):
"""A lightweight wrapper of a dict that sends forwardMsgs when state changes.
It stores str keys with str and List[str] values.
"""
_query_params: dict[str, list[str] | str] = field(default_factory=dict)
def __iter__(self) -> Iterator[str]:
self._ensure_single_query_api_used()
return iter(
key
for key in self._query_params
if key.lower() not in EMBED_QUERY_PARAMS_KEYS
)
def __getitem__(self, key: str) -> str:
"""Retrieves a value for a given key in query parameters.
Returns the last item in a list or an empty string if empty.
If the key is not present, raise KeyError.
"""
self._ensure_single_query_api_used()
if key.lower() in EMBED_QUERY_PARAMS_KEYS:
raise KeyError(missing_key_error_message(key))
try:
value = self._query_params[key]
if isinstance(value, list):
if len(value) == 0:
return ""
# Return the last value to mimic Tornado's behavior
# https://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.get_query_argument
return value[-1]
return value
except KeyError:
raise KeyError(missing_key_error_message(key))
def __setitem__(self, key: str, value: str | Iterable[str]) -> None:
self._ensure_single_query_api_used()
self._set_item_internal(key, value)
self._send_query_param_msg()
def _set_item_internal(self, key: str, value: str | Iterable[str]) -> None:
_set_item_in_dict(self._query_params, key, value)
def __delitem__(self, key: str) -> None:
self._ensure_single_query_api_used()
if key.lower() in EMBED_QUERY_PARAMS_KEYS:
raise KeyError(missing_key_error_message(key))
try:
del self._query_params[key]
self._send_query_param_msg()
except KeyError:
raise KeyError(missing_key_error_message(key))
def update(
self,
other: Iterable[tuple[str, str | Iterable[str]]]
| SupportsKeysAndGetItem[str, str | Iterable[str]] = (),
/,
**kwds: str,
) -> None:
# This overrides the `update` provided by MutableMapping
# to ensure only one one ForwardMsg is sent.
self._ensure_single_query_api_used()
if hasattr(other, "keys") and hasattr(other, "__getitem__"):
other = cast("SupportsKeysAndGetItem[str, str | Iterable[str]]", other)
for key in other.keys(): # noqa: SIM118
self._set_item_internal(key, other[key])
else:
for key, value in other:
self._set_item_internal(key, value)
for key, value in kwds.items():
self._set_item_internal(key, value)
self._send_query_param_msg()
def get_all(self, key: str) -> list[str]:
self._ensure_single_query_api_used()
if key not in self._query_params or key.lower() in EMBED_QUERY_PARAMS_KEYS:
return []
value = self._query_params[key]
return value if isinstance(value, list) else [value]
def __len__(self) -> int:
self._ensure_single_query_api_used()
return len(
{
key
for key in self._query_params
if key.lower() not in EMBED_QUERY_PARAMS_KEYS
}
)
def __str__(self) -> str:
self._ensure_single_query_api_used()
return str(self._query_params)
def _send_query_param_msg(self) -> None:
ctx = get_script_run_ctx()
if ctx is None:
return
self._ensure_single_query_api_used()
msg = ForwardMsg()
msg.page_info_changed.query_string = parse.urlencode(
self._query_params, doseq=True
)
ctx.query_string = msg.page_info_changed.query_string
ctx.enqueue(msg)
def clear(self) -> None:
self._ensure_single_query_api_used()
self.clear_with_no_forward_msg(preserve_embed=True)
self._send_query_param_msg()
def to_dict(self) -> dict[str, str]:
self._ensure_single_query_api_used()
# return the last query param if multiple values are set
return {
key: self[key]
for key in self._query_params
if key.lower() not in EMBED_QUERY_PARAMS_KEYS
}
def from_dict(
self,
_dict: Iterable[tuple[str, str | Iterable[str]]]
| SupportsKeysAndGetItem[str, str | Iterable[str]],
) -> None:
self._ensure_single_query_api_used()
old_value = self._query_params.copy()
self.clear_with_no_forward_msg(preserve_embed=True)
try:
self.update(_dict)
except StreamlitAPIException:
# restore the original from before we made any changes.
self._query_params = old_value
raise
def set_with_no_forward_msg(self, key: str, val: list[str] | str) -> None:
self._query_params[key] = val
def clear_with_no_forward_msg(self, preserve_embed: bool = False) -> None:
self._query_params = {
key: value
for key, value in self._query_params.items()
if key.lower() in EMBED_QUERY_PARAMS_KEYS and preserve_embed
}
def _ensure_single_query_api_used(self) -> None:
ctx = get_script_run_ctx()
if ctx is None:
return
ctx.mark_production_query_params_used()
def missing_key_error_message(key: str) -> str:
return f'st.query_params has no key "{key}".'
def _set_item_in_dict(
target_dict: dict[str, list[str] | str], key: str, value: str | Iterable[str]
) -> None:
"""Set an item in a dictionary."""
if isinstance(value, dict):
raise StreamlitQueryParamDictValueError(key)
if key.lower() in EMBED_QUERY_PARAMS_KEYS:
raise StreamlitAPIException(
"Query param embed and embed_options (case-insensitive) cannot be set programmatically."
)
# Type checking users should handle the string serialization themselves
# We will accept any type for the list and serialize to str just in case
if isinstance(value, Iterable) and not isinstance(value, str):
target_dict[key] = [str(item) for item in value]
else:
target_dict[key] = str(value)
def process_query_params(
query_params: Iterable[tuple[str, str | Iterable[str]]]
| SupportsKeysAndGetItem[str, str | Iterable[str]],
) -> str:
"""Convert query params into a URL-encoded query string."""
processed_params: dict[str, list[str] | str] = {}
if hasattr(query_params, "keys") and hasattr(query_params, "__getitem__"):
query_params = cast(
"SupportsKeysAndGetItem[str, str | Iterable[str]]", query_params
)
for key in query_params.keys(): # noqa: SIM118
value = query_params[key]
_set_item_in_dict(processed_params, key, value)
else:
for key, value in query_params:
if key in processed_params:
# If the key already exists, we need to accumulate the values.
if isinstance(value, dict):
raise StreamlitQueryParamDictValueError(key)
current_val = processed_params[key]
if not isinstance(current_val, list):
current_val = [current_val]
if isinstance(value, Iterable) and not isinstance(value, str):
current_val.extend([str(item) for item in value])
else:
current_val.append(str(value))
processed_params[key] = current_val
else:
_set_item_in_dict(processed_params, key, value)
return parse.urlencode(processed_params, doseq=True)
| QueryParams |
python | buildout__buildout | src/zc/buildout/_package_index.py | {
"start": 42498,
"end": 46941
} | class ____(configparser.RawConfigParser):
def __init__(self):
"""
Load from ~/.pypirc
"""
defaults = dict.fromkeys(['username', 'password', 'repository'], '')
super().__init__(defaults)
rc = os.path.join(os.path.expanduser('~'), '.pypirc')
if os.path.exists(rc):
_cfg_read_utf8_with_fallback(self, rc)
@property
def creds_by_repository(self):
sections_with_repositories = [
section
for section in self.sections()
if self.get(section, 'repository').strip()
]
return dict(map(self._get_repo_cred, sections_with_repositories))
def _get_repo_cred(self, section):
repo = self.get(section, 'repository').strip()
return repo, Credential(
self.get(section, 'username').strip(),
self.get(section, 'password').strip(),
)
def find_credential(self, url):
"""
If the URL indicated appears to be a repository defined in this
config, return the credential for that repository.
"""
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
return None
def open_with_auth(url, opener=urllib.request.urlopen):
"""Open a urllib2 request, handling HTTP authentication"""
parsed = urllib.parse.urlparse(url)
scheme, netloc, path, params, query, frag = parsed
# Double scheme does not raise on macOS as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise http.client.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, address = _splituser(netloc)
else:
auth, address = (None, None)
if not auth:
cred = PyPIConfig().find_credential(url)
if cred:
auth = str(cred)
info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)', *info)
if auth:
auth = "Basic " + _encode_auth(auth)
parts = scheme, address, path, params, query, frag
new_url = urllib.parse.urlunparse(parts)
request = urllib.request.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib.request.Request(url)
request.add_header('User-Agent', user_agent)
fp = opener(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2 == scheme and h2 == address:
parts = s2, netloc, path2, param2, query2, frag2
fp.url = urllib.parse.urlunparse(parts)
return fp
# copy of urllib.parse._splituser from Python 3.8
# See https://github.com/python/cpython/issues/80072.
def _splituser(host):
"""splituser('user[:passwd]@host[:port]')
--> 'user[:passwd]', 'host[:port]'."""
user, delim, host = host.rpartition('@')
return (user if delim else None), host
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
_scheme, _server, path, _param, _query, _frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
body = _read_utf8_with_fallback(filepath)
break
elif os.path.isdir(filepath):
f += '/'
files.append(f'<a href="{f}">{f}</a>')
else:
tmpl = "<html><head><title>{url}</title></head><body>{files}</body></html>"
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = io.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream)
| PyPIConfig |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image57.py | {
"start": 315,
"end": 842
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image57.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "logo.gif")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.