language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 11211,
"end": 11392
} | class ____(scale_color_distiller):
"""
Sequential, diverging continuous color scales
"""
_aesthetics = ["fill"]
# matplotlib colormaps
@dataclass
| scale_fill_distiller |
python | Textualize__textual | src/textual/widget.py | {
"start": 6727,
"end": 7022
} | class ____(NamedTuple):
"""Used for render/render_line based widgets that use caching. This structure can be used as a
cache-key."""
enabled: bool
"""Is 'enabled' applied?"""
focus: bool
"""Is 'focus' applied?"""
hover: bool
"""Is 'hover' applied?"""
| PseudoClasses |
python | apache__airflow | task-sdk/tests/task_sdk/definitions/_internal/test_decorators.py | {
"start": 946,
"end": 2103
} | class ____:
@pytest.mark.parametrize(
("decorators", "expected_decorators"),
[
(["@task.external_python"], []),
(["@task.external_python()"], []),
(['@task.external_python(serializer="dill")'], []),
(["@foo", "@task.external_python", "@bar"], ["@foo", "@bar"]),
(["@foo", "@task.external_python()", "@bar"], ["@foo", "@bar"]),
],
ids=["without_parens", "parens", "with_args", "nested_without_parens", "nested_with_parens"],
)
def test_remove_task_decorator(self, decorators: list[str], expected_decorators: list[str]):
concated_decorators = "\n".join(decorators)
expected_decorator = "\n".join(expected_decorators)
SCRIPT = dedent(
"""
def f():
import funcsigs
"""
)
py_source = concated_decorators + SCRIPT
expected_source = expected_decorator + SCRIPT if expected_decorator else SCRIPT.lstrip()
res = remove_task_decorator(python_source=py_source, task_decorator_name="@task.external_python")
assert res == expected_source
| TestExternalPythonDecorator |
python | dagster-io__dagster | .buildkite/buildkite-shared/buildkite_shared/step_builders/input_step_builder.py | {
"start": 0,
"end": 451
} | class ____:
def __init__(self, prompt, fields, key=None):
self._step = {"input": prompt, "fields": fields}
if key is not None:
self._step["key"] = key
def with_condition(self, condition):
self._step["if"] = condition
return self
def depends_on(self, dependencies):
self._step["depends_on"] = dependencies
return self
def build(self):
return self._step
| InputStepBuilder |
python | spyder-ide__spyder | spyder/plugins/editor/panels/scrollflag.py | {
"start": 859,
"end": 17264
} | class ____(Panel):
"""Source code editor's scroll flag area"""
WIDTH = 24 if sys.platform == 'darwin' else 12
FLAGS_DX = 4
FLAGS_DY = 2
def __init__(self):
Panel.__init__(self)
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.scrollable = True
self.setMouseTracking(True)
# Define some attributes to be used for unit testing.
self._unit_testing = False
self._range_indicator_is_visible = False
self._alt_key_is_down = False
self._ctrl_key_is_down = False
self._shift_key_is_down = False
self._meta_key_is_down = False
self._slider_range_color = QColor(Qt.gray)
self._slider_range_color.setAlphaF(.85)
self._slider_range_brush = QColor(Qt.gray)
self._slider_range_brush.setAlphaF(.5)
# Dictionary with flag lists
self._dict_flag_list = {}
# Thread to update flags on it.
self._update_flags_thread = QThread(None)
self._update_flags_thread.run = self._update_flags
self._update_flags_thread.finished.connect(self.update)
def on_install(self, editor):
"""Manages install setup of the pane."""
super().on_install(editor)
# Define permanent Qt colors that are needed for painting the flags
# and the slider range.
self._facecolors = {
'warning': QColor(editor.warning_color),
'error': QColor(editor.error_color),
'todo': QColor(editor.todo_color),
'breakpoint': QColor(editor.breakpoint_color),
'occurrence': QColor(editor.occurrence_color),
'found_results': QColor(editor.found_results_color)
}
self._edgecolors = {key: color.darker(120) for
key, color in self._facecolors.items()}
# Signals
editor.sig_focus_changed.connect(self.update)
editor.sig_key_pressed.connect(self.keyPressEvent)
editor.sig_key_released.connect(self.keyReleaseEvent)
editor.sig_scrollflag_shortcut_click.connect(self.mousePressEvent)
editor.sig_scrollflag_shortcut_move.connect(self.mouseMoveEvent)
editor.sig_leave_out.connect(self.update)
editor.sig_flags_changed.connect(self.update_flags)
editor.sig_theme_colors_changed.connect(self.update_flag_colors)
# This prevents that flags are updated while the user is moving the
# cursor, e.g. when typing.
editor.sig_cursor_position_changed.connect(self.update_flags)
@property
def slider(self):
"""This property holds whether the vertical scrollbar is visible."""
return self.editor.verticalScrollBar().isVisible()
def closeEvent(self, event):
self._update_flags_thread.quit()
self._update_flags_thread.wait()
super().closeEvent(event)
def sizeHint(self):
"""Override Qt method"""
return QSize(self.WIDTH, 0)
def update_flag_colors(self, color_dict):
"""
Update the permanent Qt colors that are used for painting the flags
and the slider range with the new colors defined in the given dict.
"""
for name, color in color_dict.items():
self._facecolors[name] = QColor(color)
self._edgecolors[name] = self._facecolors[name].darker(120)
@qdebounced(timeout=REFRESH_RATE)
def update_flags(self):
"""Update flags list in a thread."""
logger.debug("Updating current flags")
self._dict_flag_list = {
'error': [],
'warning': [],
'todo': [],
'breakpoint': [],
}
# Run this computation in a different thread to prevent freezing
# the interface
if not self._update_flags_thread.isRunning():
self._update_flags_thread.start()
def _update_flags(self):
"""Update flags list."""
editor = self.editor
block = editor.document().firstBlock()
while block.isValid():
# Parse all lines in the file looking for something to flag.
data = block.userData()
if data:
if data.code_analysis:
for _, _, severity, _ in data.code_analysis:
if severity == DiagnosticSeverity.ERROR:
flag_type = 'error'
break
else:
flag_type = 'warning'
elif data.todo:
flag_type = 'todo'
elif data.breakpoint:
flag_type = 'breakpoint'
else:
flag_type = None
if flag_type is not None:
self._dict_flag_list[flag_type].append(block)
block = block.next()
def paintEvent(self, event):
"""
Override Qt method.
Painting the scroll flag area
There is two cases:
- The scroll bar is moving, in which case paint all flags.
- The scroll bar is not moving, only paint flags corresponding
to visible lines.
"""
# The area in which the slider handle of the scrollbar may move.
groove_rect = self.get_scrollbar_groove_rect()
# This is necessary to catch a possible error when the scrollbar
# has zero height.
# Fixes spyder-ide/spyder#21600
try:
# The scrollbar's scale factor ratio between pixel span height and
# value span height
scale_factor = (
groove_rect.height() / self.get_scrollbar_value_height()
)
except ZeroDivisionError:
scale_factor = 1
# The vertical offset of the scroll flag area relative to the
# top of the text editor.
offset = groove_rect.y()
# Note that we calculate the pixel metrics required to draw the flags
# here instead of using the convenience methods of the ScrollFlagArea
# for performance reason.
rect_x = ceil(self.FLAGS_DX / 2)
rect_w = self.WIDTH - self.FLAGS_DX
rect_h = self.FLAGS_DY
# Fill the whole painting area
painter = QPainter(self)
painter.fillRect(event.rect(), self.editor.sideareas_color)
editor = self.editor
# Define compute_flag_ypos to position the flags:
# Paint flags for the entire document
last_line = editor.document().lastBlock().firstLineNumber()
# The 0.5 offset is used to align the flags with the center of
# their corresponding text edit block before scaling.
first_y_pos = self.value_to_position(
0.5, scale_factor, offset) - self.FLAGS_DY / 2
last_y_pos = self.value_to_position(
last_line + 0.5, scale_factor, offset) - self.FLAGS_DY / 2
# Compute the height of a line and of a flag in lines.
line_height = last_y_pos - first_y_pos
if line_height > 0:
flag_height_lines = rect_h * last_line / line_height
else:
flag_height_lines = 0
# All the lists of block numbers for flags
dict_flag_lists = {
"occurrence": editor.occurrences,
"found_results": editor.found_results
}
dict_flag_lists.update(self._dict_flag_list)
# This is necessary to paint find matches above errors and warnings.
# See spyder-ide/spyder#20970
dict_flag_lists_iter = reversed(dict_flag_lists)
for flag_type in dict_flag_lists_iter:
painter.setBrush(self._facecolors[flag_type])
painter.setPen(self._edgecolors[flag_type])
if editor.verticalScrollBar().maximum() == 0:
# No scroll
for block in dict_flag_lists[flag_type]:
if not is_block_safe(block):
continue
geometry = editor.blockBoundingGeometry(block)
rect_y = ceil(
geometry.y() +
geometry.height() / 2 +
rect_h / 2
)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
elif last_line == 0:
# Only one line
for block in dict_flag_lists[flag_type]:
if not is_block_safe(block):
continue
rect_y = ceil(first_y_pos)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
else:
# Many lines
if len(dict_flag_lists[flag_type]) < MAX_FLAGS:
# If the file is too long, do not freeze the editor
next_line = 0
for block in dict_flag_lists[flag_type]:
if not is_block_safe(block):
continue
block_line = block.firstLineNumber()
# block_line = -1 if invalid
if block_line < next_line:
# Don't print flags on top of flags
continue
next_line = block_line + flag_height_lines / 2
frac = block_line / last_line
rect_y = ceil(first_y_pos + frac * line_height)
painter.drawRect(rect_x, rect_y, rect_w, rect_h)
# Paint the slider range
if not self._unit_testing:
modifiers = QApplication.queryKeyboardModifiers()
else:
modifiers = Qt.KeyboardModifier.NoModifier
if self._alt_key_is_down:
modifiers |= Qt.KeyboardModifier.AltModifier
if self._ctrl_key_is_down:
modifiers |= Qt.KeyboardModifier.ControlModifier
if self._shift_key_is_down:
modifiers |= Qt.KeyboardModifier.ShiftModifier
if self._meta_key_is_down:
modifiers |= Qt.KeyboardModifier.MetaModifier
mouse_modifiers = editor.mouse_shortcuts['jump_to_position']
modifiers_held = modifiers == mouse_modifiers
if self.slider:
cursor_pos = self.mapFromGlobal(QCursor().pos())
is_over_self = self.rect().contains(cursor_pos)
is_over_editor = editor.rect().contains(
editor.mapFromGlobal(QCursor().pos()))
# We use QRect.contains instead of QWidget.underMouse method to
# determined if the cursor is over the editor or the flag scrollbar
# because the later gives a wrong result when a mouse button
# is pressed.
if is_over_self or (modifiers_held and is_over_editor):
painter.setPen(self._slider_range_color)
painter.setBrush(self._slider_range_brush)
x, y, width, height = self.make_slider_range(
cursor_pos, scale_factor, offset, groove_rect)
painter.drawRect(x, y, width, height)
self._range_indicator_is_visible = True
else:
self._range_indicator_is_visible = False
def enterEvent(self, event):
"""Override Qt method"""
self.update()
def leaveEvent(self, event):
"""Override Qt method"""
self.update()
def mouseMoveEvent(self, event):
"""Override Qt method"""
self.update()
def mousePressEvent(self, event):
"""Override Qt method"""
if self.slider and event.button() == Qt.LeftButton:
vsb = self.editor.verticalScrollBar()
value = self.position_to_value(event.pos().y())
vsb.setValue(int(value-vsb.pageStep()/2))
def keyReleaseEvent(self, event):
"""Override Qt method."""
if event.key() == Qt.Key.Key_Alt:
self._alt_key_is_down = False
self.update()
elif event.key() == Qt.Key.Key_Control:
self._ctrl_key_is_down = False
self.update()
elif event.key() == Qt.Key.Key_Shift:
self._shift_key_is_down = False
self.update()
elif event.key() == Qt.Key.Key_Meta:
self._meta_key_is_down = False
self.update()
def keyPressEvent(self, event):
"""Override Qt method"""
if event.key() == Qt.Key_Alt:
self._alt_key_is_down = True
self.update()
elif event.key() == Qt.Key.Key_Control:
self._ctrl_key_is_down = True
self.update()
elif event.key() == Qt.Key.Key_Shift:
self._shift_key_is_down = True
self.update()
elif event.key() == Qt.Key.Key_Meta:
self._meta_key_is_down = True
self.update()
def get_vertical_offset(self):
"""
Return the vertical offset of the scroll flag area relative to the
top of the text editor.
"""
groove_rect = self.get_scrollbar_groove_rect()
return groove_rect.y()
def get_slider_min_height(self):
"""
Return the minimum height of the slider range based on that set for
the scroll bar's slider.
"""
return QApplication.instance().style().pixelMetric(
QStyle.PM_ScrollBarSliderMin)
def get_scrollbar_groove_rect(self):
"""Return the area in which the slider handle may move."""
vsb = self.editor.verticalScrollBar()
style = QApplication.instance().style()
opt = QStyleOptionSlider()
vsb.initStyleOption(opt)
# Get the area in which the slider handle may move.
groove_rect = style.subControlRect(
QStyle.CC_ScrollBar, opt, QStyle.SC_ScrollBarGroove, self)
return groove_rect
def get_scrollbar_position_height(self):
"""Return the pixel span height of the scrollbar area in which
the slider handle may move"""
groove_rect = self.get_scrollbar_groove_rect()
return float(groove_rect.height())
def get_scrollbar_value_height(self):
"""Return the value span height of the scrollbar"""
vsb = self.editor.verticalScrollBar()
return vsb.maximum() - vsb.minimum() + vsb.pageStep()
def get_scale_factor(self):
"""Return scrollbar's scale factor:
ratio between pixel span height and value span height"""
return (self.get_scrollbar_position_height() /
self.get_scrollbar_value_height())
def value_to_position(self, y, scale_factor, offset):
"""Convert value to position in pixels"""
vsb = self.editor.verticalScrollBar()
return int((y - vsb.minimum()) * scale_factor + offset)
def position_to_value(self, y):
"""Convert position in pixels to value"""
vsb = self.editor.verticalScrollBar()
offset = self.get_vertical_offset()
return vsb.minimum() + max([0, (y - offset) / self.get_scale_factor()])
def make_slider_range(self, cursor_pos, scale_factor, offset, groove_rect):
"""
Return the slider x and y positions and the slider width and height.
"""
# The slider range indicator position follows the mouse vertical
# position while its height corresponds to the part of the file that
# is currently visible on screen.
vsb = self.editor.verticalScrollBar()
slider_height = self.value_to_position(
vsb.pageStep(), scale_factor, offset) - offset
slider_height = max(slider_height, self.get_slider_min_height())
# Calculate the minimum and maximum y-value to constraint the slider
# range indicator position to the height span of the scrollbar area
# where the slider may move.
min_ypos = offset
max_ypos = groove_rect.height() + offset - slider_height
# Determine the bounded y-position of the slider rect.
slider_y = max(min_ypos, min(max_ypos,
ceil(cursor_pos.y()-slider_height/2)))
return 1, slider_y, self.WIDTH - 2, slider_height
def wheelEvent(self, event):
"""Override Qt method"""
self.editor.wheelEvent(event)
def set_enabled(self, state):
"""Toggle scroll flag area visibility"""
self.enabled = state
self.setVisible(state)
| ScrollFlagArea |
python | walkccc__LeetCode | solutions/2183. Count Array Pairs Divisible by K/2183.py | {
"start": 0,
"end": 307
} | class ____:
def countPairs(self, nums: list[int], k: int) -> int:
ans = 0
gcds = collections.Counter()
for num in nums:
gcd_i = math.gcd(num, k)
for gcd_j, count in gcds.items():
if gcd_i * gcd_j % k == 0:
ans += count
gcds[gcd_i] += 1
return ans
| Solution |
python | getsentry__sentry | src/sentry/analytics/events/advanced_search_feature_gated.py | {
"start": 86,
"end": 275
} | class ____(analytics.Event):
user_id: int | None = None
default_user_id: int
organization_id: int
analytics.register(AdvancedSearchFeatureGateEvent)
| AdvancedSearchFeatureGateEvent |
python | scikit-learn__scikit-learn | sklearn/neighbors/_base.py | {
"start": 13133,
"end": 26005
} | class ____(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for nearest neighbors estimators."""
_parameter_constraints: dict = {
"n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
"radius": [Interval(Real, 0, None, closed="both"), None],
"algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})],
"leaf_size": [Interval(Integral, 1, None, closed="left")],
"p": [Interval(Real, 0, None, closed="right"), None],
"metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
"metric_params": [dict, None],
"n_jobs": [Integral, None],
}
@abstractmethod
def __init__(
self,
n_neighbors=None,
radius=None,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
n_jobs=None,
):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
def _check_algorithm_metric(self):
if self.algorithm == "auto":
if self.metric == "precomputed":
alg_check = "brute"
elif (
callable(self.metric)
or self.metric in VALID_METRICS["ball_tree"]
or isinstance(self.metric, DistanceMetric)
):
alg_check = "ball_tree"
else:
alg_check = "brute"
else:
alg_check = self.algorithm
if callable(self.metric):
if self.algorithm == "kd_tree":
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree does not support callable metric '%s'"
"Function call overhead will result"
"in very poor performance." % self.metric
)
elif self.metric not in VALID_METRICS[alg_check] and not isinstance(
self.metric, DistanceMetric
):
raise ValueError(
"Metric '%s' not valid. Use "
"sorted(sklearn.neighbors.VALID_METRICS['%s']) "
"to get valid options. "
"Metric can also be a callable function." % (self.metric, alg_check)
)
if self.metric_params is not None and "p" in self.metric_params:
if self.p is not None:
warnings.warn(
(
"Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored."
),
SyntaxWarning,
stacklevel=3,
)
def _fit(self, X, y=None):
ensure_all_finite = "allow-nan" if get_tags(self).input_tags.allow_nan else True
if self.__sklearn_tags__().target_tags.required:
if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
multi_output=True,
order="C",
ensure_all_finite=ensure_all_finite,
)
if is_classifier(self):
# Classification targets require a specific format
if y.ndim == 1 or (y.ndim == 2 and y.shape[1] == 1):
if y.ndim != 1:
warnings.warn(
(
"A column-vector y was passed when a "
"1d array was expected. Please change "
"the shape of y to (n_samples,), for "
"example using ravel()."
),
DataConversionWarning,
stacklevel=2,
)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
# Using `dtype=np.intp` is necessary since `np.bincount`
# (called in _classification.py) fails when dealing
# with a float64 array on 32bit systems.
self._y = np.empty(y.shape, dtype=np.intp)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
else:
self._y = y
else:
if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
X = validate_data(
self,
X,
ensure_all_finite=ensure_all_finite,
accept_sparse="csr",
order="C",
)
self._check_algorithm_metric()
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get("p", self.p)
if self.metric == "minkowski":
self.effective_metric_params_["p"] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == "minkowski":
p = self.effective_metric_params_.pop("p", 2)
w = self.effective_metric_params_.pop("w", None)
if p == 1 and w is None:
self.effective_metric_ = "manhattan"
elif p == 2 and w is None:
self.effective_metric_ = "euclidean"
elif p == np.inf and w is None:
self.effective_metric_ = "chebyshev"
else:
# Use the generic minkowski metric, possibly weighted.
self.effective_metric_params_["p"] = p
self.effective_metric_params_["w"] = w
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
self.n_samples_fit_ = X.n_samples_fit_
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = "ball_tree"
self.n_samples_fit_ = X.data.shape[0]
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = "kd_tree"
self.n_samples_fit_ = X.data.shape[0]
return self
if self.metric == "precomputed":
X = _check_precomputed(X)
# Precomputed matrix X must be squared
if X.shape[0] != X.shape[1]:
raise ValueError(
"Precomputed matrix must be square."
" Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
)
self.n_features_in_ = X.shape[1]
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ("auto", "brute"):
warnings.warn("cannot use tree with sparse input: using brute force")
if (
self.effective_metric_ not in VALID_METRICS_SPARSE["brute"]
and not callable(self.effective_metric_)
and not isinstance(self.effective_metric_, DistanceMetric)
):
raise ValueError(
"Metric '%s' not valid for sparse input. "
"Use sorted(sklearn.neighbors."
"VALID_METRICS_SPARSE['brute']) "
"to get valid options. "
"Metric can also be a callable function." % (self.effective_metric_)
)
self._fit_X = X.copy()
self._tree = None
self._fit_method = "brute"
self.n_samples_fit_ = X.shape[0]
return self
self._fit_method = self.algorithm
self._fit_X = X
self.n_samples_fit_ = X.shape[0]
if self._fit_method == "auto":
# A tree approach is better for small number of neighbors or small
# number of features, with KDTree generally faster when available
if (
self.metric == "precomputed"
or self._fit_X.shape[1] > 15
or (
self.n_neighbors is not None
and self.n_neighbors >= self._fit_X.shape[0] // 2
)
):
self._fit_method = "brute"
else:
if (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_["p"] < 1
):
self._fit_method = "brute"
elif (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_.get("w") is not None
):
# 'minkowski' with weights is not supported by KDTree but is
# supported byBallTree.
self._fit_method = "ball_tree"
elif self.effective_metric_ in VALID_METRICS["kd_tree"]:
self._fit_method = "kd_tree"
elif (
callable(self.effective_metric_)
or self.effective_metric_ in VALID_METRICS["ball_tree"]
):
self._fit_method = "ball_tree"
else:
self._fit_method = "brute"
if (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_["p"] < 1
):
# For 0 < p < 1 Minkowski distances aren't valid distance
# metric as they do not satisfy triangular inequality:
# they are semi-metrics.
# algorithm="kd_tree" and algorithm="ball_tree" can't be used because
# KDTree and BallTree require a proper distance metric to work properly.
# However, the brute-force algorithm supports semi-metrics.
if self._fit_method == "brute":
warnings.warn(
"Mind that for 0 < p < 1, Minkowski metrics are not distance"
" metrics. Continuing the execution with `algorithm='brute'`."
)
else: # self._fit_method in ("kd_tree", "ball_tree")
raise ValueError(
f'algorithm="{self._fit_method}" does not support 0 < p < 1 for '
"the Minkowski metric. To resolve this problem either "
'set p >= 1 or algorithm="brute".'
)
if self._fit_method == "ball_tree":
self._tree = BallTree(
X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "kd_tree":
if (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_.get("w") is not None
):
raise ValueError(
"algorithm='kd_tree' is not valid for "
"metric='minkowski' with a weight parameter 'w': "
"try algorithm='ball_tree' "
"or algorithm='brute' instead."
)
self._tree = KDTree(
X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "brute":
self._tree = None
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
# For cross-validation routines to split data correctly
tags.input_tags.pairwise = self.metric == "precomputed"
# when input is precomputed metric values, all those values need to be positive
tags.input_tags.positive_only = tags.input_tags.pairwise
tags.input_tags.allow_nan = self.metric == "nan_euclidean"
return tags
| NeighborsBase |
python | huggingface__transformers | src/transformers/models/udop/modeling_udop.py | {
"start": 64085,
"end": 72233
} | class ____(UdopPreTrainedModel):
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
"decoder.embed_tokens.weight": "shared.weight",
"encoder.embed_patches.proj.weight": "patch_embed.proj.weight", # TODO tie weights for patch embeddings not working
"encoder.embed_patches.proj.bias": "patch_embed.proj.bias", # TODO tie weights for patch embeddings not working
}
def __init__(self, config):
super().__init__(config)
# text and image embeddings
self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.patch_embed = UdopPatchEmbeddings(config)
encoder_config = deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_word_embeddings = True
self.encoder = UdopStack(encoder_config)
decoder_config = deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_word_embeddings = True
decoder_config.num_layers = config.num_decoder_layers
self.decoder = UdopStack(decoder_config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
@auto_docstring
def forward(
self,
input_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
bbox: Optional[dict[str, Any]] = None,
pixel_values: Optional[Tensor] = None,
visual_bbox: Optional[dict[str, Any]] = None,
decoder_input_ids: Optional[Tensor] = None,
decoder_attention_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
encoder_outputs: Optional[Tensor] = None,
past_key_values: Optional[Cache] = None,
decoder_inputs_embeds: Optional[Tensor] = None,
use_cache=True,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> tuple[Tensor, ...]:
r"""
bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*):
Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model.
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using
[`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting
token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last
`decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare
`decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Example:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from datasets import load_dataset
>>> import torch
>>> # load model and processor
>>> # in this case, we already have performed OCR ourselves
>>> # so we initialize the processor with `apply_ocr=False`
>>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
>>> model = AutoModel.from_pretrained("microsoft/udop-large")
>>> # load an example image, along with the words and coordinates
>>> # which were extracted using an OCR engine
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> inputs = processor(image, words, boxes=boxes, return_tensors="pt")
>>> decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])
>>> # forward pass
>>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 1, 1024]
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
bbox=bbox,
pixel_values=pixel_values,
visual_bbox=visual_bbox,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
encoder_attention_mask = encoder_outputs.attention_mask if return_dict else encoder_outputs[1]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
# we filter out the attention mask
decoder_outputs = tuple(value for idx, value in enumerate(decoder_outputs) if idx != 1)
encoder_outputs = tuple(value for idx, value in enumerate(encoder_outputs) if idx != 1)
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The UDOP encoder-decoder Transformer with a language modeling head on top, enabling to generate text given document
images and an optional prompt.
This class is based on [`T5ForConditionalGeneration`], extended to deal with images and layout (2D) data.
"""
)
| UdopModel |
python | numba__numba | numba/core/types/containers.py | {
"start": 2522,
"end": 2717
} | class ____(Buffer):
"""
Type class for Python 3.x bytes objects.
"""
mutable = False
# Actually true but doesn't matter since bytes is immutable
slice_is_copy = False
| Bytes |
python | pypa__pipenv | pipenv/exceptions.py | {
"start": 6529,
"end": 6817
} | class ____(PipenvUsageError):
def __init__(self, message=None, **kwargs):
if not message:
message = "[bold]Aborting deploy[/bold]"
extra = kwargs.pop("extra", [])
PipenvUsageError.__init__(self, message=message, extra=extra, **kwargs)
| DeployException |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 150744,
"end": 152065
} | class ____(ExprNode):
# Node created during analyse_types phase
# of some nodes to hold a temporary value.
#
# Note: One must call "allocate" and "release" on
# the node during code generation to get/release the temp.
# This is because the temp result is often used outside of
# the regular cycle.
subexprs = []
def __init__(self, pos, type, env=None):
ExprNode.__init__(self, pos)
self.type = type
if type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
def analyse_types(self, env):
return self
def analyse_target_declaration(self, env):
self.is_target = True
def generate_result_code(self, code):
pass
def allocate(self, code):
self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
def release(self, code):
code.funcstate.release_temp(self.temp_cname)
self.temp_cname = None
def result(self):
try:
return self.temp_cname
except:
assert False, "Remember to call allocate/release on TempNode"
raise
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
| TempNode |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 76199,
"end": 77510
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
def test_basic(self):
assert_almost_equal(np.var(self.A), self.real_var)
assert_almost_equal(np.std(self.A) ** 2, self.real_var)
def test_scalars(self):
assert_equal(np.var(1), 0)
assert_equal(np.std(1), 0)
def test_ddof1(self):
assert_almost_equal(
np.var(self.A, ddof=1), self.real_var * len(self.A) / (len(self.A) - 1)
)
assert_almost_equal(
np.std(self.A, ddof=1) ** 2, self.real_var * len(self.A) / (len(self.A) - 1)
)
def test_ddof2(self):
assert_almost_equal(
np.var(self.A, ddof=2), self.real_var * len(self.A) / (len(self.A) - 2)
)
assert_almost_equal(
np.std(self.A, ddof=2) ** 2, self.real_var * len(self.A) / (len(self.A) - 2)
)
def test_out_scalar(self):
d = np.arange(10)
out = np.array(0.0)
r = np.std(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.var(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.mean(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
| TestStdVar |
python | vyperlang__vyper | vyper/ast/metadata.py | {
"start": 465,
"end": 1981
} | class ____:
_NOT_FOUND = object()
def __init__(self):
self._node_updates: list[dict[tuple[int, str, Any], NodeMetadata]] = []
def register_update(self, metadata, k):
KEY = (id(metadata), k)
if KEY in self._node_updates[-1]:
return
prev = metadata.get(k, self._NOT_FOUND)
self._node_updates[-1][KEY] = (metadata, prev)
@contextlib.contextmanager
def enter(self):
self._node_updates.append({})
try:
yield
except VyperException as e:
# note: would be better to only catch typechecker exceptions here.
self._rollback_inner()
raise e from e
else:
self._commit_inner()
def _rollback_inner(self):
for (_, k), (metadata, prev) in self._node_updates[-1].items():
if prev is self._NOT_FOUND:
metadata.pop(k, None)
else:
metadata[k] = prev
self._pop_inner()
def _commit_inner(self):
inner = self._pop_inner()
if len(self._node_updates) == 0:
return
outer = self._node_updates[-1]
# register with previous frame in case inner gets committed
# but outer needs to be rolled back
for (_, k), (metadata, prev) in inner.items():
if (id(metadata), k) not in outer:
outer[(id(metadata), k)] = (metadata, prev)
def _pop_inner(self):
return self._node_updates.pop()
| _NodeMetadataJournal |
python | scrapy__scrapy | tests/test_loader.py | {
"start": 1311,
"end": 2030
} | class ____:
def test_add_value_on_unknown_field(self):
il = ProcessorItemLoader()
with pytest.raises(KeyError):
il.add_value("wrong_field", ["lala", "lolo"])
def test_load_item_using_default_loader(self):
i = SummaryItem()
i["summary"] = "lala"
il = ItemLoader(item=i)
il.add_value("name", "marta")
item = il.load_item()
assert item is i
assert item["summary"] == ["lala"]
assert item["name"] == ["marta"]
def test_load_item_using_custom_loader(self):
il = ProcessorItemLoader()
il.add_value("name", "marta")
item = il.load_item()
assert item["name"] == ["Marta"]
| TestBasicItemLoader |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/78_class_dec_member_func.py | {
"start": 9,
"end": 61
} | class ____:
def __init__(self):
self.x = 42
| C |
python | jmcnamara__XlsxWriter | xlsxwriter/test/workbook/test_write_sheets.py | {
"start": 299,
"end": 918
} | class ____(unittest.TestCase):
"""
Test the Workbook _write_sheets() method.
"""
def setUp(self):
self.fh = StringIO()
self.workbook = Workbook()
self.workbook._set_filehandle(self.fh)
def test_write_sheets(self):
"""Test the _write_sheets() method"""
self.workbook.add_worksheet("Sheet2")
self.workbook._write_sheets()
exp = """<sheets><sheet name="Sheet2" sheetId="1" r:id="rId1"/></sheets>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def tearDown(self):
self.workbook.fileclosed = 1
| TestWriteSheets |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_custom_reprs.py | {
"start": 2009,
"end": 2081
} | class ____:
def __init__(self, x: int) -> None:
self.x = x
| Foo |
python | readthedocs__readthedocs.org | readthedocs/redirects/migrations/0002_add_missing_model_change_migrations.py | {
"start": 150,
"end": 1319
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("redirects", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="redirect",
name="redirect_type",
field=models.CharField(
choices=[
("prefix", "Prefix Redirect"),
("page", "Page Redirect"),
("exact", "Exact Redirect"),
("sphinx_html", "Sphinx HTMLDir -> HTML"),
("sphinx_htmldir", "Sphinx HTML -> HTMLDir"),
],
help_text="The type of redirect you wish to use.",
max_length=255,
verbose_name="Redirect Type",
),
),
migrations.AlterField(
model_name="redirect",
name="to_url",
field=models.CharField(
blank=True,
db_index=True,
help_text="Absolute or relative URL. Example: <b>/tutorial/install.html</b>",
max_length=255,
verbose_name="To URL",
),
),
]
| Migration |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock-converse/tests/test_llms_bedrock_converse.py | {
"start": 4786,
"end": 5764
} | class ____:
def __init__(self) -> "MockClient":
self.exceptions = MockExceptions()
def converse(self, *args, **kwargs):
return {"output": {"message": {"content": [{"text": EXP_RESPONSE}]}}}
def converse_stream(self, *args, **kwargs):
def stream_generator():
for i, element in enumerate(EXP_STREAM_RESPONSE):
yield {
"contentBlockDelta": {
"delta": {"text": element},
"contentBlockIndex": 0,
}
}
# Add messageStop and metadata events for token usage testing
yield {"messageStop": {"stopReason": "end_turn"}}
yield {
"metadata": {
"usage": {"inputTokens": 15, "outputTokens": 26, "totalTokens": 41},
"metrics": {"latencyMs": 886},
}
}
return {"stream": stream_generator()}
| MockClient |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/jsonalyze/jsonalyze_query_engine.py | {
"start": 412,
"end": 1046
} | class ____:
"""
JSONalyze query engine.
DEPRECATED: Use `JSONalyzeQueryEngine` from `llama-index-experimental` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise DeprecationWarning(
"JSONalyzeQueryEngine has been moved to `llama-index-experimental`.\n"
"`pip install llama-index-experimental`\n"
"`from llama_index.experimental.query_engine import JSONalyzeQueryEngine`\n"
"Note that the JSONalyzeQueryEngine allows for arbitrary file creation, \n"
"and should be used in a secure environment."
)
| JSONalyzeQueryEngine |
python | astropy__astropy | astropy/constants/codata2022.py | {
"start": 477,
"end": 3793
} | class ____(CODATA2022, EMConstant):
_registry = CODATA2022._registry
h = CODATA2022("h", "Planck constant", 6.62607015e-34, "J s", 0.0, system="si")
hbar = CODATA2022(
"hbar", "Reduced Planck constant", h.value / (2 * math.pi), "J s", 0.0, system="si"
)
k_B = CODATA2022("k_B", "Boltzmann constant", 1.380649e-23, "J / (K)", 0.0, system="si")
c = CODATA2022(
"c", "Speed of light in vacuum", 299792458.0, "m / (s)", 0.0, system="si"
)
G = CODATA2022(
"G", "Gravitational constant", 6.67430e-11, "m3 / (kg s2)", 0.00015e-11, system="si"
)
g0 = CODATA2022(
"g0", "Standard acceleration of gravity", 9.80665, "m / s2", 0.0, system="si"
)
m_p = CODATA2022(
"m_p", "Proton mass", 1.67262192595e-27, "kg", 0.00000000052e-27, system="si"
)
m_n = CODATA2022(
"m_n", "Neutron mass", 1.67492750056e-27, "kg", 0.00000000085e-27, system="si"
)
m_e = CODATA2022(
"m_e", "Electron mass", 9.1093837139e-31, "kg", 0.0000000028e-31, system="si"
)
u = CODATA2022(
"u", "Atomic mass", 1.66053906892e-27, "kg", 0.00000000052e-27, system="si"
)
sigma_sb = CODATA2022(
"sigma_sb",
"Stefan-Boltzmann constant",
2 * math.pi**5 * k_B.value**4 / (15 * h.value**3 * c.value**2),
"W / (K4 m2)",
0.0,
system="si",
)
e = EMCODATA2022("e", "Electron charge", 1.602176634e-19, "C", 0.0, system="si")
eps0 = EMCODATA2022(
"eps0",
"Vacuum electric permittivity",
8.8541878188e-12,
"F/m",
0.0000000014e-12,
system="si",
)
N_A = CODATA2022(
"N_A", "Avogadro's number", 6.02214076e23, "1 / (mol)", 0.0, system="si"
)
R = CODATA2022(
"R", "Gas constant", k_B.value * N_A.value, "J / (K mol)", 0.0, system="si"
)
Ryd = CODATA2022(
"Ryd", "Rydberg constant", 10973731.568157, "1 / (m)", 0.000012, system="si"
)
a0 = CODATA2022(
"a0", "Bohr radius", 5.29177210544e-11, "m", 0.00000000082e-11, system="si"
)
muB = CODATA2022(
"muB", "Bohr magneton", 9.2740100657e-24, "J/T", 0.0000000029e-24, system="si"
)
alpha = CODATA2022(
"alpha",
"Fine-structure constant",
7.2973525643e-3,
"",
0.0000000011e-3,
system="si",
)
atm = CODATA2022("atm", "Standard atmosphere", 101325, "Pa", 0.0, system="si")
mu0 = CODATA2022(
"mu0",
"Vacuum magnetic permeability",
1.25663706127e-6,
"N/A2",
0.00000000020e-6,
system="si",
)
sigma_T = CODATA2022(
"sigma_T",
"Thomson scattering cross-section",
6.6524587051e-29,
"m2",
0.0000000062e-29,
system="si",
)
# Formula taken from NIST wall chart.
# The numerical factor is from a numerical solution to the equation for the
# maximum. See https://en.wikipedia.org/wiki/Wien%27s_displacement_law
b_wien = CODATA2022(
"b_wien",
"Wien wavelength displacement law constant",
h.value * c.value / (k_B.value * 4.965114231744276),
"m K",
0.0,
system="si",
)
# CGS constants.
# Only constants that cannot be converted directly from S.I. are defined here.
# Because both e and c are exact, these are also exact by definition.
e_esu = EMCODATA2022(
e.abbrev, e.name, e.value * c.value * 10.0, "statC", 0.0, system="esu"
)
e_emu = EMCODATA2022(e.abbrev, e.name, e.value / 10, "abC", 0.0, system="emu")
e_gauss = EMCODATA2022(
e.abbrev, e.name, e.value * c.value * 10.0, "Fr", 0.0, system="gauss"
)
| EMCODATA2022 |
python | django__django | django/contrib/admin/widgets.py | {
"start": 2778,
"end": 2870
} | class ____(BaseAdminTimeWidget):
template_name = "admin/widgets/time.html"
| AdminTimeWidget |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/simple_resource/package.py | {
"start": 217,
"end": 653
} | class ____(Package):
url = "http://example.com/source-1.0.tgz"
version("1.0", sha256="1111111111111111111111111111111111111111111111111111111111111111")
resource(
name="sample-resource",
url="https://example.com/resource.tgz",
checksum="2222222222222222222222222222222222222222222222222222222222222222",
when="@1.0",
placement="resource-dst",
expand="True",
)
| SimpleResource |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 20767,
"end": 21972
} | class ____(object):
# StatT
def __init__(
self,
id = None,
val = 0,
count = 0,
):
self.id = id # type: Optional[str]
self.val = val # type: int
self.count = count # type: int
@classmethod
def InitFromBuf(cls, buf, pos):
stat = Stat()
stat.Init(buf, pos)
return cls.InitFromObj(stat)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, stat):
x = StatT()
x._UnPack(stat)
return x
# StatT
def _UnPack(self, stat):
if stat is None:
return
self.id = stat.Id()
self.val = stat.Val()
self.count = stat.Count()
# StatT
def Pack(self, builder):
if self.id is not None:
id = builder.CreateString(self.id)
StatStart(builder)
if self.id is not None:
StatAddId(builder, id)
StatAddVal(builder, self.val)
StatAddCount(builder, self.count)
stat = StatEnd(builder)
return stat
| StatT |
python | langchain-ai__langchain | libs/core/langchain_core/prompt_values.py | {
"start": 546,
"end": 1360
} | class ____(Serializable, ABC):
"""Base abstract class for inputs to any language model.
`PromptValues` can be converted to both LLM (pure text-generation) inputs and
chat model inputs.
"""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "prompt"]`
"""
return ["langchain", "schema", "prompt"]
@abstractmethod
def to_string(self) -> str:
"""Return prompt value as string."""
@abstractmethod
def to_messages(self) -> list[BaseMessage]:
"""Return prompt as a list of messages."""
| PromptValue |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/8_Actor_Critic_Advantage/AC_CartPole.py | {
"start": 2825,
"end": 5860
} | class ____(object):
def __init__(self, sess, n_features, lr=0.01):
self.sess = sess
self.s = tf.placeholder(tf.float32, [1, n_features], "state")
self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
self.r = tf.placeholder(tf.float32, None, 'r')
with tf.variable_scope('Critic'):
l1 = tf.layers.dense(
inputs=self.s,
units=20, # number of hidden units
activation=tf.nn.relu, # None
# have to be linear to make sure the convergence of actor.
# But linear approximator seems hardly learns the correct Q.
kernel_initializer=tf.random_normal_initializer(0., .1), # weights
bias_initializer=tf.constant_initializer(0.1), # biases
name='l1'
)
self.v = tf.layers.dense(
inputs=l1,
units=1, # output units
activation=None,
kernel_initializer=tf.random_normal_initializer(0., .1), # weights
bias_initializer=tf.constant_initializer(0.1), # biases
name='V'
)
with tf.variable_scope('squared_TD_error'):
self.td_error = self.r + GAMMA * self.v_ - self.v
self.loss = tf.square(self.td_error) # TD_error = (r+gamma*V_next) - V_eval
with tf.variable_scope('train'):
self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)
def learn(self, s, r, s_):
s, s_ = s[np.newaxis, :], s_[np.newaxis, :]
v_ = self.sess.run(self.v, {self.s: s_})
td_error, _ = self.sess.run([self.td_error, self.train_op],
{self.s: s, self.v_: v_, self.r: r})
return td_error
sess = tf.Session()
actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)
critic = Critic(sess, n_features=N_F, lr=LR_C) # we need a good teacher, so the teacher should learn faster than the actor
sess.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
tf.summary.FileWriter("logs/", sess.graph)
for i_episode in range(MAX_EPISODE):
s = env.reset()
t = 0
track_r = []
while True:
if RENDER: env.render()
a = actor.choose_action(s)
s_, r, done, info = env.step(a)
if done: r = -20
track_r.append(r)
td_error = critic.learn(s, r, s_) # gradient = grad[r + gamma * V(s_) - V(s)]
actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error]
s = s_
t += 1
if done or t >= MAX_EP_STEPS:
ep_rs_sum = sum(track_r)
if 'running_reward' not in globals():
running_reward = ep_rs_sum
else:
running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True # rendering
print("episode:", i_episode, " reward:", int(running_reward))
break
| Critic |
python | getsentry__sentry | tests/sentry/api/test_base.py | {
"start": 1702,
"end": 1807
} | class ____(DummyEndpoint):
permission_classes = (SuperuserPermission,)
| DummySuperuserPermissionEndpoint |
python | faif__python-patterns | patterns/behavioral/registry.py | {
"start": 480,
"end": 1202
} | class ____(metaclass=RegistryHolder):
"""
Any class that will inherits from BaseRegisteredClass will be included
inside the dict RegistryHolder.REGISTRY, the key being the name of the
class and the associated value, the class itself.
"""
def main():
"""
Before subclassing
>>> sorted(RegistryHolder.REGISTRY)
['BaseRegisteredClass']
>>> class ClassRegistree(BaseRegisteredClass):
... def __init__(self, *args, **kwargs):
... pass
After subclassing
>>> sorted(RegistryHolder.REGISTRY)
['BaseRegisteredClass', 'ClassRegistree']
"""
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
| BaseRegisteredClass |
python | redis__redis-py | tests/test_max_connections_error.py | {
"start": 104,
"end": 3625
} | class ____(ConnectionInterface):
"""A dummy connection class for testing that doesn't actually connect to Redis"""
def __init__(self, *args, **kwargs):
self.connected = False
def connect(self):
self.connected = True
def disconnect(self):
self.connected = False
def register_connect_callback(self, callback):
pass
def deregister_connect_callback(self, callback):
pass
def set_parser(self, parser_class):
pass
def get_protocol(self):
return 2
def on_connect(self):
pass
def check_health(self):
return True
def send_packed_command(self, command, check_health=True):
pass
def send_command(self, *args, **kwargs):
pass
def can_read(self, timeout=0):
return False
def read_response(self, disable_decoding=False, **kwargs):
return "PONG"
@pytest.mark.onlynoncluster
def test_max_connections_error_inheritance():
"""Test that MaxConnectionsError is a subclass of ConnectionError"""
assert issubclass(redis.MaxConnectionsError, redis.ConnectionError)
@pytest.mark.onlynoncluster
def test_connection_pool_raises_max_connections_error():
"""Test that ConnectionPool raises MaxConnectionsError and not ConnectionError"""
# Use a dummy connection class that doesn't try to connect to a real Redis server
pool = redis.ConnectionPool(max_connections=1, connection_class=DummyConnection)
pool.get_connection()
with pytest.raises(redis.MaxConnectionsError):
pool.get_connection()
@pytest.mark.skipif(
not hasattr(redis, "RedisCluster"), reason="RedisCluster not available"
)
def test_cluster_handles_max_connections_error():
"""
Test that RedisCluster doesn't reinitialize when MaxConnectionsError is raised
"""
# Create a more complete mock cluster
cluster = mock.MagicMock(spec=redis.RedisCluster)
cluster.cluster_response_callbacks = {}
cluster.RedisClusterRequestTTL = 3 # Set the TTL to avoid infinite loops
cluster.nodes_manager = mock.MagicMock()
node = mock.MagicMock()
# Mock get_redis_connection to return a mock Redis client
redis_conn = mock.MagicMock()
cluster.get_redis_connection.return_value = redis_conn
# Setup get_connection to be called and return a connection that will raise
connection = mock.MagicMock()
# Patch the get_connection function in the cluster module
with mock.patch("redis.cluster.get_connection", return_value=connection):
# Test MaxConnectionsError
connection.send_command.side_effect = redis.MaxConnectionsError(
"Too many connections"
)
# Call the method and check that the exception is raised
with pytest.raises(redis.MaxConnectionsError):
redis.RedisCluster._execute_command(cluster, node, "GET", "key")
# Verify nodes_manager.initialize was NOT called
cluster.nodes_manager.initialize.assert_not_called()
# Reset the mock for the next test
cluster.nodes_manager.initialize.reset_mock()
# Now test with regular ConnectionError to ensure it DOES reinitialize
connection.send_command.side_effect = redis.ConnectionError("Connection lost")
with pytest.raises(redis.ConnectionError):
redis.RedisCluster._execute_command(cluster, node, "GET", "key")
# Verify nodes_manager.initialize WAS called
cluster.nodes_manager.initialize.assert_called_once()
| DummyConnection |
python | pytest-dev__pytest-xdist | testing/acceptance_test.py | {
"start": 125,
"end": 12284
} | class ____:
def test_n1_pass(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_ok():
pass
"""
)
result = pytester.runpytest(p1, "-n1")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_n1_fail(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_fail():
assert 0
"""
)
result = pytester.runpytest(p1, "-n1")
assert result.ret == 1
result.stdout.fnmatch_lines(["*1 failed*"])
def test_n1_import_error(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
import __import_of_missing_module
def test_import():
pass
"""
)
result = pytester.runpytest(p1, "-n1")
assert result.ret == 1
result.stdout.fnmatch_lines(
["E *Error: No module named *__import_of_missing_module*"]
)
def test_n2_import_error(self, pytester: pytest.Pytester) -> None:
"""Check that we don't report the same import error multiple times
in distributed mode."""
p1 = pytester.makepyfile(
"""
import __import_of_missing_module
def test_import():
pass
"""
)
result1 = pytester.runpytest(p1, "-n2")
result2 = pytester.runpytest(p1, "-n1")
assert len(result1.stdout.lines) == len(result2.stdout.lines)
def test_n1_skip(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_skip():
import pytest
pytest.skip("myreason")
"""
)
result = pytester.runpytest(p1, "-n1")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_manytests_to_one_import_error(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
import __import_of_missing_module
def test_import():
pass
"""
)
result = pytester.runpytest(p1, "--tx=popen", "--tx=popen")
assert result.ret in (1, 2)
result.stdout.fnmatch_lines(
["E *Error: No module named *__import_of_missing_module*"]
)
def test_manytests_to_one_popen(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def test_fail0():
assert 0
def test_fail1():
raise ValueError()
def test_ok():
pass
def test_skip():
pytest.skip("hello")
"""
)
result = pytester.runpytest(p1, "-v", "-d", "--tx=popen", "--tx=popen")
result.stdout.fnmatch_lines(
[
"created: 2/2 workers",
"*2 failed, 1 passed, 1 skipped*",
]
)
assert result.ret == 1
def test_exitfirst_waits_for_workers_to_finish(
self, pytester: pytest.Pytester
) -> None:
"""The DSession waits for workers before exiting early on failure.
When -x/--exitfirst is set, the DSession wait for all workers to finish
before raising an Interrupt exception. This prevents reports from the
faiing test and other tests from being discarded.
"""
p1 = pytester.makepyfile(
"""
import time
def test_fail1():
time.sleep(0.1)
assert 0
def test_fail2():
time.sleep(0.2)
def test_fail3():
time.sleep(0.3)
assert 0
def test_fail4():
time.sleep(0.3)
def test_fail5():
time.sleep(0.3)
def test_fail6():
time.sleep(0.3)
"""
)
# Two workers are used
result = pytester.runpytest(p1, "-x", "-rA", "-v", "-n2")
assert result.ret == 2
# DSession should stop when the first failure is reached. Two failures
# may actually occur, due to timing.
outcomes = result.parseoutcomes()
assert "failed" in outcomes, "Expected at least one failure"
assert 1 <= outcomes["failed"] <= 2, "Expected no more than 2 failures"
def test_basetemp_in_subprocesses(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_send(tmp_path):
from pathlib import Path
assert tmp_path.relative_to(Path(%r)), tmp_path
"""
% str(pytester.path)
)
result = pytester.runpytest_subprocess(p1, "-n1")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_dist_ini_specified(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
import pytest
def test_fail0():
assert 0
def test_fail1():
raise ValueError()
def test_ok():
pass
def test_skip():
pytest.skip("hello")
"""
)
pytester.makeini(
"""
[pytest]
addopts = --tx=3*popen
"""
)
result = pytester.runpytest(p1, "-d", "-v")
result.stdout.fnmatch_lines(
[
"created: 3/3 workers",
"*2 failed, 1 passed, 1 skipped*",
]
)
assert result.ret == 1
def test_dist_tests_with_crash(self, pytester: pytest.Pytester) -> None:
if not hasattr(os, "kill"):
pytest.skip("no os.kill")
p1 = pytester.makepyfile(
"""
import pytest
def test_fail0():
assert 0
def test_fail1():
raise ValueError()
def test_ok():
pass
def test_skip():
pytest.skip("hello")
def test_crash():
import time
import os
time.sleep(0.5)
os.kill(os.getpid(), 15)
"""
)
result = pytester.runpytest(p1, "-v", "-d", "-n1")
result.stdout.fnmatch_lines(
[
"*Python*",
"*PASS**test_ok*",
"*node*down*",
"*3 failed, 1 passed, 1 skipped*",
]
)
assert result.ret == 1
def test_distribution_rsyncdirs_example(
self, pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch
) -> None:
# use a custom plugin that has a custom command-line option to ensure
# this is propagated to workers (see #491)
pytester.makepyfile(
**{
"myplugin/src/foobarplugin.py": """
from __future__ import print_function
import os
import sys
import pytest
def pytest_addoption(parser):
parser.addoption("--foobar", action="store", dest="foobar_opt")
@pytest.hookimpl(tryfirst=True)
def pytest_load_initial_conftests(early_config):
opt = early_config.known_args_namespace.foobar_opt
print("--foobar=%s active! [%s]" % (opt, os.getpid()), file=sys.stderr)
"""
}
)
assert (pytester.path / "myplugin/src/foobarplugin.py").is_file()
monkeypatch.setenv(
"PYTHONPATH", str(pytester.path / "myplugin/src"), prepend=os.pathsep
)
source = pytester.mkdir("source")
dest = pytester.mkdir("dest")
subdir = source / "example_pkg"
subdir.mkdir()
subdir.joinpath("__init__.py").touch()
p = subdir / "test_one.py"
p.write_text("def test_5():\n assert not __file__.startswith(%r)" % str(p))
result = pytester.runpytest_subprocess(
"-v",
"-d",
"-s",
"-pfoobarplugin",
"--foobar=123",
"--dist=load",
f"--rsyncdir={subdir}",
f"--tx=popen//chdir={dest}",
p,
)
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*1 passed*",
]
)
result.stderr.fnmatch_lines(["--foobar=123 active! *"])
assert dest.joinpath(subdir.name).is_dir()
def test_data_exchange(self, pytester: pytest.Pytester) -> None:
pytester.makeconftest(
"""
# This hook only called on the controlling process.
def pytest_configure_node(node):
node.workerinput['a'] = 42
node.workerinput['b'] = 7
def pytest_configure(config):
# this attribute is only set on workers
if hasattr(config, 'workerinput'):
a = config.workerinput['a']
b = config.workerinput['b']
r = a + b
config.workeroutput['r'] = r
# This hook only called on the controlling process.
def pytest_testnodedown(node, error):
node.config.calc_result = node.workeroutput['r']
def pytest_terminal_summary(terminalreporter):
if not hasattr(terminalreporter.config, 'workerinput'):
calc_result = terminalreporter.config.calc_result
terminalreporter._tw.sep('-',
'calculated result is %s' % calc_result)
"""
)
p1 = pytester.makepyfile("def test_func(): pass")
result = pytester.runpytest("-v", p1, "-d", "--tx=popen")
result.stdout.fnmatch_lines(
[
"created: 1/1 worker",
"*calculated result is 49*",
"*1 passed*",
]
)
assert result.ret == 0
def test_keyboardinterrupt_hooks_issue79(self, pytester: pytest.Pytester) -> None:
pytester.makepyfile(
__init__="",
test_one="""
def test_hello():
raise KeyboardInterrupt()
""",
)
pytester.makeconftest(
"""
def pytest_sessionfinish(session):
# on the worker
if hasattr(session.config, 'workeroutput'):
session.config.workeroutput['s2'] = 42
# on the controller
def pytest_testnodedown(node, error):
assert node.workeroutput['s2'] == 42
print ("s2call-finished")
"""
)
args = ["-n1", "--debug"]
result = pytester.runpytest_subprocess(*args)
s = result.stdout.str()
assert result.ret == 2
assert "s2call" in s
assert "Interrupted" in s
def test_keyboard_interrupt_dist(self, pytester: pytest.Pytester) -> None:
# xxx could be refined to check for return code
pytester.makepyfile(
"""
def test_sleep():
import time
time.sleep(10)
"""
)
child = pytester.spawn_pytest("-n1 -v", expect_timeout=30.0)
child.expect(".*test_sleep.*")
child.kill(2) # keyboard interrupt
child.expect(".*KeyboardInterrupt.*")
# child.expect(".*seconds.*")
child.close()
# assert ret == 2
def test_dist_with_collectonly(self, pytester: pytest.Pytester) -> None:
p1 = pytester.makepyfile(
"""
def test_ok():
pass
"""
)
result = pytester.runpytest(p1, "-n1", "--collect-only")
assert result.ret == 0
result.stdout.fnmatch_lines(["*collected 1 item*"])
| TestDistribution |
python | pypa__pip | src/pip/_internal/resolution/resolvelib/reporter.py | {
"start": 2838,
"end": 3909
} | class ____(BaseReporter[Requirement, Candidate, str]):
"""A reporter that does an info log for every event it sees."""
def starting(self) -> None:
logger.info("Reporter.starting()")
def starting_round(self, index: int) -> None:
logger.info("Reporter.starting_round(%r)", index)
def ending_round(self, index: int, state: Any) -> None:
logger.info("Reporter.ending_round(%r, state)", index)
logger.debug("Reporter.ending_round(%r, %r)", index, state)
def ending(self, state: Any) -> None:
logger.info("Reporter.ending(%r)", state)
def adding_requirement(
self, requirement: Requirement, parent: Candidate | None
) -> None:
logger.info("Reporter.adding_requirement(%r, %r)", requirement, parent)
def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None:
logger.info("Reporter.rejecting_candidate(%r, %r)", criterion, candidate)
def pinning(self, candidate: Candidate) -> None:
logger.info("Reporter.pinning(%r)", candidate)
| PipDebuggingReporter |
python | kamyu104__LeetCode-Solutions | Python/the-score-of-students-solving-math-expression.py | {
"start": 982,
"end": 2644
} | class ____(object):
def scoreOfStudents(self, s, answers):
"""
:type s: str
:type answers: List[int]
:rtype: int
"""
MAX_ANS = 1000
def evaluate(s):
def compute(operands, operators):
right, left = operands.pop(), operands.pop()
operands.append(ops[operators.pop()](left, right))
ops = {'+':operator.add, '*':operator.mul}
precedence = {'+':0, '*':1}
operands, operators, operand = [], [], 0
for c in s:
if c.isdigit():
operands.append(int(c))
else:
while operators and precedence[operators[-1]] >= precedence[c]:
compute(operands, operators)
operators.append(c)
while operators:
compute(operands, operators)
return operands[-1]
n = (len(s)+1)//2
dp = [[set() for _ in xrange(n)] for _ in xrange(n)]
for i in xrange(n):
dp[i][i].add(int(s[i*2]))
for l in xrange(1, n):
for left in xrange(n-l):
right = left+l
for k in xrange(left, right):
if s[2*k+1] == '+':
dp[left][right].update((x+y for x in dp[left][k] for y in dp[k+1][right] if x+y <= MAX_ANS))
else:
dp[left][right].update((x*y for x in dp[left][k] for y in dp[k+1][right] if x*y <= MAX_ANS))
target = evaluate(s)
return sum(5 if ans == target else 2 if ans in dp[0][-1] else 0 for ans in answers)
| Solution2 |
python | huggingface__transformers | tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py | {
"start": 24073,
"end": 37343
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
self.audio_url = (
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3"
)
self.audio_url_additional = (
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
)
self.image_url = "https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg"
self.messages = [
{
"role": "user",
"content": [
{"type": "audio", "audio_url": self.audio_url},
{"type": "image", "image_url": self.image_url},
{"type": "text", "text": "What's that sound and what kind of dog is this?"},
],
}
]
self.raw_audio, _ = librosa.load(
BytesIO(urlopen(self.audio_url).read()), sr=self.processor.feature_extractor.sampling_rate
)
self.raw_audio_additional, _ = librosa.load(
BytesIO(urlopen(self.audio_url_additional).read()), sr=self.processor.feature_extractor.sampling_rate
)
self.raw_image = Image.open(requests.get(self.image_url, stream=True).raw)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_small_model_integration_test(self):
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
"Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto"
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(
text=text, audio=[self.raw_audio], images=[self.raw_image], return_tensors="pt", padding=True
).to(torch.bfloat16)
expected_input_ids = torch.tensor(
[
151644,
8948,
198,
2610,
525,
264,
10950,
17847,
13,
151645,
198,
151644,
872,
198,
151647,
151646,
151646,
]
)
assert torch.allclose(expected_input_ids, inputs.input_ids[0][:17], atol=3e-3)
expected_pixel_slice = torch.tensor(
[
[0.8792, 0.8792, 0.9084],
[1.1858, 1.1858, 1.2296],
[1.2004, 1.2004, 1.2150],
[1.4340, 1.4340, 1.4194],
[1.3902, 1.4048, 1.4194],
[1.5216, 1.5362, 1.5362],
],
dtype=torch.bfloat16,
device="cpu",
)
assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)
# verify generation
inputs = inputs.to(torch_device)
output = model.generate(
**inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20
)
EXPECTED_DECODED_TEXT = Expectations({
("xpu", None): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
("cuda", (8, 6)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
("rocm", (9, 4)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
}).get_expectation() # fmt: skip
decoded_text = self.processor.decode(output[0], skip_special_tokens=True)
self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT)
@slow
def test_small_model_integration_test_batch(self):
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
"Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto"
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(
text=[text] * 2,
audio=[self.raw_audio, self.raw_audio],
images=[self.raw_image, self.raw_image],
return_tensors="pt",
padding=True,
).to(torch_device, dtype=torch.bfloat16)
output = model.generate(
**inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20
)
EXPECTED_DECODED_TEXTS = Expectations(
{
("xpu", 3): [
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
],
("cuda", 7) : [
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is of glass shattering, and the dog in the picture is a Labrador Retriever",
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is of glass shattering, and the dog in the picture is a Labrador Retriever",
],
("cuda", 8): [
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
],
("rocm", (9, 4)): [
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
],
}
).get_expectation() # fmt: skip
decoded_texts = self.processor.batch_decode(output, skip_special_tokens=True)
self.assertEqual(decoded_texts, EXPECTED_DECODED_TEXTS)
@slow
def test_small_model_integration_test_multiturn(self):
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
"Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto"
)
messages = [
self.messages[0],
{
"role": "assistant",
"content": [
{
"type": "text",
"text": "The sound is glass shattering, and the dog appears to be a Labrador Retriever.",
}
],
},
{
"role": "user",
"content": [
{"type": "audio", "audio_url": self.audio_url_additional},
{"type": "text", "text": "How about this one?"},
],
},
]
text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(
text=text,
audio=[self.raw_audio, self.raw_audio_additional],
images=[self.raw_image],
return_tensors="pt",
padding=True,
).to(torch_device, dtype=torch.bfloat16)
output = model.generate(
**inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20
)
EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog appears to be a Labrador Retriever.\nuser\nHow about this one?\nassistant\nThe sound is a cough."
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_w_audio(self):
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
"Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto"
)
audio_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/guess_age_gender.wav"
messages = [
{
"role": "system",
"content": [
{
"type": "text",
"text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.",
}
],
},
{
"role": "user",
"content": [{"type": "audio", "audio": audio_url}],
},
]
audio, _ = librosa.load(BytesIO(urlopen(audio_url).read()), sr=self.processor.feature_extractor.sampling_rate)
text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(text=text, audio=[audio], return_tensors="pt", padding=True).to(
torch_device, dtype=torch.bfloat16
)
output = model.generate(
**inputs,
thinker_temperature=0,
thinker_do_sample=False,
thinker_max_new_tokens=20,
talker_max_new_tokens=10,
)
EXPECTED_DECODED_TEXTS = Expectations(
{
("xpu", None): "system\nYou are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\nuser\n\nassistant\nWell, I can't really guess your age and gender just from your voice. There are so many",
("cuda", 7): "system\nYou are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\nuser\n\nassistant\nWell, I can try. But it's not always that accurate. I might be able to make",
("cuda", 8): "system\nYou are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\nuser\n\nassistant\nWell, I can't really guess your age and gender just from your voice. There are so many",
}
) # fmt: skip
EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation()
decoded_text = self.processor.decode(output[0][0], skip_special_tokens=True)
self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT)
self.assertFalse(torch.isnan(output[1]).any().item())
@slow
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
def test_small_model_integration_test_batch_flashatt2(self):
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
"Qwen/Qwen2.5-Omni-7B",
dtype=torch.bfloat16,
attn_implementation="flash_attention_2",
device_map="auto",
)
text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True)
inputs = self.processor(
text=[text, text],
audio=[self.raw_audio, self.raw_audio],
images=[self.raw_image, self.raw_image],
return_tensors="pt",
padding=True,
).to(torch_device)
output = model.generate(**inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False)
EXPECTED_DECODED_TEXT = Expectations({
("cuda", None): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog appears to be a Labrador Retriever.",
("cuda", (8, 6)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
("rocm", (9, 4)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
}).get_expectation() # fmt: skip
decoded_texts = self.processor.batch_decode(output, skip_special_tokens=True)
self.assertEqual(decoded_texts[0], EXPECTED_DECODED_TEXT)
self.assertEqual(decoded_texts[1], EXPECTED_DECODED_TEXT)
| Qwen2_5OmniModelIntegrationTest |
python | spack__spack | lib/spack/spack/vendor/jinja2/loaders.py | {
"start": 5545,
"end": 8375
} | class ____(BaseLoader):
"""Load templates from a directory in the file system.
The path can be relative or absolute. Relative paths are relative to
the current working directory.
.. code-block:: python
loader = FileSystemLoader("templates")
A list of paths can be given. The directories will be searched in
order, stopping at the first matching template.
.. code-block:: python
loader = FileSystemLoader(["/override/templates", "/default/templates"])
:param searchpath: A path, or list of paths, to the directory that
contains the templates.
:param encoding: Use this encoding to read the text from template
files.
:param followlinks: Follow symbolic links in the path.
.. versionchanged:: 2.8
Added the ``followlinks`` parameter.
"""
def __init__(
self,
searchpath: t.Union[str, os.PathLike, t.Sequence[t.Union[str, os.PathLike]]],
encoding: str = "utf-8",
followlinks: bool = False,
) -> None:
if not isinstance(searchpath, abc.Iterable) or isinstance(searchpath, str):
searchpath = [searchpath]
self.searchpath = [os.fspath(p) for p in searchpath]
self.encoding = encoding
self.followlinks = followlinks
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, str, t.Callable[[], bool]]:
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = os.path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = os.path.getmtime(filename)
def uptodate() -> bool:
try:
return os.path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self) -> t.List[str]:
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, _, filenames in walk_dir:
for filename in filenames:
template = (
os.path.join(dirpath, filename)[len(searchpath) :]
.strip(os.path.sep)
.replace(os.path.sep, "/")
)
if template[:2] == "./":
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
| FileSystemLoader |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingLiteral2.py | {
"start": 924,
"end": 1728
} | class ____(Enum):
ZERO = 0
ONE = 1
def func5(x: Union[MyEnum, str]):
if x is MyEnum.ZERO:
reveal_type(x, expected_text="Literal[MyEnum.ZERO]")
elif x is MyEnum.ONE:
reveal_type(x, expected_text="Literal[MyEnum.ONE]")
else:
reveal_type(x, expected_text="str")
def func6(x: Any):
if x is MyEnum.ZERO:
reveal_type(x, expected_text="Literal[MyEnum.ZERO]")
else:
reveal_type(x, expected_text="Any")
def func7(x: Any):
if x == MyEnum.ZERO:
reveal_type(x, expected_text="Literal[MyEnum.ZERO]")
else:
reveal_type(x, expected_text="Any")
def func8(x: Literal[0, 1] | None):
if x is 1:
reveal_type(x, expected_text="Literal[1]")
else:
reveal_type(x, expected_text="Literal[0, 1] | None")
| MyEnum |
python | coleifer__peewee | tests/regressions.py | {
"start": 30077,
"end": 30987
} | class ____(ModelTestCase):
requires = [RU, Recipe]
def test_multi_fk_join_regression(self):
u1, u2 = [RU.create(username=u) for u in ('u1', 'u2')]
for (n, a, m) in (('r11', u1, u1), ('r12', u1, u2), ('r21', u2, u1)):
Recipe.create(name=n, created_by=a, changed_by=m)
Change = RU.alias()
query = (Recipe
.select(Recipe, RU, Change)
.join(RU, on=(RU.id == Recipe.created_by).alias('a'))
.switch(Recipe)
.join(Change, on=(Change.id == Recipe.changed_by).alias('b'))
.order_by(Recipe.name))
with self.assertQueryCount(1):
data = [(r.name, r.a.username, r.b.username) for r in query]
self.assertEqual(data, [
('r11', 'u1', 'u1'),
('r12', 'u1', 'u2'),
('r21', 'u2', 'u1')])
| TestMultiFKJoinRegression |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator.py | {
"start": 2292,
"end": 3699
} | class ____(
composite_tensor_gradient.CompositeTensorGradient):
"""Composite tensor gradient for `LinearOperator`."""
def get_gradient_components(self, value):
return value._type_spec._to_components(value)
def replace_gradient_components(self, value, components):
flat_components = nest.flatten(components)
# If all component gradients are disconnected, return None.
if all(c is None for c in flat_components):
return None
# TODO(b/286565628): Update this once `CompositeTensorGradient` fully
# supports `tf.UnconnectedGradients.ZERO`.
# Replace individual disconnected component gradients with zeros.
value_components = value._type_spec._to_components(value)
flat_grad_components = []
for gc, vc in zip(flat_components, nest.flatten(value_components)):
if gc is None:
flat_grad_components.append(
nest.map_structure(
lambda x: array_ops.zeros_like(x, dtype=value.dtype),
vc,
expand_composites=True))
else:
flat_grad_components.append(gc)
grad_components = nest.pack_sequence_as(
value_components, flat_grad_components)
return value._type_spec._from_components(grad_components)
# pylint: enable=protected-access
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
@tf_export("linalg.LinearOperator")
| _LinearOperatorGradient |
python | fastai__fastai | fastai/metrics.py | {
"start": 18960,
"end": 19198
} | class ____(Dice):
"Implementation of the Jaccard coefficient that is lighter in RAM"
@property
def value(self): return self.inter/(self.union-self.inter) if self.union > 0 else None
# %% ../nbs/13b_metrics.ipynb 121
| JaccardCoeff |
python | getsentry__sentry | src/sentry/types/region.py | {
"start": 716,
"end": 874
} | class ____(Exception):
"""Indicate that a region was misconfigured or could not be initialized."""
@dataclass(frozen=True, eq=True)
| RegionConfigurationError |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-db2/llama_index/vector_stores/db2/base.py | {
"start": 967,
"end": 5591
} | class ____(Enum):
COSINE = 1
DOT_PRODUCT = 2
EUCLIDEAN_DISTANCE = 3
MANHATTAN_DISTANCE = 4
HAMMING_DISTANCE = 5
EUCLIDEAN_SQUARED = 6
# Define a type variable that can be any kind of function
T = TypeVar("T", bound=Callable[..., Any])
def _handle_exceptions(func: T) -> T:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
return func(*args, **kwargs)
except RuntimeError as db_err:
# Handle a known type of error (e.g., DB-related) specifically
logger.exception("DB-related error occurred.")
raise RuntimeError(f"Failed due to a DB issue: {db_err}") from db_err
except ValueError as val_err:
# Handle another known type of error specifically
logger.exception("Validation error.")
raise ValueError(f"Validation failed: {val_err}") from val_err
except Exception as e:
# Generic handler for all other exceptions
logger.exception(f"An unexpected error occurred: {e}")
raise RuntimeError(f"Unexpected error: {e}") from e
return cast(T, wrapper)
def _escape_str(value: str) -> str:
BS = "\\"
must_escape = (BS, "'")
return (
"".join(f"{BS}{c}" if c in must_escape else c for c in value) if value else ""
)
column_config: Dict = {
"id": {"type": "VARCHAR(64) PRIMARY KEY", "extract_func": lambda x: x.node_id},
"doc_id": {"type": "VARCHAR(64)", "extract_func": lambda x: x.ref_doc_id},
"embedding": {
"type": "VECTOR(embedding_dim, FLOAT32)",
"extract_func": lambda x: f"{x.get_embedding()}",
},
"node_info": {
"type": "BLOB",
"extract_func": lambda x: json.dumps(x.node_info),
},
"metadata": {
"type": "BLOB",
"extract_func": lambda x: json.dumps(x.metadata),
},
"text": {
"type": "CLOB",
"extract_func": lambda x: _escape_str(
x.get_content(metadata_mode=MetadataMode.NONE) or ""
),
},
}
def _stringify_list(lst: List) -> str:
return "(" + ",".join(f"'{item}'" for item in lst) + ")"
def table_exists(connection: Connection, table_name: str) -> bool:
try:
cursor = connection.cursor()
cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
except Exception as ex:
if "SQL0204N" in str(ex):
return False
raise
finally:
cursor.close()
return True
def _get_distance_function(distance_strategy: DistanceStrategy) -> str:
# Dictionary to map distance strategies to their corresponding function names
distance_strategy2function = {
DistanceStrategy.EUCLIDEAN_DISTANCE: "EUCLIDEAN",
DistanceStrategy.DOT_PRODUCT: "DOT",
DistanceStrategy.COSINE: "COSINE",
DistanceStrategy.MANHATTAN_DISTANCE: "MANHATTAN",
DistanceStrategy.HAMMING_DISTANCE: "HAMMING",
DistanceStrategy.EUCLIDEAN_SQUARED: "EUCLIDEAN_SQUARED",
}
# Attempt to return the corresponding distance function
if distance_strategy in distance_strategy2function:
return distance_strategy2function[distance_strategy]
# If it's an unsupported distance strategy, raise an error
raise ValueError(f"Unsupported distance strategy: {distance_strategy}")
@_handle_exceptions
def create_table(client: Connection, table_name: str, embedding_dim: int) -> None:
cols_dict = {
"id": "VARCHAR(64) PRIMARY KEY NOT NULL",
"doc_id": "VARCHAR(64)",
"embedding": f"vector({embedding_dim}, FLOAT32)",
"node_info": "BLOB",
"metadata": "BLOB",
"text": "CLOB",
}
if not table_exists(client, table_name):
cursor = client.cursor()
ddl_body = ", ".join(
f"{col_name} {col_type}" for col_name, col_type in cols_dict.items()
)
ddl = f"CREATE TABLE {table_name} ({ddl_body})"
try:
cursor.execute(ddl)
cursor.execute("COMMIT")
logger.info(f"Table {table_name} created successfully...")
finally:
cursor.close()
else:
logger.info(f"Table {table_name} already exists...")
@_handle_exceptions
def drop_table(connection: Connection, table_name: str) -> None:
if table_exists(connection, table_name):
cursor = connection.cursor()
try:
ddl = f"DROP TABLE {table_name}"
cursor.execute(ddl)
logger.info("Table dropped successfully...")
finally:
cursor.close()
else:
logger.info("Table not found...")
| DistanceStrategy |
python | getsentry__sentry | src/sentry/integrations/jira/actions/form.py | {
"start": 348,
"end": 950
} | class ____(IntegrationNotifyServiceForm):
provider = IntegrationProviderSlug.JIRA.value
def clean(self) -> dict[str, Any] | None:
cleaned_data = super().clean()
if cleaned_data is None:
return None
integration_id = cleaned_data.get("integration")
integration = integration_service.get_integration(
integration_id=integration_id, provider=self.provider
)
if not integration:
raise forms.ValidationError(_("Jira integration is a required field."), code="invalid")
return cleaned_data
| JiraNotifyServiceForm |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {
"start": 21578,
"end": 25135
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global people, engineers, managers, cars
people = Table(
"people",
metadata,
Column(
"person_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(50)),
Column("type", String(50)),
)
engineers = Table(
"engineers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
)
managers = Table(
"managers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("longer_status", String(70)),
)
cars = Table(
"cars",
metadata,
Column(
"car_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("owner", Integer, ForeignKey("people.person_id")),
)
def test_eager_empty(self):
"""test parent object with child relationship to an inheriting mapper,
using eager loads, works when there are no child objects present"""
class Person:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
def __repr__(self):
return "Engineer %s, status %s" % (self.name, self.status)
class Manager(Person):
def __repr__(self):
return "Manager %s, status %s" % (
self.name,
self.longer_status,
)
class Car:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __repr__(self):
return "Car number %d" % self.car_id
person_mapper = self.mapper_registry.map_imperatively(
Person,
people,
polymorphic_on=people.c.type,
polymorphic_identity="person",
)
self.mapper_registry.map_imperatively(
Engineer,
engineers,
inherits=person_mapper,
polymorphic_identity="engineer",
)
manager_mapper = self.mapper_registry.map_imperatively(
Manager,
managers,
inherits=person_mapper,
polymorphic_identity="manager",
)
self.mapper_registry.map_imperatively(
Car,
cars,
properties={
"manager": relationship(manager_mapper, lazy="joined")
},
)
sess = fixture_session()
car1 = Car()
car2 = Car()
car2.manager = Manager()
sess.add(car1)
sess.add(car2)
sess.flush()
sess.expunge_all()
carlist = sess.query(Car).all()
assert carlist[0].manager is None
assert carlist[1].manager.person_id == car2.manager.person_id
| RelationshipTest5 |
python | numba__numba | numba/tests/test_runtests.py | {
"start": 349,
"end": 11292
} | class ____(unittest.TestCase):
"""These test cases are meant to test the Numba test infrastructure itself.
Therefore, the logic used here shouldn't use numba.testing, but only the
upstream unittest, and run the numba test suite only in a subprocess."""
def get_testsuite_listing(self, args, *, subp_kwargs=None):
"""
Use `subp_kwargs` to pass extra argument to `subprocess.check_output`.
"""
subp_kwargs = subp_kwargs or {}
cmd = [sys.executable, '-m', 'numba.runtests', '-l'] + list(args)
out_bytes = subprocess.check_output(cmd, **subp_kwargs)
lines = out_bytes.decode('UTF-8').splitlines()
lines = [line for line in lines if line.strip()]
return lines
def check_listing_prefix(self, prefix):
listing = self.get_testsuite_listing([prefix])
for ln in listing[:-1]:
errmsg = '{!r} not startswith {!r}'.format(ln, prefix)
self.assertTrue(ln.startswith(prefix), msg=errmsg)
def check_testsuite_size(self, args, minsize):
"""
Check that the reported numbers of tests are at least *minsize*.
"""
lines = self.get_testsuite_listing(args)
last_line = lines[-1]
self.assertTrue('tests found' in last_line)
number = int(last_line.split(' ')[0])
# There may be some "skipped" messages at the beginning,
# so do an approximate check.
self.assertIn(len(lines), range(number + 1, number + 20))
self.assertGreaterEqual(number, minsize)
return lines
def check_all(self, ids):
lines = self.check_testsuite_size(ids, 5000)
# CUDA should be included by default
self.assertTrue(any('numba.cuda.tests.' in line for line in lines))
# As well as subpackage
self.assertTrue(any('numba.tests.npyufunc.test_' in line
for line in lines),)
def _get_numba_tests_from_listing(self, listing):
"""returns a filter on strings starting with 'numba.', useful for
selecting the 'numba' test names from a test listing."""
return filter(lambda x: x.startswith('numba.'), listing)
def test_default(self):
self.check_all([])
def test_all(self):
self.check_all(['numba.tests'])
def test_cuda(self):
# Even without CUDA enabled, there is at least one test
# (in numba.cuda.tests.nocuda)
minsize = 100 if cuda.is_available() else 1
self.check_testsuite_size(['numba.cuda.tests'], minsize)
@unittest.skipIf(not cuda.is_available(), "NO CUDA")
def test_cuda_submodules(self):
self.check_listing_prefix('numba.cuda.tests.cudadrv')
self.check_listing_prefix('numba.cuda.tests.cudapy')
self.check_listing_prefix('numba.cuda.tests.nocuda')
self.check_listing_prefix('numba.cuda.tests.cudasim')
def test_module(self):
self.check_testsuite_size(['numba.tests.test_storeslice'], 2)
self.check_testsuite_size(['numba.tests.test_nested_calls'], 10)
# Several modules
self.check_testsuite_size(['numba.tests.test_nested_calls',
'numba.tests.test_storeslice'], 12)
def test_subpackage(self):
self.check_testsuite_size(['numba.tests.npyufunc'], 50)
def test_random(self):
self.check_testsuite_size(
['--random', '0.1', 'numba.tests.npyufunc'], 5)
def test_include_exclude_tags(self):
def get_count(arg_list):
lines = self.get_testsuite_listing(arg_list)
self.assertIn('tests found', lines[-1])
count = int(lines[-1].split()[0])
self.assertTrue(count > 0)
return count
tags = ['long_running', 'long_running, important']
total = get_count(['numba.tests'])
for tag in tags:
included = get_count(['--tags', tag, 'numba.tests'])
excluded = get_count(['--exclude-tags', tag, 'numba.tests'])
self.assertEqual(total, included + excluded)
# check syntax with `=` sign in
included = get_count(['--tags=%s' % tag, 'numba.tests'])
excluded = get_count(['--exclude-tags=%s' % tag, 'numba.tests'])
self.assertEqual(total, included + excluded)
def test_check_shard(self):
tmpAll = self.get_testsuite_listing([])
tmp1 = self.get_testsuite_listing(['-j', '0:2'])
tmp2 = self.get_testsuite_listing(['-j', '1:2'])
lAll = set(self._get_numba_tests_from_listing(tmpAll))
l1 = set(self._get_numba_tests_from_listing(tmp1))
l2 = set(self._get_numba_tests_from_listing(tmp2))
# The difference between two adjacent shards should be less than 5% of
# the total
self.assertLess(abs(len(l2) - len(l1)), len(lAll) / 20)
self.assertLess(len(l1), len(lAll))
self.assertLess(len(l2), len(lAll))
def test_check_sharding_equivalent(self):
# get some shards
sharded = list()
for i in range(3):
subset = self.get_testsuite_listing(['-j', '{}:3'.format(i)])
slist = [*self._get_numba_tests_from_listing(subset)]
sharded.append(slist)
# get the always running tests
tmp = self.get_testsuite_listing(['--tag', 'always_test'])
always_running = set(self._get_numba_tests_from_listing(tmp))
# make sure there is at least one test that always runs
self.assertGreaterEqual(len(always_running), 1)
# check that each shard contains no repeats
sharded_sets = [set(x) for x in sharded]
for i in range(len(sharded)):
self.assertEqual(len(sharded_sets[i]), len(sharded[i]))
# check that the always running tests are in every shard, and then
# remove them from the shards
for shard in sharded_sets:
for test in always_running:
self.assertIn(test, shard)
shard.remove(test)
self.assertNotIn(test, shard)
# check that there is no overlap between the shards
for a, b in itertools.combinations(sharded_sets, 2):
self.assertFalse(a & b)
# check that the sum of the shards and the always running tests is the
# same as the full listing
sum_of_parts = set()
for x in sharded_sets:
sum_of_parts.update(x)
sum_of_parts.update(always_running)
full_listing = set(self._get_numba_tests_from_listing(
self.get_testsuite_listing([])))
self.assertEqual(sum_of_parts, full_listing)
@unittest.skipUnless(has_gitpython, "Requires gitpython")
def test_gitdiff(self):
# Check for git
try:
subprocess.call("git",
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except FileNotFoundError:
self.skipTest("no git available")
# default
outs = self.get_testsuite_listing(['-g'])
self.assertNotIn("Git diff by common ancestor", outs)
# using ancestor
outs = self.get_testsuite_listing(['-g=ancestor'])
self.assertIn("Git diff by common ancestor", outs)
# misspelled ancestor
subp_kwargs = dict(stderr=subprocess.DEVNULL)
with self.assertRaises(subprocess.CalledProcessError):
self.get_testsuite_listing(['-g=ancest'], subp_kwargs=subp_kwargs)
@unittest.skipUnless(has_pyyaml, "Requires pyyaml")
def test_azure_config(self):
from yaml import Loader
base_path = os.path.dirname(os.path.abspath(__file__))
azure_pipe = os.path.join(base_path, '..', '..', 'azure-pipelines.yml')
if not os.path.isfile(azure_pipe):
self.skipTest("'azure-pipelines.yml' is not available")
with open(os.path.abspath(azure_pipe), 'rt') as f:
data = f.read()
pipe_yml = yaml.load(data, Loader=Loader)
templates = pipe_yml['jobs']
# first look at the items in the first two templates, this is osx/linux
start_indexes = []
for tmplt in templates[:2]:
matrix = tmplt['parameters']['matrix']
for setup in matrix.values():
start_indexes.append(setup['TEST_START_INDEX'])
# next look at the items in the windows only template
winpath = ['..', '..', 'buildscripts', 'azure', 'azure-windows.yml']
azure_windows = os.path.join(base_path, *winpath)
if not os.path.isfile(azure_windows):
self.skipTest("'azure-windows.yml' is not available")
with open(os.path.abspath(azure_windows), 'rt') as f:
data = f.read()
windows_yml = yaml.load(data, Loader=Loader)
# There's only one template in windows and its keyed differently to the
# above, get its matrix.
matrix = windows_yml['jobs'][0]['strategy']['matrix']
for setup in matrix.values():
start_indexes.append(setup['TEST_START_INDEX'])
# sanity checks
# 1. That the TEST_START_INDEX is unique
self.assertEqual(len(start_indexes), len(set(start_indexes)))
# 2. That the TEST_START_INDEX is a complete range
lim_start_index = max(start_indexes) + 1
expected = [*range(lim_start_index)]
self.assertEqual(sorted(start_indexes), expected)
# 3. That the number of indexes matches the declared test count
self.assertEqual(lim_start_index, pipe_yml['variables']['TEST_COUNT'])
def test_no_compilation_on_list(self):
# Checks that the test suite doesn't do any CPU-side compilation on
# listing of tests.
code = """if 1:
from unittest import mock
from llvmlite import binding as llvm
error = RuntimeError("Detected compilation during test listing")
with mock.patch.object(llvm.ExecutionEngine, 'finalize_object',
side_effect=error):
import numba
{0}
"""
# Run with a jit function in the test to demonstrate failure
with self.assertRaises(subprocess.CalledProcessError) as raises:
cmd = [sys.executable, "-c", code.format("numba.njit(lambda:0)()")]
subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
timeout=60)
self.assertIn("Detected compilation during test listing",
raises.exception.stdout.decode('UTF-8'))
# Run to validate the test suite does not trigger compilation during
# listing.
cmd = [sys.executable, "-c", code.format("numba.test('-l')")]
subprocess.check_call(cmd,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,)
if __name__ == '__main__':
unittest.main()
| TestCase |
python | pyinstaller__pyinstaller | PyInstaller/building/api.py | {
"start": 16858,
"end": 54811
} | class ____(Target):
"""
Creates the final executable of the frozen app. This bundles all necessary files together.
"""
    def __init__(self, *args, **kwargs):
        """
        args
            One or more arguments that are either an instance of `Target` or an iterable representing TOC list.
        kwargs
            Possible keyword arguments:

            bootloader_ignore_signals
                Non-Windows only. If True, the bootloader process will ignore all ignorable signals. If False (default),
                it will forward all signals to the child process. Useful in situations where for example a supervisor
                process signals both the bootloader and the child (e.g., via a process group) to avoid signalling the
                child twice.
            console
                On Windows or macOS governs whether to use the console executable or the windowed executable. Always
                True on Linux/Unix (always console executable - it does not matter there).
            hide_console
                Windows only. In console-enabled executable, hide or minimize the console window if the program owns the
                console window (i.e., was not launched from existing console window). Depending on the setting, the
                console is hidden/minimized either early in the bootloader execution ('hide-early', 'minimize-early') or
                late in the bootloader execution ('hide-late', 'minimize-late'). The early option takes place as soon as
                the PKG archive is found. In onefile builds, the late option takes place after application has unpacked
                itself and before it launches the child process. In onedir builds, the late option takes place before
                starting the embedded python interpreter.
            disable_windowed_traceback
                Disable traceback dump of unhandled exception in windowed (noconsole) mode (Windows and macOS only),
                and instead display a message that this feature is disabled.
            debug
                Setting to True gives you progress messages from the executable (for console=False there will be
                annoying MessageBoxes on Windows).
            name
                The filename for the executable. On Windows suffix '.exe' is appended.
            exclude_binaries
                Forwarded to the PKG the EXE builds.
            icon
                Windows and macOS only. icon='myicon.ico' to use an icon file or icon='notepad.exe,0' to grab an icon
                resource. Defaults to use PyInstaller's console or windowed icon. Use icon=`NONE` to not add any icon.
            version
                Windows only. version='myversion.txt'. Use grab_version.py to get a version resource from an executable
                and then edit the output to create your own. (The syntax of version resources is so arcane that I would
                not attempt to write one from scratch).
            uac_admin
                Windows only. Setting to True creates a Manifest which will request elevation upon application start.
            uac_uiaccess
                Windows only. Setting to True allows an elevated application to work with Remote Desktop.
            argv_emulation
                macOS only. Enables argv emulation in macOS .app bundles (i.e., windowed bootloader). If enabled, the
                initial open document/URL Apple Events are intercepted by bootloader and converted into sys.argv.
            target_arch
                macOS only. Used to explicitly specify the target architecture; either single-arch ('x86_64' or 'arm64')
                or 'universal2'. Used in checks that the collected binaries contain the required arch slice(s) and/or
                to convert fat binaries into thin ones as necessary. If not specified (default), a single-arch build
                corresponding to running architecture is assumed.
            codesign_identity
                macOS only. Use the provided identity to sign collected binaries and the generated executable. If
                signing identity is not provided, ad-hoc signing is performed.
            entitlements_file
                macOS only. Optional path to entitlements file to use with code signing of collected binaries
                (--entitlements option to codesign utility).
            contents_directory
                Onedir mode only. Specifies the name of the directory where all files bar the executable will be placed.
                Setting the name to '.' (or '' or None) re-enables old onedir layout without contents directory.
        """
        from PyInstaller.config import CONF

        super().__init__()

        # --- read options from kwargs ---
        # Available options for EXE in .spec files.
        self.exclude_binaries = kwargs.get('exclude_binaries', False)
        self.bootloader_ignore_signals = kwargs.get('bootloader_ignore_signals', False)
        self.console = kwargs.get('console', True)
        self.hide_console = kwargs.get('hide_console', None)
        self.disable_windowed_traceback = kwargs.get('disable_windowed_traceback', False)
        self.debug = kwargs.get('debug', False)
        self.name = kwargs.get('name', None)
        self.icon = kwargs.get('icon', None)
        self.versrsrc = kwargs.get('version', None)
        self.manifest = kwargs.get('manifest', None)
        self.resources = kwargs.get('resources', [])
        self.strip = kwargs.get('strip', False)
        self.upx_exclude = kwargs.get("upx_exclude", [])
        self.runtime_tmpdir = kwargs.get('runtime_tmpdir', None)
        self.contents_directory = kwargs.get("contents_directory", "_internal")
        # If ``append_pkg`` is false, the archive will not be appended to the exe, but copied beside it.
        self.append_pkg = kwargs.get('append_pkg', True)

        # On Windows allows the exe to request admin privileges.
        self.uac_admin = kwargs.get('uac_admin', False)
        self.uac_uiaccess = kwargs.get('uac_uiaccess', False)

        # macOS argv emulation
        self.argv_emulation = kwargs.get('argv_emulation', False)

        # Target architecture (macOS only)
        self.target_arch = kwargs.get('target_arch', None)
        if is_darwin:
            if self.target_arch is None:
                import platform
                self.target_arch = platform.machine()
            else:
                assert self.target_arch in {'x86_64', 'arm64', 'universal2'}, \
                    f"Unsupported target arch: {self.target_arch}"
            logger.info("EXE target arch: %s", self.target_arch)
        else:
            self.target_arch = None  # explicitly disable

        # Code signing identity (macOS only)
        self.codesign_identity = kwargs.get('codesign_identity', None)
        if is_darwin:
            logger.info("Code signing identity: %s", self.codesign_identity)
        else:
            self.codesign_identity = None  # explicitly disable

        # Code signing entitlements
        self.entitlements_file = kwargs.get('entitlements_file', None)

        # UPX needs to be both available and enabled for the target.
        self.upx = CONF['upx_available'] and kwargs.get('upx', False)

        # --- validate options against the current platform ---
        # Catch and clear options that are unsupported on specific platforms.
        if self.versrsrc and not is_win:
            logger.warning('Ignoring version information; supported only on Windows!')
            self.versrsrc = None
        if self.manifest and not is_win:
            logger.warning('Ignoring manifest; supported only on Windows!')
            self.manifest = None
        if self.resources and not is_win:
            logger.warning('Ignoring resources; supported only on Windows!')
            self.resources = []
        if self.icon and not (is_win or is_darwin):
            logger.warning('Ignoring icon; supported only on Windows and macOS!')
            self.icon = None
        if self.hide_console and not is_win:
            logger.warning('Ignoring hide_console; supported only on Windows!')
            self.hide_console = None

        if self.contents_directory in ("", "."):
            self.contents_directory = None  # Re-enable old onedir layout without contents directory.
        elif self.contents_directory == ".." or "/" in self.contents_directory or "\\" in self.contents_directory:
            raise SystemExit(
                f'ERROR: Invalid value "{self.contents_directory}" passed to `--contents-directory` or '
                '`contents_directory`. Exactly one directory level is required (or just "." to disable the '
                'contents directory).'
            )

        if not kwargs.get('embed_manifest', True):
            from PyInstaller.exceptions import RemovedExternalManifestError
            raise RemovedExternalManifestError(
                "Please remove the 'embed_manifest' argument to EXE() in your spec file."
            )

        # --- compute output paths ---
        # Old .spec format included in 'name' the path where to put created app. New format includes only exename.
        #
        # Ignore fullpath in the 'name' and prepend DISTPATH or WORKPATH.
        # DISTPATH - onefile
        # WORKPATH - onedir
        if self.exclude_binaries:
            # onedir mode - create executable in WORKPATH.
            self.name = os.path.join(CONF['workpath'], os.path.basename(self.name))
        else:
            # onefile mode - create executable in DISTPATH.
            self.name = os.path.join(CONF['distpath'], os.path.basename(self.name))

        # Old .spec format included on Windows in 'name' .exe suffix.
        if is_win or is_cygwin:
            # Append .exe suffix if it is not already there.
            if not self.name.endswith('.exe'):
                self.name += '.exe'
            base_name = os.path.splitext(os.path.basename(self.name))[0]
        else:
            base_name = os.path.basename(self.name)
        # Create the CArchive PKG in WORKPATH. When instancing PKG(), set name so that guts check can test whether the
        # file already exists.
        self.pkgname = os.path.join(CONF['workpath'], base_name + '.pkg')

        # --- build the TOC from positional arguments ---
        self.toc = []

        for arg in args:
            # Valid arguments: PYZ object, Splash object, and TOC-list iterables
            if isinstance(arg, (PYZ, Splash)):
                # Add object as an entry to the TOC, and merge its dependencies TOC
                if isinstance(arg, PYZ):
                    self.toc.append((os.path.basename(arg.name), arg.name, "PYZ"))
                else:
                    self.toc.append((os.path.basename(arg.name), arg.name, "SPLASH"))
                self.toc.extend(arg.dependencies)
            elif miscutils.is_iterable(arg):
                # TOC-like iterable
                self.toc.extend(arg)
            else:
                raise TypeError(f"Invalid argument type for EXE: {type(arg)!r}")

        # --- encode run-time options as OPTION entries in the TOC ---
        if is_nogil:
            # Signal to bootloader that python was built with Py_GIL_DISABLED, in order to select correct `PyConfig`
            # structure layout at run-time.
            self.toc.append(("pyi-python-flag Py_GIL_DISABLED", "", "OPTION"))

        if self.runtime_tmpdir is not None:
            self.toc.append(("pyi-runtime-tmpdir " + self.runtime_tmpdir, "", "OPTION"))

        if self.bootloader_ignore_signals:
            # no value; presence means "true"
            self.toc.append(("pyi-bootloader-ignore-signals", "", "OPTION"))

        if self.disable_windowed_traceback:
            # no value; presence means "true"
            self.toc.append(("pyi-disable-windowed-traceback", "", "OPTION"))

        if self.argv_emulation:
            # no value; presence means "true"
            self.toc.append(("pyi-macos-argv-emulation", "", "OPTION"))

        if self.contents_directory:
            self.toc.append(("pyi-contents-directory " + self.contents_directory, "", "OPTION"))

        if self.hide_console:
            # Validate the value
            _HIDE_CONSOLE_VALUES = {'hide-early', 'minimize-early', 'hide-late', 'minimize-late'}
            self.hide_console = self.hide_console.lower()
            if self.hide_console not in _HIDE_CONSOLE_VALUES:
                raise ValueError(
                    f"Invalid hide_console value: {self.hide_console}! Allowed values: {_HIDE_CONSOLE_VALUES}"
                )
            self.toc.append((f"pyi-hide-console {self.hide_console}", "", "OPTION"))

        # If the icon path is relative, make it relative to the .spec file.
        if self.icon and self.icon != "NONE":
            if isinstance(self.icon, list):
                self.icon = [self._makeabs(ic) for ic in self.icon]
            else:
                self.icon = [self._makeabs(self.icon)]

        # --- Windows-specific resources: icon, manifest, version info ---
        if is_win:
            if not self.icon:
                # --icon not specified; use default from bootloader folder
                if self.console:
                    ico = 'icon-console.ico'
                else:
                    ico = 'icon-windowed.ico'
                self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'bootloader', 'images', ico)

            # Prepare manifest for the executable by creating minimal manifest or modifying the supplied one.
            if self.manifest:
                # Determine if we were given a filename or an XML string.
                if "<" in self.manifest:
                    self.manifest = self.manifest.encode("utf-8")
                else:
                    self.manifest = self._makeabs(self.manifest)
                    with open(self.manifest, "rb") as fp:
                        self.manifest = fp.read()
            self.manifest = winmanifest.create_application_manifest(self.manifest, self.uac_admin, self.uac_uiaccess)

            if self.versrsrc:
                if isinstance(self.versrsrc, versioninfo.VSVersionInfo):
                    # We were passed a valid versioninfo.VSVersionInfo structure
                    pass
                elif isinstance(self.versrsrc, (str, bytes, os.PathLike)):
                    # File path; either absolute, or relative to the spec file
                    self.versrsrc = self._makeabs(self.versrsrc)
                    logger.debug("Loading version info from file: %r", self.versrsrc)
                    self.versrsrc = versioninfo.load_version_info_from_text_file(self.versrsrc)
                else:
                    raise TypeError(f"Unsupported type for version info argument: {type(self.versrsrc)!r}")

        # Identify python shared library. This is needed both for PKG (where we need to store the name so that
        # bootloader can look it up), and for macOS-specific processing of the generated executable (adjusting the SDK
        # version).
        #
        # NOTE: we already performed an equivalent search (using the same `get_python_library_path` helper) during the
        # analysis stage to ensure that the python shared library is collected. Unfortunately, with the way data passing
        # works in onedir builds, we cannot look up the value in the TOC at this stage, and we need to search again.
        self.python_lib = bindepend.get_python_library_path()
        if self.python_lib is None:
            from PyInstaller.exceptions import PythonLibraryNotFoundError
            raise PythonLibraryNotFoundError()

        # On AIX, the python shared library might in fact be an ar archive with shared object inside it, and needs to
        # be `dlopen`'ed with full name (for example, `libpython3.9.a(libpython3.9.so)`. So if the library's suffix is
        # .a, adjust the name accordingly, assuming fixed format for the shared object name. NOTE: the information about
        # shared object name is in fact available from `ldd` but not propagated from our binary dependency analysis. If
        # we ever need to determine the shared object's name dynamically, we could write a simple ar parser, based on
        # information from `https://www.ibm.com/docs/en/aix/7.3?topic=formats-ar-file-format-big`.
        if is_aix:
            _, ext = os.path.splitext(self.python_lib)
            if ext == '.a':
                _py_major, _py_minor = sys.version_info[:2]
                self.python_lib += f"(libpython{_py_major}.{_py_minor}.so)"

        # Normalize TOC
        self.toc = normalize_toc(self.toc)

        # --- build the PKG archive and register the bootloader ---
        self.pkg = PKG(
            toc=self.toc,
            python_lib_name=os.path.basename(self.python_lib),
            name=self.pkgname,
            cdict=kwargs.get('cdict', None),
            exclude_binaries=self.exclude_binaries,
            strip_binaries=self.strip,
            upx_binaries=self.upx,
            upx_exclude=self.upx_exclude,
            target_arch=self.target_arch,
            codesign_identity=self.codesign_identity,
            entitlements_file=self.entitlements_file
        )
        self.dependencies = self.pkg.dependencies

        # Get the path of the bootloader and store it in a TOC, so it can be checked for being changed.
        exe = self._bootloader_file('run', '.exe' if is_win or is_cygwin else '')
        self.exefiles = [(os.path.basename(exe), exe, 'EXECUTABLE')]

        self.__postinit__()
    # Attribute names paired with their change-check functions; used by the base
    # Target guts machinery to decide whether a cached build can be reused.
    _GUTS = (
        # input parameters
        ('name', _check_guts_eq),
        ('console', _check_guts_eq),
        ('debug', _check_guts_eq),
        ('exclude_binaries', _check_guts_eq),
        ('icon', _check_guts_eq),
        ('versrsrc', _check_guts_eq),
        ('uac_admin', _check_guts_eq),
        ('uac_uiaccess', _check_guts_eq),
        ('manifest', _check_guts_eq),
        ('append_pkg', _check_guts_eq),
        ('argv_emulation', _check_guts_eq),
        ('target_arch', _check_guts_eq),
        ('codesign_identity', _check_guts_eq),
        ('entitlements_file', _check_guts_eq),
        # for the case the directory is shared between platforms:
        ('pkgname', _check_guts_eq),
        ('toc', _check_guts_eq),
        ('resources', _check_guts_eq),
        ('strip', _check_guts_eq),
        ('upx', _check_guts_eq),
        ('mtm', None),  # checked below
        # derived values
        ('exefiles', _check_guts_toc),
        ('python_lib', _check_guts_eq),
    )

    def _check_guts(self, data, last_build):
        """
        Return True if the executable must be rebuilt, False if the cached result is still valid.
        """
        # Rebuild if the output executable (or the stand-alone PKG, when not appended) is missing.
        if not os.path.exists(self.name):
            logger.info("Rebuilding %s because %s missing", self.tocbasename, os.path.basename(self.name))
            return True
        if not self.append_pkg and not os.path.exists(self.pkgname):
            logger.info("Rebuilding because %s missing", os.path.basename(self.pkgname))
            return True
        # Delegate the _GUTS attribute comparisons to the base class.
        if Target._check_guts(self, data, last_build):
            return True

        mtm = data['mtm']
        # Executable's current mtime must match the one recorded at build time...
        if mtm != miscutils.mtime(self.name):
            logger.info("Rebuilding %s because mtimes don't match", self.tocbasename)
            return True
        # ...and must not pre-date the PKG's TOC file.
        if mtm < miscutils.mtime(self.pkg.tocfilename):
            logger.info("Rebuilding %s because pkg is more recent", self.tocbasename)
            return True

        return False
@staticmethod
def _makeabs(path):
"""
Helper for anchoring relative paths to spec file location.
"""
from PyInstaller.config import CONF
if os.path.isabs(path):
return path
else:
return os.path.join(CONF['specpath'], path)
def _bootloader_file(self, exe, extension=None):
"""
Pick up the right bootloader file - debug, console, windowed.
"""
# Having console/windowed bootloader makes sense only on Windows and macOS.
if is_win or is_darwin:
if not self.console:
exe = exe + 'w'
# There are two types of bootloaders:
# run - release, no verbose messages in console.
# run_d - contains verbose messages in console.
if self.debug:
exe = exe + '_d'
if extension:
exe = exe + extension
bootloader_file = os.path.join(HOMEPATH, 'PyInstaller', 'bootloader', PLATFORM, exe)
logger.info('Bootloader %s' % bootloader_file)
return bootloader_file
    def assemble(self):
        """
        Build the final executable: copy the bootloader, embed platform-specific
        resources, attach (or copy) the PKG archive, and post-process/sign the result.
        """
        # On Windows, we used to append .notanexecutable to the intermediate/temporary file name to (attempt to)
        # prevent interference from anti-virus programs with the build process (see #6467). This is now disabled
        # as we wrap all processing steps that modify the executable in the `_retry_operation` helper; however,
        # we keep around the `build_name` variable instead of directly using `self.name`, just in case we need
        # to re-enable it...
        build_name = self.name

        logger.info("Building EXE from %s", self.tocbasename)
        # Remove any stale output (file or directory) and ensure the destination directory exists.
        if os.path.exists(self.name):
            if os.path.isdir(self.name):
                _rmtree(self.name)  # will prompt for confirmation if --noconfirm is not given
            else:
                os.remove(self.name)
        if not os.path.exists(os.path.dirname(self.name)):
            os.makedirs(os.path.dirname(self.name))

        bootloader_exe = self.exefiles[0][1]  # pathname of bootloader
        if not os.path.exists(bootloader_exe):
            raise SystemExit(_MISSING_BOOTLOADER_ERRORMSG)

        # Step 1: copy the bootloader file, and perform any operations that need to be done prior to appending the PKG.
        logger.info("Copying bootloader EXE to %s", build_name)
        self._retry_operation(shutil.copyfile, bootloader_exe, build_name)
        self._retry_operation(os.chmod, build_name, 0o755)

        if is_win:
            # First, remove all resources from the file. This ensures that no manifest is embedded, even if bootloader
            # was compiled with a toolchain that forcibly embeds a default manifest (e.g., mingw toolchain from msys2).
            self._retry_operation(winresource.remove_all_resources, build_name)
            # Embed icon.
            if self.icon != "NONE":
                logger.info("Copying icon to EXE")
                self._retry_operation(icon.CopyIcons, build_name, self.icon)
            # Embed version info.
            if self.versrsrc:
                logger.info("Copying version information to EXE")
                self._retry_operation(versioninfo.write_version_info_to_executable, build_name, self.versrsrc)
            # Embed/copy other resources.
            logger.info("Copying %d resources to EXE", len(self.resources))
            for resource in self.resources:
                self._retry_operation(self._copy_windows_resource, build_name, resource)
            # Embed the manifest into the executable.
            logger.info("Embedding manifest in EXE")
            self._retry_operation(winmanifest.write_manifest_to_executable, build_name, self.manifest)
        elif is_darwin:
            # Convert bootloader to the target arch
            logger.info("Converting EXE to target arch (%s)", self.target_arch)
            osxutils.binary_to_target_arch(build_name, self.target_arch, display_name='Bootloader EXE')

        # Step 2: append the PKG, if necessary
        if self.append_pkg:
            append_file = self.pkg.name  # Append PKG
            append_type = 'PKG archive'  # For debug messages
        else:
            # In onefile mode, copy the stand-alone PKG next to the executable. In onedir, this will be done by the
            # COLLECT() target.
            if not self.exclude_binaries:
                pkg_dst = os.path.join(os.path.dirname(build_name), os.path.basename(self.pkgname))
                logger.info("Copying stand-alone PKG archive from %s to %s", self.pkg.name, pkg_dst)
                shutil.copyfile(self.pkg.name, pkg_dst)
            else:
                logger.info("Stand-alone PKG archive will be handled by COLLECT")

            # The bootloader requires package side-loading to be explicitly enabled, which is done by embedding custom
            # signature to the executable. This extra signature ensures that the sideload-enabled executable is at least
            # slightly different from the stock bootloader executables, which should prevent antivirus programs from
            # flagging our stock bootloaders due to sideload-enabled applications in the wild.

            # Write to temporary file
            pkgsig_file = self.pkg.name + '.sig'
            with open(pkgsig_file, "wb") as f:
                # 8-byte MAGIC; slightly changed PKG MAGIC pattern
                f.write(b'MEI\015\013\012\013\016')

            append_file = pkgsig_file  # Append PKG-SIG
            append_type = 'PKG sideload signature'  # For debug messages

        if is_linux:
            # Linux: append data into custom ELF section using objcopy.
            logger.info("Appending %s to custom ELF section in EXE", append_type)
            cmd = ['objcopy', '--add-section', f'pydata={append_file}', build_name]
            p = subprocess.run(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, encoding='utf-8')
            if p.returncode:
                raise SystemError(f"objcopy Failure: {p.returncode} {p.stdout}")
        elif is_darwin:
            # macOS: remove signature, append data, and fix-up headers so that the appended data appears to be part of
            # the executable (which is required by strict validation during code-signing).

            # Strip signatures from all arch slices. Strictly speaking, we need to remove signature (if present) from
            # the last slice, because we will be appending data to it. When building universal2 bootloaders natively on
            # macOS, only arm64 slices have a (dummy) signature. However, when cross-compiling with osxcross, we seem to
            # get dummy signatures on both x86_64 and arm64 slices. While the former should not have any impact, it does
            # seem to cause issues with further binary signing using real identity. Therefore, we remove all signatures
            # and re-sign the binary using dummy signature once the data is appended.
            logger.info("Removing signature(s) from EXE")
            osxutils.remove_signature_from_binary(build_name)

            # Fix Mach-O image UUID(s) in executable to ensure uniqueness across different builds.
            # NOTE: even if PKG is side-loaded, use the hash of its contents to generate the new UUID.
            # NOTE: this step is performed *before* PKG is appended and sizes are fixed in the executable's headers;
            # this ensures that we are operating only on original header size instead of enlarged one (which could
            # be significantly larger in large onefile builds).
            logger.info("Modifying Mach-O image UUID(s) in EXE")
            osxutils.update_exe_identifier(build_name, self.pkg.name)

            # Append the data
            logger.info("Appending %s to EXE", append_type)
            self._append_data_to_exe(build_name, append_file)

            # Fix Mach-O headers
            logger.info("Fixing EXE headers for code signing")
            osxutils.fix_exe_for_code_signing(build_name)
        else:
            # Fall back to just appending data at the end of the file
            logger.info("Appending %s to EXE", append_type)
            self._retry_operation(self._append_data_to_exe, build_name, append_file)

        # Step 3: post-processing
        if is_win:
            # Set checksum to appease antiviral software. Also set build timestamp to current time to increase entropy
            # (but honor SOURCE_DATE_EPOCH environment variable for reproducible builds).
            logger.info("Fixing EXE headers")
            build_timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
            self._retry_operation(winutils.set_exe_build_timestamp, build_name, build_timestamp)
            self._retry_operation(winutils.update_exe_pe_checksum, build_name)
        elif is_darwin:
            # If the version of macOS SDK used to build bootloader exceeds that of macOS SDK used to built Python
            # library (and, by extension, bundled Tcl/Tk libraries), force the version declared by the frozen executable
            # to match that of the Python library.
            # Having macOS attempt to enable new features (based on SDK version) for frozen application has no benefit
            # if the Python library does not support them as well.
            # On the other hand, there seem to be UI issues in tkinter due to failed or partial enablement of dark mode
            # (i.e., the bootloader executable being built against SDK 10.14 or later, which causes macOS to enable dark
            # mode, and Tk libraries being built against an earlier SDK version that does not support the dark mode).
            # With python.org Intel macOS installers, this manifests as black Tk windows and UI elements (see issue
            # #5827), while in Anaconda python, it may result in white text on bright background.
            pylib_version = osxutils.get_macos_sdk_version(self.python_lib)
            exe_version = osxutils.get_macos_sdk_version(build_name)
            if pylib_version < exe_version:
                logger.info(
                    "Rewriting the executable's macOS SDK version (%d.%d.%d) to match the SDK version of the Python "
                    "library (%d.%d.%d) in order to avoid inconsistent behavior and potential UI issues in the "
                    "frozen application.", *exe_version, *pylib_version
                )
                osxutils.set_macos_sdk_version(build_name, *pylib_version)

            # Re-sign the binary (either ad-hoc or using real identity, if provided).
            logger.info("Re-signing the EXE")
            osxutils.sign_binary(build_name, self.codesign_identity, self.entitlements_file)

        # Ensure executable flag is set
        self._retry_operation(os.chmod, build_name, 0o755)
        # Get mtime for storing into the guts
        self.mtm = self._retry_operation(miscutils.mtime, build_name)
        if build_name != self.name:
            self._retry_operation(os.rename, build_name, self.name)
        logger.info("Building EXE from %s completed successfully.", self.tocbasename)
def _copy_windows_resource(self, build_name, resource_spec):
import pefile
# Helper for optionally converting integer strings to values; resource types and IDs/names can be specified as
# either numeric values or custom strings...
def _to_int(value):
try:
return int(value)
except Exception:
return value
logger.debug("Processing resource: %r", resource_spec)
resource = resource_spec.split(",") # filename,[type],[name],[language]
if len(resource) < 1 or len(resource) > 4:
raise ValueError(
f"Invalid Windows resource specifier {resource_spec!r}! "
f"Must be in format 'filename,[type],[name],[language]'!"
)
# Anchor resource file to spec file location, if necessary.
src_filename = self._makeabs(resource[0])
# Ensure file exists.
if not os.path.isfile(src_filename):
raise ValueError(f"Resource file {src_filename!r} does not exist!")
# Check if src_filename points to a PE file or an arbitrary (data) file.
try:
with pefile.PE(src_filename, fast_load=True):
is_pe_file = True
except Exception:
is_pe_file = False
if is_pe_file:
# If resource file is PE file, copy all resources from it, subject to specified type, name, and language.
logger.debug("Resource file %r is a PE file...", src_filename)
# Resource type, name, and language serve as filters. If not specified, use "*".
resource_type = _to_int(resource[1]) if len(resource) >= 2 else "*"
resource_name = _to_int(resource[2]) if len(resource) >= 3 else "*"
resource_lang = _to_int(resource[3]) if len(resource) >= 4 else "*"
try:
winresource.copy_resources_from_pe_file(
build_name,
src_filename,
[resource_type],
[resource_name],
[resource_lang],
)
except Exception as e:
raise IOError(f"Failed to copy resources from PE file {src_filename!r}") from e
else:
logger.debug("Resource file %r is an arbitrary data file...", src_filename)
# For arbitrary data file, resource type and name need to be provided.
if len(resource) < 3:
raise ValueError(
f"Invalid Windows resource specifier {resource_spec!r}! "
f"For arbitrary data file, the format is 'filename,type,name,[language]'!"
)
resource_type = _to_int(resource[1])
resource_name = _to_int(resource[2])
resource_lang = _to_int(resource[3]) if len(resource) >= 4 else 0 # LANG_NEUTRAL
# Prohibit wildcards for resource type and name.
if resource_type == "*":
raise ValueError(
f"Invalid Windows resource specifier {resource_spec!r}! "
f"For arbitrary data file, resource type cannot be a wildcard (*)!"
)
if resource_name == "*":
raise ValueError(
f"Invalid Windows resource specifier {resource_spec!r}! "
f"For arbitrary data file, resource ma,e cannot be a wildcard (*)!"
)
try:
with open(src_filename, 'rb') as fp:
data = fp.read()
winresource.add_or_update_resource(
build_name,
data,
resource_type,
[resource_name],
[resource_lang],
)
except Exception as e:
raise IOError(f"Failed to embed data file {src_filename!r} as Windows resource") from e
def _append_data_to_exe(self, build_name, append_file):
with open(build_name, 'ab') as outf:
with open(append_file, 'rb') as inf:
shutil.copyfileobj(inf, outf, length=64 * 1024)
    @staticmethod
    def _retry_operation(func, *args, max_attempts=20):
        """
        Attempt to execute the given function `max_attempts` number of times while catching exceptions that are usually
        associated with Windows anti-virus programs temporarily locking the access to the executable.
        """
        def _is_allowed_exception(e):
            """
            Helper to determine whether the given exception is eligible for retry or not.
            """
            if isinstance(e, PermissionError):
                # Always retry on all instances of PermissionError
                return True
            elif is_win:
                from PyInstaller.compat import pywintypes

                # Windows-specific errno and winerror codes.
                # https://learn.microsoft.com/en-us/cpp/c-runtime-library/errno-constants
                _ALLOWED_ERRNO = {
                    13,  # EACCES (would typically be a PermissionError instead)
                    22,  # EINVAL (reported to be caused by Crowdstrike; see #7840)
                }
                # https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-
                _ALLOWED_WINERROR = {
                    5,  # ERROR_ACCESS_DENIED (reported in #7825)
                    32,  # ERROR_SHARING_VIOLATION (exclusive lock via `CreateFileW` flags, or via `_locked`).
                    110,  # ERROR_OPEN_FAILED (reported in #8138)
                }

                if isinstance(e, OSError):
                    # For OSError exceptions other than PermissionError, validate errno.
                    if e.errno in _ALLOWED_ERRNO:
                        return True
                    # OSError typically translates `winerror` into `errno` equivalent; but try to match the original
                    # values as a fall back, just in case. `OSError.winerror` attribute exists only on Windows.
                    if e.winerror in _ALLOWED_WINERROR:
                        return True
                elif isinstance(e, pywintypes.error):
                    # pywintypes.error is raised by helper functions that use win32 C API bound via pywin32-ctypes.
                    if e.winerror in _ALLOWED_WINERROR:
                        return True
            return False

        func_name = func.__name__
        for attempt in range(max_attempts):
            try:
                return func(*args)
            except Exception as e:
                # Check if exception is eligible for retry; if not, also check its immediate cause (in case the
                # exception was thrown from an eligible exception).
                if not _is_allowed_exception(e) and not _is_allowed_exception(e.__context__):
                    raise
                # Retry after sleep (unless this was our last attempt)
                if attempt < max_attempts - 1:
                    # The delay grows as attempts run out: 1/(remaining attempts) seconds.
                    sleep_duration = 1 / (max_attempts - 1 - attempt)
                    logger.warning(
                        f"Execution of {func_name!r} failed on attempt #{attempt + 1} / {max_attempts}: {e!r}. "
                        f"Retrying in {sleep_duration:.2f} second(s)..."
                    )
                    time.sleep(sleep_duration)
                else:
                    logger.warning(
                        f"Execution of {func_name!r} failed on attempt #{attempt + 1} / {max_attempts}: {e!r}."
                    )

        # All attempts exhausted; chain the last retry-eligible exception as the cause.
        raise RuntimeError(f"Execution of {func_name!r} failed - no more attempts left!") from e
| EXE |
python | mlflow__mlflow | dev/clint/src/clint/rules/invalid_abstract_method.py | {
"start": 84,
"end": 1684
class ____(Rule):
    def _message(self) -> str:
        return (
            "Abstract method should only contain a single statement/expression, "
            "and it must be `pass`, `...`, or a docstring."
        )

    @staticmethod
    def _is_abstract_method(
        node: ast.FunctionDef | ast.AsyncFunctionDef, resolver: Resolver
    ) -> bool:
        """A method is abstract iff any of its decorators resolves to `abc.abstractmethod`."""
        for decorator in node.decorator_list:
            resolved = resolver.resolve(decorator)
            if resolved and resolved == ["abc", "abstractmethod"]:
                return True
        return False

    @staticmethod
    def _has_invalid_body(node: ast.FunctionDef | ast.AsyncFunctionDef) -> bool:
        """Return True if the abstract method's body is anything other than a lone `pass`, `...`, or docstring."""
        # Multiple statements/expressions are never allowed.
        if len(node.body) > 1:
            return True

        only_stmt = node.body[0]
        # A bare `pass` is fine.
        if isinstance(only_stmt, ast.Pass):
            return False
        # A lone constant expression is fine only when it is `...` or a docstring.
        if isinstance(only_stmt, ast.Expr) and isinstance(only_stmt.value, ast.Constant):
            const = only_stmt.value.value
            return const is not ... and not isinstance(const, str)
        # Anything else (assignment, call, return, ...) is invalid.
        return True

    @staticmethod
    def check(node: ast.FunctionDef | ast.AsyncFunctionDef, resolver: Resolver) -> bool:
        # Flag only methods that are abstract AND carry a disallowed body.
        if not InvalidAbstractMethod._is_abstract_method(node, resolver):
            return False
        return InvalidAbstractMethod._has_invalid_body(node)
| InvalidAbstractMethod |
python | facebook__pyre-check | client/json_rpc.py | {
"start": 1059,
"end": 1314
class ____(JSONRPCException):
    """
    The JSON received is not a valid Request object.
    Internally we also raise it when the JSON sent is not a valid Response object.
    """

    def error_code(self) -> int:
        # -32600 is the JSON-RPC 2.0 predefined "Invalid Request" error code.
        return -32600
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 67298,
"end": 67491
class ____(ValidatorTestMixin, TestCase):
    # Validator implementation exercised by the shared mixin's tests.
    Validator = validators.Draft201909Validator

    # (schema, instance) pair that must validate successfully.
    valid: tuple[dict, dict] = ({}, {})
    # (schema, instance) pair that must fail validation ("foo" is not an integer).
    invalid = {"type": "integer"}, "foo"
| TestDraft201909Validator |
python | mlflow__mlflow | mlflow/entities/run.py | {
"start": 373,
"end": 2763
class ____(_MlflowObject):
    """
    Run object.

    Bundles a run's metadata (``info``), logged data (``data``), and the
    optional inputs/outputs records.
    """

    def __init__(
        self,
        run_info: RunInfo,
        run_data: RunData,
        run_inputs: RunInputs | None = None,
        run_outputs: RunOutputs | None = None,
    ) -> None:
        # run_info is mandatory; inputs/outputs may legitimately be absent.
        if run_info is None:
            raise MlflowException("run_info cannot be None")
        self._info = run_info
        self._data = run_data
        self._inputs = run_inputs
        self._outputs = run_outputs

    @property
    def info(self) -> RunInfo:
        """
        The run metadata, such as the run id, start time, and status.

        :rtype: :py:class:`mlflow.entities.RunInfo`
        """
        return self._info

    @property
    def data(self) -> RunData:
        """
        The run data, including metrics, parameters, and tags.

        :rtype: :py:class:`mlflow.entities.RunData`
        """
        return self._data

    @property
    def inputs(self) -> RunInputs | None:
        """
        The run inputs, including dataset inputs. ``None`` when not provided.

        :rtype: :py:class:`mlflow.entities.RunInputs`
        """
        return self._inputs

    @property
    def outputs(self) -> RunOutputs | None:
        """
        The run outputs, including model outputs. ``None`` when not provided.

        :rtype: :py:class:`mlflow.entities.RunOutputs`
        """
        return self._outputs

    def to_proto(self):
        # Build the protobuf message; optional sections are merged only when present.
        run = ProtoRun()
        run.info.MergeFrom(self.info.to_proto())
        if self.data:
            run.data.MergeFrom(self.data.to_proto())
        if self.inputs:
            run.inputs.MergeFrom(self.inputs.to_proto())
        if self.outputs:
            run.outputs.MergeFrom(self.outputs.to_proto())
        return run

    @classmethod
    def from_proto(cls, proto):
        # Inverse of to_proto(); missing optional sections become None.
        return cls(
            RunInfo.from_proto(proto.info),
            RunData.from_proto(proto.data),
            RunInputs.from_proto(proto.inputs) if proto.inputs else None,
            RunOutputs.from_proto(proto.outputs) if proto.outputs else None,
        )

    def to_dictionary(self) -> dict[Any, Any]:
        # Plain-dict representation; optional sections are included only when set.
        run_dict = {
            "info": dict(self.info),
        }
        if self.data:
            run_dict["data"] = self.data.to_dictionary()
        if self.inputs:
            run_dict["inputs"] = self.inputs.to_dictionary()
        if self.outputs:
            run_dict["outputs"] = self.outputs.to_dictionary()
        return run_dict
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 11385,
"end": 12564
} | class ____(nn.Module):
def __init__(self, config: OmDetTurboConfig):
super().__init__()
self.model = AutoModel.from_config(config.text_config)
self.text_projection = nn.Parameter(torch.zeros(config.text_projection_in_dim, config.text_projection_out_dim))
def forward(self, hidden_states, mask=None, encode_type="task"):
text_outputs = self.model(hidden_states)
pooled_output = text_outputs[0]
if encode_type == "task":
if mask is None:
raise ValueError("mask is required for task encoding")
max_len = (mask != 0).sum(1).max().item()
truncated_mask = mask[:, :max_len]
truncated_output = pooled_output[:, :max_len, :]
return truncated_output.transpose(0, 1), truncated_mask
elif encode_type == "class":
max_pooled_output = pooled_output[torch.arange(pooled_output.shape[0]), hidden_states.argmax(dim=-1)]
projected_output = max_pooled_output @ self.text_projection
return projected_output
else:
raise ValueError(f"encode_type {encode_type} is not supported")
| OmDetTurboLanguageBackbone |
python | kamyu104__LeetCode-Solutions | Python/greatest-english-letter-in-upper-and-lower-case.py | {
"start": 50,
"end": 450
} | class ____(object):
def greatestLetter(self, s):
"""
:type s: str
:rtype: str
"""
lookup = set(s)
result = ""
for c in s:
if c.isupper() and lower(c) in s:
if c > result:
result = c
return result
# Time: O(n)
# Space: O(1)
import itertools
import string
# string, hash table
| Solution |
python | doocs__leetcode | solution/1200-1299/1227.Airplane Seat Assignment Probability/Solution.py | {
"start": 0,
"end": 105
} | class ____:
def nthPersonGetsNthSeat(self, n: int) -> float:
return 1 if n == 1 else 0.5
| Solution |
python | openai__openai-python | src/openai/types/beta/threads/image_file_delta.py | {
"start": 220,
"end": 734
} | class ____(BaseModel):
detail: Optional[Literal["auto", "low", "high"]] = None
"""Specifies the detail level of the image if specified by the user.
`low` uses fewer tokens, you can opt in to high resolution using `high`.
"""
file_id: Optional[str] = None
"""
The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
in the message content. Set `purpose="vision"` when uploading the File if you
need to later display the file content.
"""
| ImageFileDelta |
python | huggingface__transformers | src/transformers/models/xlnet/modeling_xlnet.py | {
"start": 51373,
"end": 63225
} | class ____(XLNetPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_loss.weight": "transformer.word_embedding.weight"}
def __init__(self, config):
super().__init__(config)
self.attn_type = config.attn_type
self.same_length = config.same_length
self.transformer = XLNetModel(config)
self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_loss
def set_output_embeddings(self, new_embeddings):
self.lm_loss = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs):
# Overwritten -- this model has unique input preparation
# Add dummy token at the end (no attention on this one)
effective_batch_size = input_ids.shape[0]
dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
# At every pass, the attention values for the new token and the two last generated tokens
# are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have
# offset = 1; offset = 2 seems to have slightly better computation.
offset = 2
if past_key_values:
input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1)
else:
input_ids = torch.cat([input_ids, dummy_token], dim=1)
# Build permutation mask so that previous tokens don't see last token
sequence_length = input_ids.shape[1]
perm_mask = torch.zeros(
(effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
)
perm_mask[:, :, -1] = 1.0
# We'll only predict the last token
target_mapping = torch.zeros(
(effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
)
target_mapping[:, 0, -1] = 1.0
model_inputs = {
"input_ids": input_ids,
"perm_mask": perm_mask,
"target_mapping": target_mapping,
"use_mems": use_mems,
}
# if past is defined in model kwargs then use it for faster decoding
if past_key_values:
model_inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values)
# Attention mask is computed on the fly on XLNetModel.forward()
kwargs.pop("attention_mask", None)
# TODO: Ignoring use_cache should not happen, fixme.
kwargs.pop("use_cache", None)
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
mems: Optional[torch.Tensor] = None,
perm_mask: Optional[torch.Tensor] = None,
target_mapping: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
input_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_mems: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs, # delete when `use_cache` is removed in XLNetModel
) -> Union[tuple, XLNetLMHeadModelOutput]:
r"""
mems (`list[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential
decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
they have already been computed.
`use_mems` has to be set to `True` to make use of `mems`.
perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:
- if `perm_mask[k, i, j] = 0`, i attend to j in batch k;
- if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.
If not set, each token attends to all the others (full bidirectional attention). Only used during
pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is
on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
(generation).
input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*):
Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for
real tokens and 1 for padding which is kept for compatibility with the original code base.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **masked**,
- 0 for tokens that are **not masked**.
You can only uses one of `input_mask` and `attention_mask`.
labels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*):
Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If
`target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
The labels should correspond to the masked input words that should be predicted and depends on
`target_mapping`. Note in order to perform standard auto-regressive language modeling a *<mask>* token has
to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below)
Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss
is only computed for labels in `[0, ..., config.vocab_size]`
use_mems (`bool`, *optional*):
Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden
states from previous forward passes to compute attention, which can significantly improve performance for
sequential decoding tasks.
Examples:
```python
>>> from transformers import AutoTokenizer, XLNetLMHeadModel
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
>>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")
>>> # We show how to setup inputs to predict a next token using a bi-directional context.
>>> input_ids = torch.tensor(
... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
... ).unsqueeze(
... 0
... ) # We will predict the masked token
>>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
>>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
>>> target_mapping = torch.zeros(
... (1, 1, input_ids.shape[1]), dtype=torch.float
... ) # Shape [1, 1, seq_length] => let's predict one token
>>> target_mapping[
... 0, 0, -1
... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
>>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
>>> next_token_logits = outputs[
... 0
... ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
>>> # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling.
>>> input_ids = torch.tensor(
... tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
... ).unsqueeze(
... 0
... ) # We will predict the masked token
>>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
>>> assert labels.shape[0] == 1, "only one word will be predicted"
>>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
>>> perm_mask[
... :, :, -1
... ] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training
>>> target_mapping = torch.zeros(
... (1, 1, input_ids.shape[1]), dtype=torch.float
... ) # Shape [1, 1, seq_length] => let's predict one token
>>> target_mapping[
... 0, 0, -1
... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
>>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
>>> loss = outputs.loss
>>> next_token_logits = (
... outputs.logits
... ) # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
inputs_embeds=inputs_embeds,
use_mems=use_mems,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
hidden_states = transformer_outputs[0]
# Only compute necessary logits
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_loss(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return XLNetLMHeadModelOutput(
loss=loss,
logits=logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@staticmethod
def _reorder_cache(mems: list[torch.Tensor], beam_idx: torch.Tensor) -> list[torch.Tensor]:
"""
This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
[`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
generation step.
"""
return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
@auto_docstring(
custom_intro="""
XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
for GLUE tasks.
"""
)
| XLNetLMHeadModel |
python | pytorch__pytorch | torch/_functorch/autograd_function.py | {
"start": 21834,
"end": 22818
} | class ____:
_pt_reserved_attrs: tuple[str, ...] = ("_pt_reserved_attrs", "_pt_inner_ctx")
def __init__(self, ctx):
if not isinstance(ctx, WrappedCtx):
reserved_attrs = type(self)._pt_reserved_attrs
for name in reserved_attrs:
if not hasattr(ctx, name):
continue
raise RuntimeError(
f"PyTorch reserves the {reserved_attrs} field on ctx. "
"Please name your fields on ctx something else to avoid name "
"collision."
)
self._pt_inner_ctx = ctx
def __getattr__(self, name):
return getattr(self._pt_inner_ctx, name)
def __setattr__(self, name, value):
if name in type(self)._pt_reserved_attrs:
self.__dict__[name] = value
return
return setattr(self._pt_inner_ctx, name, value)
# Wraps ctx to create a new ctx object that overrides saved_tensors.
| WrappedCtx |
python | joke2k__faker | faker/providers/isbn/es_ES/__init__.py | {
"start": 42,
"end": 1265
} | class ____(ISBNProvider):
rules = {
"978": {
"84": [
("0000000", "0999999", 2),
("1000000", "1049999", 5),
("1050000", "1199999", 4),
("1200000", "1299999", 6),
("1300000", "1399999", 4),
("1400000", "1499999", 3),
("1500000", "1999999", 5),
("2000000", "6999999", 3),
("7000000", "8499999", 4),
("8500000", "8999999", 5),
("9000000", "9199999", 4),
("9200000", "9239999", 6),
("9240000", "9299999", 5),
("9300000", "9499999", 6),
("9500000", "9699999", 5),
("9700000", "9999999", 4),
],
"13": [
("0000000", "0099999", 2),
("0100000", "5999999", 0),
("6000000", "6049999", 3),
("6050000", "6999999", 0),
("7000000", "7349999", 4),
("7350000", "8749999", 0),
("8750000", "8999999", 5),
("9000000", "9899999", 0),
("9900000", "9999999", 6),
],
},
}
| Provider |
python | pandas-dev__pandas | pandas/tests/test_sorting.py | {
"start": 941,
"end": 6070
} | class ____:
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame(
{
"A": A,
"B": B,
"C": A,
"D": B,
"E": A,
"F": B,
"G": A,
"H": B,
"values": np.random.default_rng(2).standard_normal(2500),
}
)
lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"])
rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"])
left = lg.sum()["values"]
right = rg.sum()["values"]
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()["values"]
for k, v in expected.items():
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_groupby_large_range(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values})
grouped = data.groupby(["a", "b", "c", "d"])
assert len(grouped) == len(values)
@pytest.mark.slow
@pytest.mark.parametrize("agg", ["mean", "median"])
def test_int64_overflow_groupby_large_df_shuffled(self, agg):
rs = np.random.default_rng(2)
arr = rs.integers(-1 << 12, 1 << 12, (1 << 15, 5))
i = rs.choice(len(arr), len(arr) * 4)
arr = np.vstack((arr, arr[i])) # add some duplicate rows
i = rs.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list("abcde"))
df["jim"], df["joe"] = np.zeros((2, len(df)))
gr = df.groupby(list("abcde"))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(
tuple(ping.ngroups for ping in gr._grouper.groupings)
)
mi = MultiIndex.from_arrays(
[ar.ravel() for ar in np.array_split(np.unique(arr, axis=0), 5, axis=1)],
names=list("abcde"),
)
res = DataFrame(
np.zeros((len(mi), 2)), columns=["jim", "joe"], index=mi
).sort_index()
tm.assert_frame_equal(getattr(gr, agg)(), res)
@pytest.mark.parametrize(
"order, na_position, exp",
[
[
True,
"last",
list(range(5, 105)) + list(range(5)) + list(range(105, 110)),
],
[
True,
"first",
list(range(5)) + list(range(105, 110)) + list(range(5, 105)),
],
[
False,
"last",
list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)),
],
[
False,
"first",
list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)),
],
],
)
def test_lexsort_indexer(self, order, na_position, exp):
keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5]
result = lexsort_indexer(keys, orders=order, na_position=na_position)
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
@pytest.mark.parametrize(
"ascending, na_position, exp",
[
[
True,
"last",
list(range(5, 105)) + list(range(5)) + list(range(105, 110)),
],
[
True,
"first",
list(range(5)) + list(range(105, 110)) + list(range(5, 105)),
],
[
False,
"last",
list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)),
],
[
False,
"first",
list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)),
],
],
)
def test_nargsort(self, ascending, na_position, exp):
# list places NaNs last, np.array(..., dtype="O") may not place NaNs first
items = np.array([np.nan] * 5 + list(range(100)) + [np.nan] * 5, dtype="O")
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
result = nargsort(
items, kind="mergesort", ascending=ascending, na_position=na_position
)
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
| TestSorting |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/actions/test_opsgenie.py | {
"start": 321,
"end": 2540
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
teams = [
{
"id": "123-id",
"team": "moo-deng",
},
{
"id": "321-id",
"team": "moo-waan",
},
]
self.integration, self.org_integration = self.create_provider_integration_for(
self.organization,
self.user,
provider="opsgenie",
name="Example Opsgenie",
external_id="example-opsgenie",
metadata={"services": teams},
)
with assume_test_silo_mode(SiloMode.CONTROL):
self.org_integration.config["team_table"] = teams
self.org_integration.save()
self.valid_data = {
"type": Action.Type.OPSGENIE,
"config": {"targetIdentifier": "123-id", "targetType": "specific"},
"data": {},
"integrationId": self.integration.id,
}
def test_validate(self) -> None:
validator = BaseActionValidator(
data=self.valid_data,
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is True
validator.save()
def test_validate__invalid_team(self) -> None:
validator = BaseActionValidator(
data={
**self.valid_data,
"config": {
"targetType": "specific",
"targetIdentifier": "54321",
},
},
context={"organization": self.organization},
)
result = validator.is_valid()
assert result is False
assert validator.errors == {
"all": [
ErrorDetail(
string='The team "None" does not belong to the Example Opsgenie Opsgenie account.',
code="invalid",
)
],
"team": [
ErrorDetail(
string="Select a valid choice. 54321 is not one of the available choices.",
code="invalid",
)
],
}
| TestOpsgenieActionValidator |
python | Lightning-AI__lightning | src/lightning/pytorch/overrides/distributed.py | {
"start": 7273,
"end": 9483
} | class ____(DistributedSampler):
"""A fork of the PyTorch DistributedSampler that doesn't repeat data, instead allowing the number of batches per
process to be off-by-one from each other. This makes this sampler usable for predictions (it's deterministic and
doesn't require shuffling). It is potentially unsafe to use this sampler for training, because during training the
DistributedDataParallel syncs buffers on each forward pass, so it could freeze if one of the processes runs one
fewer batch. During prediction, buffers are only synced on the first batch, so this is safe to use as long as each
process runs at least one batch. We verify this in an assert.
Taken from https://github.com/jpuigcerver/PyLaia/blob/v1.0.0/laia/data/unpadded_distributed_sampler.py and
https://github.com/pytorch/pytorch/issues/25162#issuecomment-634146002
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
if not isinstance(self.dataset, Sized):
raise TypeError("The given dataset must implement the `__len__` method.")
self.num_samples = len(range(self.rank, len(self.dataset), self.num_replicas))
self.total_size = len(self.dataset)
# If any process has at least one batch, every other process needs to
# have at least one batch, or the DistributedDataParallel could lock up.
assert self.num_samples >= 1 or self.total_size == 0
@override
def __iter__(self) -> Iterator[list[int]]:
if not isinstance(self.dataset, Sized):
raise TypeError("The given dataset must implement the `__len__` method.")
if self.shuffle:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
| UnrepeatedDistributedSampler |
python | coleifer__peewee | tests/hybrid.py | {
"start": 623,
"end": 787
} | class ____(TestModel):
first = TextField()
last = TextField()
@hybrid_property
def full_name(self):
return self.first + ' ' + self.last
| Person |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 532989,
"end": 533651
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("PullRequestChangedFileEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of(PullRequestChangedFile), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| PullRequestChangedFileConnection |
python | run-llama__llama_index | llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-tablestore/llama_index/storage/kvstore/tablestore/base.py | {
"start": 248,
"end": 10337
} | class ____(BaseKVStore):
"""
Tablestore Key-Value Store.
Args:
tablestore_client (OTSClient, optional): External tablestore(ots) client.
If this parameter is set, the following endpoint/instance_name/access_key_id/access_key_secret will be ignored.
endpoint (str, optional): Tablestore instance endpoint.
instance_name (str, optional): Tablestore instance name.
access_key_id (str, optional): Aliyun access key id.
access_key_secret (str, optional): Aliyun access key secret.
Returns:
TablestoreKVStore: A Tablestore kv store object.
"""
def __init__(
self,
tablestore_client: Optional[tablestore.OTSClient] = None,
endpoint: Optional[str] = None,
instance_name: Optional[str] = None,
access_key_id: Optional[str] = None,
access_key_secret: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__()
if not tablestore_client:
self._tablestore_client = tablestore.OTSClient(
endpoint,
access_key_id,
access_key_secret,
instance_name,
retry_policy=tablestore.WriteRetryPolicy(),
**kwargs, # pass additional arguments
)
else:
self._tablestore_client = tablestore_client
self._update_collection()
@staticmethod
def _flatten_dict_to_json_strings(original_dict) -> dict:
result_dict = {}
for key, value in original_dict.items():
if isinstance(
value, (bool, bytearray, float, int, six.binary_type, six.text_type)
):
result_dict[key] = value
else:
result_dict[key] = json.dumps(value, ensure_ascii=False)
return result_dict
def _update_collection(self) -> List[str]:
"""Update collection."""
self._collections = self._tablestore_client.list_table()
return self._collections
def _create_collection_if_not_exist(self, collection: str) -> None:
"""Create table if not exist."""
if collection in self._collections:
return
table_list = self._tablestore_client.list_table()
if collection in table_list:
logger.info(f"Tablestore kv store table[{collection}] already exists")
return
logger.info(
f"Tablestore kv store table[{collection}] does not exist, try to create the table.",
)
table_meta = tablestore.TableMeta(collection, [("pk", "STRING")])
reserved_throughput = tablestore.ReservedThroughput(
tablestore.CapacityUnit(0, 0)
)
self._tablestore_client.create_table(
table_meta, tablestore.TableOptions(), reserved_throughput
)
self._update_collection()
sleep(5)
logger.info(f"Tablestore create kv store table[{collection}] successfully.")
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
val = self._flatten_dict_to_json_strings(val)
self._create_collection_if_not_exist(collection)
primary_key = [("pk", key)]
attribute_columns = list(val.items())
row = tablestore.Row(primary_key, attribute_columns)
self._tablestore_client.put_row(collection, row)
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
raise NotImplementedError
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
self._create_collection_if_not_exist(collection)
primary_key = [("pk", key)]
try:
_, row, _ = self._tablestore_client.get_row(
collection, primary_key, None, None, 1
)
if row is None:
return None
return self._parse_row(row)
except tablestore.OTSServiceError as e:
logger.error(
f"get row failed, http_status:{e.get_http_status()}, error_code:{e.get_error_code()}, error_message:{e.get_error_message()}, request_id:{e.get_request_id()}"
)
if (
e.get_error_code() == "OTSParameterInvalid"
and "table not exist" in e.get_error_message()
):
return None
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
raise NotImplementedError
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
self._create_collection_if_not_exist(collection)
inclusive_start_primary_key = [("pk", tablestore.INF_MIN)]
exclusive_end_primary_key = [("pk", tablestore.INF_MAX)]
limit = 5000
columns_to_get = []
(
consumed,
next_start_primary_key,
row_list,
next_token,
) = self._tablestore_client.get_range(
collection,
tablestore.Direction.FORWARD,
inclusive_start_primary_key,
exclusive_end_primary_key,
columns_to_get,
limit,
max_version=1,
)
ret_dict = {}
self._parse_rows(ret_dict, row_list)
while next_start_primary_key is not None:
inclusive_start_primary_key = next_start_primary_key
(
consumed,
next_start_primary_key,
row_list,
next_token,
) = self._tablestore_client.get_range(
collection,
tablestore.Direction.FORWARD,
inclusive_start_primary_key,
exclusive_end_primary_key,
columns_to_get,
limit,
max_version=1,
)
self._parse_rows(ret_dict, row_list)
return ret_dict
def _parse_rows(self, return_result: dict, row_list: Optional[list]) -> None:
if row_list:
for row in row_list:
ret = self._parse_row(row)
return_result[row.primary_key[0][1]] = ret
def _delete_rows(self, row_list: Optional[list], collection: str) -> None:
if row_list:
for row in row_list:
key = row.primary_key[0][1]
self.delete(key=key, collection=collection)
@staticmethod
def _parse_row(row: Any) -> dict[str, Any]:
ret = {}
for col in row.attribute_columns:
k = col[0]
v = col[1]
if isinstance(v, str):
try:
ret[k] = json.loads(v)
if not (isinstance(ret[k], (dict, list, tuple))):
ret[k] = v
except json.JSONDecodeError:
ret[k] = v
else:
ret[k] = v
return ret
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
raise NotImplementedError
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
primary_key = [("pk", key)]
_, return_row = self._tablestore_client.delete_row(
collection, primary_key, None
)
return True
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
raise NotImplementedError
# noinspection DuplicatedCode
def delete_all(self, collection: str = DEFAULT_COLLECTION) -> None:
self._create_collection_if_not_exist(collection)
inclusive_start_primary_key = [("pk", tablestore.INF_MIN)]
exclusive_end_primary_key = [("pk", tablestore.INF_MAX)]
limit = 5000
columns_to_get = []
(
consumed,
next_start_primary_key,
row_list,
next_token,
) = self._tablestore_client.get_range(
collection,
tablestore.Direction.FORWARD,
inclusive_start_primary_key,
exclusive_end_primary_key,
columns_to_get,
limit,
max_version=1,
)
ret_dict = {}
self._delete_rows(row_list, collection)
while next_start_primary_key is not None:
inclusive_start_primary_key = next_start_primary_key
(
consumed,
next_start_primary_key,
row_list,
next_token,
) = self._tablestore_client.get_range(
collection,
tablestore.Direction.FORWARD,
inclusive_start_primary_key,
exclusive_end_primary_key,
columns_to_get,
limit,
max_version=1,
)
self._delete_rows(row_list, collection)
| TablestoreKVStore |
python | readthedocs__readthedocs.org | readthedocs/audit/serializers.py | {
"start": 505,
"end": 631
} | class ____(serializers.ModelSerializer):
# Minimal DRF ModelSerializer exposing only a user's id and username,
# e.g. for embedding a compact actor reference in API output.
class Meta:
# NOTE(review): `User` is imported elsewhere in the file -- presumably the
# Django auth user model; confirm against the file's imports.
model = User
fields = ["id", "username"]  # only non-sensitive identifying fields
| UserSerializer |
python | realpython__materials | asterioids-pygame-project/source_code_final/space_rocks/game.py | {
"start": 118,
"end": 3423
} | class ____:
# Main controller for the "Space Rocks" asteroids clone: owns the pygame
# window, all game objects, and the input -> update -> draw loop.
# NOTE(review): original indentation was lost in this dump; the comments
# below describe the structure implied by the control flow.
# Minimum spawn distance between a new asteroid and the ship, so the player
# is never destroyed on the first frame.
MIN_ASTEROID_DISTANCE = 250
def __init__(self):
self._init_pygame()
self.screen = pygame.display.set_mode((800, 600))
self.background = load_sprite("space", False)
self.clock = pygame.time.Clock()
self.font = pygame.font.Font(None, 64)
self.message = ""  # end-of-game text; empty while the game is running
self.asteroids = []
self.bullets = []
# The ship captures `self.bullets.append` as its shoot callback, so the
# *identity* of the bullets list matters: mutate it, never rebind it.
self.spaceship = Spaceship((400, 300), self.bullets.append)
# Spawn 6 asteroids, re-rolling any position that lands too close to the ship.
for _ in range(6):
while True:
position = get_random_position(self.screen)
if (
position.distance_to(self.spaceship.position)
> self.MIN_ASTEROID_DISTANCE
):
break
# Each asteroid gets `self.asteroids.append` so split() can register fragments.
self.asteroids.append(Asteroid(position, self.asteroids.append))
def main_loop(self):
# Classic game loop; quit() inside _handle_input is the only exit.
while True:
self._handle_input()
self._process_game_logic()
self._draw()
def _init_pygame(self):
pygame.init()
pygame.display.set_caption("Space Rocks")
def _handle_input(self):
# Discrete events: window close / Escape quits; Space fires while alive.
for event in pygame.event.get():
if event.type == pygame.QUIT or (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
):
quit()
elif (
self.spaceship
and event.type == pygame.KEYDOWN
and event.key == pygame.K_SPACE
):
self.spaceship.shoot()
# Continuous state: held keys steer and accelerate the ship.
is_key_pressed = pygame.key.get_pressed()
if self.spaceship:
if is_key_pressed[pygame.K_RIGHT]:
self.spaceship.rotate(clockwise=True)
elif is_key_pressed[pygame.K_LEFT]:
self.spaceship.rotate(clockwise=False)
if is_key_pressed[pygame.K_UP]:
self.spaceship.accelerate()
def _process_game_logic(self):
for game_object in self._get_game_objects():
game_object.move(self.screen)
# Ship vs. asteroid: first hit kills the ship and ends the game.
if self.spaceship:
for asteroid in self.asteroids:
if asteroid.collides_with(self.spaceship):
self.spaceship = None
self.message = "You lost!"
break
# Bullet vs. asteroid: iterate over copies ([:]) because both lists are
# mutated mid-loop; each bullet destroys at most one asteroid.
for bullet in self.bullets[:]:
for asteroid in self.asteroids[:]:
if asteroid.collides_with(bullet):
self.asteroids.remove(asteroid)
self.bullets.remove(bullet)
asteroid.split()
break
# Cull bullets that left the screen (again iterating a copy).
for bullet in self.bullets[:]:
if not self.screen.get_rect().collidepoint(bullet.position):
self.bullets.remove(bullet)
# Win condition: all asteroids destroyed while the ship survives.
if not self.asteroids and self.spaceship:
self.message = "You won!"
def _draw(self):
self.screen.blit(self.background, (0, 0))
for game_object in self._get_game_objects():
game_object.draw(self.screen)
if self.message:
print_text(self.screen, self.message, self.font)
pygame.display.flip()
self.clock.tick(60)  # cap the frame rate at 60 FPS
def _get_game_objects(self):
# Everything that moves/draws; the ship is included only while alive.
game_objects = [*self.asteroids, *self.bullets]
if self.spaceship:
game_objects.append(self.spaceship)
return game_objects
| SpaceRocks |
python | doocs__leetcode | solution/0900-0999/0987.Vertical Order Traversal of a Binary Tree/Solution.py | {
"start": 192,
"end": 791
class ____:
    """LeetCode 987, 'Vertical Order Traversal of a Binary Tree'.

    Collect (column, row, value) triples via DFS, sort them (column first,
    then row, then value -- exactly the required tie-break order), and group
    consecutive triples that share a column.
    """

    def verticalTraversal(self, root: "Optional[TreeNode]") -> "List[List[int]]":
        triples = []  # (col, row, val)

        def dfs(node: "Optional[TreeNode]", row: int, col: int) -> None:
            if node is None:
                return
            triples.append((col, row, node.val))
            dfs(node.left, row + 1, col - 1)
            dfs(node.right, row + 1, col + 1)

        dfs(root, 0, 0)
        triples.sort()
        ans: "List[List[int]]" = []
        # Fix: the original used the magic sentinel `prev = -2000` for "no
        # previous column", which silently merges groups if a column ever
        # equals -2000. `None` can never collide with an int column.
        prev_col = None
        for col, _, val in triples:
            if col != prev_col:
                ans.append([])
                prev_col = col
            ans[-1].append(val)
        return ans
| Solution |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 8507,
"end": 8587
} | class ____:
# Single-field spec wrapping a TokenArgument. NOTE(review): the `10` inside
# Annotated presumably is a serialization field tag used by the export
# schema machinery -- confirm against the schema module's conventions.
arg: Annotated[TokenArgument, 10]
@_union_dataclass
| InputTokenSpec |
python | python-openxml__python-docx | tests/test_enum.py | {
"start": 326,
"end": 703
} | class ____(BaseXmlEnum):
"""Test-fixture enum; members are ``(int value, XML attr value, docstring)`` triples."""
# The `None` XML value on BAZ marks the member mapped to when the XML
# attribute is omitted entirely.
FOO = (1, "foo", "Do foo instead of bar.")
"""Do foo instead of bar."""
BAR = (2, "bar", "Do bar instead of foo.")
"""Do bar instead of foo."""
BAZ = (3, None, "Maps to the value assumed when the attribute is omitted.")
"""Maps to the value assumed when the attribute is omitted."""
| SomeXmlAttr |
python | walkccc__LeetCode | solutions/2166. Design Bitset/2166.py | {
"start": 0,
"end": 716
class ____:
    """LeetCode 2166 'Design Bitset': O(1) fix/unfix/flip.

    Keeps the bitset and its bitwise complement as parallel char lists so
    ``flip()`` is a reference swap plus a count update instead of an
    O(size) pass, and ``toString()`` is a single join.
    """

    def __init__(self, size: int):
        self.s = ['0'] * size  # current bits
        self.r = ['1'] * size  # complement of self.s, kept in lockstep
        self.cnt = 0           # number of '1' bits in self.s

    def fix(self, idx: int) -> None:
        """Set bit *idx* to 1 (no-op if already set)."""
        if self.s[idx] == '0':
            self.cnt += 1
            self.s[idx] = '1'
            self.r[idx] = '0'

    def unfix(self, idx: int) -> None:
        """Clear bit *idx* to 0 (no-op if already clear)."""
        if self.s[idx] == '1':
            self.cnt -= 1
            self.s[idx] = '0'
            self.r[idx] = '1'

    def flip(self) -> None:
        """Invert every bit in O(1) by swapping with the complement."""
        self.s, self.r = self.r, self.s
        self.cnt = len(self.s) - self.cnt

    def all(self) -> bool:
        """Return True if every bit is 1."""
        return self.cnt == len(self.s)

    def one(self) -> bool:
        # Fix: previously returned the raw int `self.cnt` despite the
        # declared `-> bool`; `> 0` is truthiness-compatible for callers.
        return self.cnt > 0

    def count(self) -> int:
        """Return the number of bits set to 1."""
        return self.cnt

    def toString(self) -> str:
        """Return the bitset as a string of '0'/'1' chars, index 0 first."""
        return ''.join(self.s)
| Bitset |
python | pytorch__pytorch | torch/testing/_internal/common_dist_composable.py | {
"start": 2313,
"end": 3577
class ____(nn.Module):
    """Deliberately deeply nested Sequential/FakeSequential model.

    The nesting exercises module-traversal order so tests can catch
    differences between valid traversals (e.g. BFS vs. DFS variants).
    """

    def __init__(self, device: torch.device) -> None:
        super().__init__()
        self.seq1 = nn.Sequential(
            nn.Linear(1, 1, device=device),
            FakeSequential(
                nn.Linear(1, 1, device=device),
                nn.ReLU(),
                FakeSequential(
                    nn.Linear(1, 1, device=device),
                ),
                nn.ReLU(),
            ),
            nn.Linear(1, 2, device=device),
        )
        self.lin = nn.Linear(2, 2, device=device)
        self.seq2 = nn.Sequential(
            nn.ReLU(),
            nn.Linear(2, 3, device=device),
            FakeSequential(
                nn.Linear(3, 2, bias=False, device=device),
                nn.Linear(2, 4, bias=False, device=device),
            ),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Fix per the in-code FIXME: forward() was an unused local function
        # defined inside __init__; promoted to a real method so the module
        # is actually callable.
        return self.seq2(self.lin(self.seq1(x)))
| NestedSequentialModel |
python | optuna__optuna | optuna/study/_study_direction.py | {
"start": 14,
"end": 421
class ____(enum.IntEnum):
    """Optimization direction of a study.

    Attributes:
        NOT_SET: No direction has been chosen yet.
        MINIMIZE: The study minimizes the objective function.
        MAXIMIZE: The study maximizes the objective function.
    """

    NOT_SET = 0
    MINIMIZE = 1
    MAXIMIZE = 2
| StudyDirection |
python | django__django | tests/migrations/test_autodetector.py | {
"start": 8372,
"end": 204279
} | class ____(BaseAutodetectorTests):
"""
Tests the migration autodetector.
"""
author_empty = ModelState(
"testapp", "Author", [("id", models.AutoField(primary_key=True))]
)
author_name = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
)
author_name_null = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, null=True)),
],
)
author_name_longer = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=400)),
],
)
author_name_renamed = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("names", models.CharField(max_length=200)),
],
)
author_name_default = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default="Ada Lovelace")),
],
)
author_name_db_default = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, db_default="Ada Lovelace")),
],
)
author_name_check_constraint = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
],
{
"constraints": [
models.CheckConstraint(
condition=models.Q(name__contains="Bob"), name="name_contains_bob"
)
]
},
)
author_dates_of_birth_auto_now = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now=True)),
("date_time_of_birth", models.DateTimeField(auto_now=True)),
("time_of_birth", models.TimeField(auto_now=True)),
],
)
author_dates_of_birth_auto_now_add = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now_add=True)),
("date_time_of_birth", models.DateTimeField(auto_now_add=True)),
("time_of_birth", models.TimeField(auto_now_add=True)),
],
)
author_name_deconstructible_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
],
)
author_name_deconstructible_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
],
)
author_name_deconstructible_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
],
)
author_name_deconstructible_4 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
],
)
author_name_deconstructible_list_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=[DeconstructibleObject(), 123]
),
),
],
)
author_name_deconstructible_list_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=[DeconstructibleObject(), 123]
),
),
],
)
author_name_deconstructible_list_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=[DeconstructibleObject(), 999]
),
),
],
)
author_name_deconstructible_tuple_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=(DeconstructibleObject(), 123)
),
),
],
)
author_name_deconstructible_tuple_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=(DeconstructibleObject(), 123)
),
),
],
)
author_name_deconstructible_tuple_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200, default=(DeconstructibleObject(), 999)
),
),
],
)
author_name_deconstructible_dict_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default={"item": DeconstructibleObject(), "otheritem": 123},
),
),
],
)
author_name_deconstructible_dict_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default={"item": DeconstructibleObject(), "otheritem": 123},
),
),
],
)
author_name_deconstructible_dict_3 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default={"item": DeconstructibleObject(), "otheritem": 999},
),
),
],
)
author_name_nested_deconstructible_1 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_2 = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_changed_arg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2-changed"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_extra_arg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
None,
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
),
),
),
],
)
author_name_nested_deconstructible_changed_kwarg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c-changed")),
),
),
),
],
)
author_name_nested_deconstructible_extra_kwarg = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"name",
models.CharField(
max_length=200,
default=DeconstructibleObject(
DeconstructibleObject(1),
(
DeconstructibleObject("t1"),
DeconstructibleObject("t2"),
),
a=DeconstructibleObject("A"),
b=DeconstructibleObject(B=DeconstructibleObject("c")),
c=None,
),
),
),
],
)
author_custom_pk = ModelState(
"testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))]
)
author_with_biography_non_blank = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField()),
("biography", models.TextField()),
],
)
author_with_biography_blank = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(blank=True)),
("biography", models.TextField(blank=True)),
],
)
author_with_book = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
author_with_book_order_wrt = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
options={"order_with_respect_to": "book"},
)
author_renamed_with_book = ModelState(
"testapp",
"Writer",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
author_with_publisher_string = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher_name", models.CharField(max_length=200)),
],
)
author_with_publisher = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
],
)
author_with_user = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("auth.User", models.CASCADE)),
],
)
author_with_custom_user = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("thirdapp.CustomUser", models.CASCADE)),
],
)
author_proxy = ModelState(
"testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",)
)
author_proxy_options = ModelState(
"testapp",
"AuthorProxy",
[],
{
"proxy": True,
"verbose_name": "Super Author",
},
("testapp.author",),
)
author_proxy_notproxy = ModelState(
"testapp", "AuthorProxy", [], {}, ("testapp.author",)
)
author_proxy_third = ModelState(
"thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",)
)
author_proxy_third_notproxy = ModelState(
"thirdapp", "AuthorProxy", [], {}, ("testapp.author",)
)
author_proxy_proxy = ModelState(
"testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy",)
)
author_unmanaged = ModelState(
"testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author",)
)
author_unmanaged_managed = ModelState(
"testapp", "AuthorUnmanaged", [], {}, ("testapp.author",)
)
author_unmanaged_default_pk = ModelState(
"testapp", "Author", [("id", models.AutoField(primary_key=True))]
)
author_unmanaged_custom_pk = ModelState(
"testapp",
"Author",
[
("pk_field", models.IntegerField(primary_key=True)),
],
)
author_with_m2m = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher")),
],
)
author_with_m2m_blank = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", blank=True)),
],
)
author_with_m2m_through = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"publishers",
models.ManyToManyField("testapp.Publisher", through="testapp.Contract"),
),
],
)
author_with_renamed_m2m_through = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
(
"publishers",
models.ManyToManyField("testapp.Publisher", through="testapp.Deal"),
),
],
)
author_with_former_m2m = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("publishers", models.CharField(max_length=100)),
],
)
author_with_options = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
],
{
"permissions": [("can_hire", "Can hire")],
"verbose_name": "Authi",
},
)
author_with_db_table_comment = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table_comment": "Table comment"},
)
author_with_db_table_options = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_one"},
)
author_with_new_db_table_options = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_two"},
)
author_renamed_with_db_table_options = ModelState(
"testapp",
"NewAuthor",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_one"},
)
author_renamed_with_new_db_table_options = ModelState(
"testapp",
"NewAuthor",
[
("id", models.AutoField(primary_key=True)),
],
{"db_table": "author_three"},
)
contract = ModelState(
"testapp",
"Contract",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
],
)
contract_renamed = ModelState(
"testapp",
"Deal",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
],
)
publisher = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
)
publisher_with_author = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("name", models.CharField(max_length=100)),
],
)
publisher_with_aardvark_author = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Aardvark", models.CASCADE)),
("name", models.CharField(max_length=100)),
],
)
publisher_with_book = ModelState(
"testapp",
"Publisher",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Book", models.CASCADE)),
("name", models.CharField(max_length=100)),
],
)
other_pony = ModelState(
"otherapp",
"Pony",
[
("id", models.AutoField(primary_key=True)),
],
)
other_pony_food = ModelState(
"otherapp",
"Pony",
[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
other_stable = ModelState(
"otherapp", "Stable", [("id", models.AutoField(primary_key=True))]
)
third_thing = ModelState(
"thirdapp", "Thing", [("id", models.AutoField(primary_key=True))]
)
book = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_proxy_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("thirdapp.AuthorProxy", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_proxy_proxy_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.AAuthorProxyProxy", models.CASCADE)),
],
)
book_migrations_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.UnmigratedModel", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_with_no_author_fk = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.IntegerField()),
("title", models.CharField(max_length=200)),
],
)
book_with_no_author = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=200)),
],
)
book_with_author_renamed = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_with_field_and_author_renamed = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("writer", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
)
book_with_multiple_authors = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author")),
("title", models.CharField(max_length=200)),
],
)
book_with_multiple_authors_through_attribution = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
(
"authors",
models.ManyToManyField(
"testapp.Author", through="otherapp.Attribution"
),
),
("title", models.CharField(max_length=200)),
],
)
book_indexes = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"indexes": [
models.Index(fields=["author", "title"], name="book_title_author_idx")
],
},
)
book_unordered_indexes = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"indexes": [
models.Index(fields=["title", "author"], name="book_author_title_idx")
],
},
)
book_unique_together = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("author", "title")},
},
)
book_unique_together_2 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("title", "author")},
},
)
book_unique_together_3 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("newfield", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("title", "newfield")},
},
)
book_unique_together_4 = ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
],
{
"unique_together": {("title", "newfield2")},
},
)
attribution = ModelState(
"otherapp",
"Attribution",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
edition = ModelState(
"thirdapp",
"Edition",
[
("id", models.AutoField(primary_key=True)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
],
)
custom_user = ModelState(
"thirdapp",
"CustomUser",
[
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
],
bases=(AbstractBaseUser,),
)
custom_user_no_inherit = ModelState(
"thirdapp",
"CustomUser",
[
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
],
)
aardvark = ModelState(
"thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))]
)
aardvark_testapp = ModelState(
"testapp", "Aardvark", [("id", models.AutoField(primary_key=True))]
)
aardvark_based_on_author = ModelState(
"testapp", "Aardvark", [], bases=("testapp.Author",)
)
aardvark_pk_fk_author = ModelState(
"testapp",
"Aardvark",
[
(
"id",
models.OneToOneField(
"testapp.Author", models.CASCADE, primary_key=True
),
),
],
)
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState(
"eggs",
"Rabbit",
[
("id", models.AutoField(primary_key=True)),
("knight", models.ForeignKey("eggs.Knight", models.CASCADE)),
("parent", models.ForeignKey("eggs.Rabbit", models.CASCADE)),
],
{
"unique_together": {("parent", "knight")},
"indexes": [
models.Index(
fields=["parent", "knight"], name="rabbit_circular_fk_index"
)
],
},
)
def test_arrange_for_graph(self):
"""Tests auto-naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency(
"testapp.0002_foobar",
("testapp", "0002_foobar"),
("testapp", "0001_initial"),
)
graph.add_dependency(
"testapp.0002_foobar",
("testapp", "0002_foobar"),
("otherapp", "0001_initial"),
)
# Use project state to make a new migration change set
before = self.make_project_state([self.publisher, self.other_pony])
after = self.make_project_state(
[
self.author_empty,
self.publisher,
self.other_pony,
self.other_stable,
]
)
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector.arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(
changes["testapp"][0].dependencies, [("testapp", "0002_foobar")]
)
self.assertEqual(changes["otherapp"][0].name, "0002_stable")
self.assertEqual(
changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")]
)
def test_arrange_for_graph_with_multiple_initial(self):
# Make a fake graph.
graph = MigrationGraph()
# Use project state to make a new migration change set.
before = self.make_project_state([])
after = self.make_project_state(
[self.author_with_book, self.book, self.attribution]
)
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_initial": True})
)
changes = autodetector._detect_changes()
changes = autodetector.arrange_for_graph(changes, graph)
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].dependencies, [])
self.assertEqual(changes["otherapp"][1].name, "0002_initial")
self.assertCountEqual(
changes["otherapp"][1].dependencies,
[("testapp", "0001_initial"), ("otherapp", "0001_initial")],
)
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(
changes["testapp"][0].dependencies, [("otherapp", "0001_initial")]
)
def test_trim_apps(self):
"""
Trim does not remove dependencies but does remove unwanted apps.
"""
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state(
[self.author_empty, self.other_pony, self.other_stable, self.third_thing]
)
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_initial": True})
)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector.arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, {"testapp"})
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
"""Tests custom naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency(
"testapp.0002_foobar",
("testapp", "0002_foobar"),
("testapp", "0001_initial"),
)
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state(
[self.author_empty, self.other_pony, self.other_stable]
)
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
migration_name = "custom_name"
changes = autodetector.arrange_for_graph(changes, graph, migration_name)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name)
self.assertEqual(
changes["testapp"][0].dependencies, [("testapp", "0002_foobar")]
)
self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name)
self.assertEqual(
changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")]
)
def test_new_model(self):
"""Tests autodetection of new models."""
changes = self.get_changes([], [self.other_pony_food])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Pony")
self.assertEqual(
[name for name, mgr in changes["otherapp"][0].operations[0].managers],
["food_qs", "food_mgr", "food_mgr_kwargs"],
)
def test_old_model(self):
"""Tests deletion of old models."""
changes = self.get_changes([self.author_empty], [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
def test_add_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_empty], [self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_add_not_null_field_with_db_default(self, mocked_ask_method):
changes = self.get_changes([self.author_empty], [self.author_name_db_default])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="name", preserve_default=True
)
self.assertOperationFieldAttributes(
changes, "testapp", 0, 0, db_default="Ada Lovelace"
)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_add_date_fields_with_auto_now_not_asking_for_default(
self, mocked_ask_method
):
changes = self.get_changes(
[self.author_empty], [self.author_dates_of_birth_auto_now]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["AddField", "AddField", "AddField"]
)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now=True)
    @mock.patch(
        "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
        side_effect=AssertionError("Should not have prompted for not null addition"),
    )
    def test_add_date_fields_with_auto_now_add_not_asking_for_null_addition(
        self, mocked_ask_method
    ):
        """
        Adding auto_now_add fields must not trigger the generic not-null
        addition prompt (auto_now_add has its own question; the patched
        generic questioner raises if consulted).
        """
        changes = self.get_changes(
            [self.author_empty], [self.author_dates_of_birth_auto_now_add]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["AddField", "AddField", "AddField"]
        )
        self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
        self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
        self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
    @mock.patch(
        "django.db.migrations.questioner.MigrationQuestioner.ask_auto_now_add_addition"
    )
    def test_add_date_fields_with_auto_now_add_asking_for_default(
        self, mocked_ask_method
    ):
        """
        Adding auto_now_add fields asks the auto_now_add question once per
        field added (three fields here, hence three calls).
        """
        changes = self.get_changes(
            [self.author_empty], [self.author_dates_of_birth_auto_now_add]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["AddField", "AddField", "AddField"]
        )
        self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
        self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
        self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
        # One prompt per added auto_now_add field.
        self.assertEqual(mocked_ask_method.call_count, 3)
    def test_add_field_before_generated_field(self):
        """
        When a plain field and a GeneratedField referencing it are added
        together, the AddField for the base field is ordered before the
        AddField for the generated field.
        """
        initial_state = ModelState(
            "testapp",
            "Author",
            [
                ("name", models.CharField(max_length=20)),
            ],
        )
        updated_state = ModelState(
            "testapp",
            "Author",
            [
                ("name", models.CharField(max_length=20)),
                ("surname", models.CharField(max_length=20)),
                (
                    "lower_full_name",
                    models.GeneratedField(
                        expression=Concat(Lower("name"), Lower("surname")),
                        output_field=models.CharField(max_length=30),
                        db_persist=True,
                    ),
                ),
            ],
        )
        changes = self.get_changes([initial_state], [updated_state])
        self.assertNumberMigrations(changes, "testapp", 1)
        # surname first, then the generated field that depends on it.
        self.assertOperationTypes(changes, "testapp", 0, ["AddField", "AddField"])
        self.assertOperationFieldAttributes(
            changes, "testapp", 0, 1, expression=Concat(Lower("name"), Lower("surname"))
        )
    def test_add_fk_before_generated_field(self):
        """
        When an FK and a GeneratedField referencing its column are added
        together, the FK's AddField (and the target model's CreateModel)
        are ordered before the generated field's AddField.
        """
        initial_state = ModelState(
            "testapp",
            "Author",
            [
                ("name", models.CharField(max_length=20)),
            ],
        )
        updated_state = [
            ModelState(
                "testapp",
                "Publisher",
                [
                    ("name", models.CharField(max_length=20)),
                ],
            ),
            ModelState(
                "testapp",
                "Author",
                [
                    ("name", models.CharField(max_length=20)),
                    (
                        "publisher",
                        models.ForeignKey("testapp.Publisher", models.CASCADE),
                    ),
                    (
                        "lower_full_name",
                        models.GeneratedField(
                            # References the FK's underlying column.
                            expression=Concat("name", "publisher_id"),
                            output_field=models.CharField(max_length=20),
                            db_persist=True,
                        ),
                    ),
                ],
            ),
        ]
        changes = self.get_changes([initial_state], updated_state)
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["CreateModel", "AddField", "AddField"]
        )
        self.assertOperationFieldAttributes(
            changes, "testapp", 0, 2, expression=Concat("name", "publisher_id")
        )
def test_remove_field(self):
"""Tests autodetection of removed fields."""
changes = self.get_changes([self.author_name], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
    def test_remove_generated_field_before_its_base_field(self):
        """
        Removing a GeneratedField together with its base field orders the
        generated field's RemoveField first.
        """
        initial_state = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("name", models.CharField(max_length=20)),
                    (
                        "upper_name",
                        models.GeneratedField(
                            expression=Upper("name"),
                            db_persist=True,
                            output_field=models.CharField(),
                        ),
                    ),
                ],
            ),
        ]
        updated_state = [ModelState("testapp", "Author", [])]
        changes = self.get_changes(initial_state, updated_state)
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField"])
        # upper_name (depends on name) is removed before name itself.
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="upper_name")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="name")
    def test_remove_generated_field_before_multiple_base_fields(self):
        """
        Removing a GeneratedField together with all of its base fields
        orders the generated field's RemoveField before every base field's.
        """
        initial_state = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("first_name", models.CharField(max_length=20)),
                    ("last_name", models.CharField(max_length=20)),
                    (
                        "full_name",
                        models.GeneratedField(
                            expression=Concat("first_name", "last_name"),
                            db_persist=True,
                            output_field=models.CharField(),
                        ),
                    ),
                ],
            ),
        ]
        updated_state = [ModelState("testapp", "Author", [])]
        changes = self.get_changes(initial_state, updated_state)
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["RemoveField", "RemoveField", "RemoveField"]
        )
        # full_name (depends on both base fields) goes first.
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="full_name")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="first_name")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="last_name")
    def test_remove_generated_field_and_one_of_multiple_base_fields(self):
        """
        Removing a GeneratedField and only one of its base fields removes
        the generated field first and leaves the remaining base field alone.
        """
        initial_state = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("first_name", models.CharField(max_length=20)),
                    ("last_name", models.CharField(max_length=20)),
                    (
                        "full_name",
                        models.GeneratedField(
                            expression=Concat("first_name", "last_name"),
                            db_persist=True,
                            output_field=models.CharField(),
                        ),
                    ),
                ],
            ),
        ]
        # Only remove full_name and first_name.
        updated_state = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("last_name", models.CharField(max_length=20)),
                ],
            ),
        ]
        changes = self.get_changes(initial_state, updated_state)
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            ["RemoveField", "RemoveField"],
        )
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="full_name")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="first_name")
def test_alter_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_name], [self.author_name_longer])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="name", preserve_default=True
)
    def test_supports_functools_partial(self):
        """
        functools.partial values in field kwargs (here FileField.upload_to)
        are compared by their func/args/keywords: identical partials yield
        no changes, while changed args or keywords yield an AlterField.
        """

        def _content_file_name(instance, filename, key, **kwargs):
            return "{}/{}".format(instance, filename)

        def content_file_name(key, **kwargs):
            return functools.partial(_content_file_name, key, **kwargs)

        # An unchanged partial reference.
        before = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "file",
                        models.FileField(
                            max_length=200, upload_to=content_file_name("file")
                        ),
                    ),
                ],
            )
        ]
        after = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "file",
                        models.FileField(
                            max_length=200, upload_to=content_file_name("file")
                        ),
                    ),
                ],
            )
        ]
        changes = self.get_changes(before, after)
        self.assertNumberMigrations(changes, "testapp", 0)
        # A changed partial reference.
        args_changed = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "file",
                        models.FileField(
                            max_length=200, upload_to=content_file_name("other-file")
                        ),
                    ),
                ],
            )
        ]
        changes = self.get_changes(before, args_changed)
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
        # Can't use assertOperationFieldAttributes because we need the
        # deconstructed version, i.e., the exploded func/args/keywords rather
        # than the partial: we don't care if it's not the same instance of the
        # partial, only if it's the same source function, args, and keywords.
        value = changes["testapp"][0].operations[0].field.upload_to
        self.assertEqual(
            (_content_file_name, ("other-file",), {}),
            (value.func, value.args, value.keywords),
        )
        # A partial differing only in keyword arguments is also a change.
        kwargs_changed = [
            ModelState(
                "testapp",
                "Author",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "file",
                        models.FileField(
                            max_length=200,
                            upload_to=content_file_name("file", spam="eggs"),
                        ),
                    ),
                ],
            )
        ]
        changes = self.get_changes(before, kwargs_changed)
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
        value = changes["testapp"][0].operations[0].field.upload_to
        self.assertEqual(
            (_content_file_name, ("file",), {"spam": "eggs"}),
            (value.func, value.args, value.keywords),
        )
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_alter_field_to_not_null_with_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="name", preserve_default=True
)
self.assertOperationFieldAttributes(
changes, "testapp", 0, 0, default="Ada Lovelace"
)
    @mock.patch(
        "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration",
        side_effect=AssertionError("Should not have prompted for not null alteration"),
    )
    def test_alter_field_to_not_null_with_db_default(self, mocked_ask_method):
        """
        Altering a nullable field to NOT NULL with a db_default must not
        prompt for a one-off default (the patched questioner raises if
        consulted).
        """
        changes = self.get_changes(
            [self.author_name_null], [self.author_name_db_default]
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="name", preserve_default=True
        )
        self.assertOperationFieldAttributes(
            changes, "testapp", 0, 0, db_default="Ada Lovelace"
        )
    @mock.patch(
        "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition"
    )
    def test_add_auto_field_does_not_request_default(self, mocked_ask_method):
        """
        Adding an AutoField/BigAutoField/SmallAutoField primary key (while
        demoting the previous pk) never asks for a not-null default.
        """
        initial_state = ModelState(
            "testapp",
            "Author",
            [
                ("pkfield", models.IntegerField(primary_key=True)),
            ],
        )
        for auto_field in [
            models.AutoField,
            models.BigAutoField,
            models.SmallAutoField,
        ]:
            with self.subTest(auto_field=auto_field):
                updated_state = ModelState(
                    "testapp",
                    "Author",
                    [
                        ("id", auto_field(primary_key=True)),
                        ("pkfield", models.IntegerField(primary_key=False)),
                    ],
                )
                self.get_changes([initial_state], [updated_state])
                mocked_ask_method.assert_not_called()
    @mock.patch(
        "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration",
        return_value=models.NOT_PROVIDED,
    )
    def test_alter_field_to_not_null_without_default(self, mocked_ask_method):
        """
        #23609 - Tests autodetection of nullable to non-nullable alterations.

        The questioner answers NOT_PROVIDED, so the AlterField carries no
        default and preserve_default stays True.
        """
        changes = self.get_changes([self.author_name_null], [self.author_name])
        self.assertEqual(mocked_ask_method.call_count, 1)
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="name", preserve_default=True
        )
        self.assertOperationFieldAttributes(
            changes, "testapp", 0, 0, default=models.NOT_PROVIDED
        )
    @mock.patch(
        "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration",
        return_value="Some Name",
    )
    def test_alter_field_to_not_null_oneoff_default(self, mocked_ask_method):
        """
        #23609 - Tests autodetection of nullable to non-nullable alterations.

        The questioner supplies a one-off default, so the AlterField carries
        it with preserve_default=False.
        """
        changes = self.get_changes([self.author_name_null], [self.author_name])
        self.assertEqual(mocked_ask_method.call_count, 1)
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="name", preserve_default=False
        )
        self.assertOperationFieldAttributes(
            changes, "testapp", 0, 0, default="Some Name"
        )
def test_rename_field(self):
"""Tests autodetection of renamed fields."""
changes = self.get_changes(
[self.author_name],
[self.author_name_renamed],
MigrationQuestioner({"ask_rename": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameField"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="name", new_name="names"
)
    def test_rename_field_foreign_key_to_field(self):
        """
        Renaming a field that another model's FK targets via to_field is
        detected as a plain RenameField (the FK's to_field is updated in
        lockstep, so no extra operations appear).
        """
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("field", models.IntegerField(unique=True)),
                ],
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "foo",
                        models.ForeignKey("app.Foo", models.CASCADE, to_field="field"),
                    ),
                ],
            ),
        ]
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("renamed_field", models.IntegerField(unique=True)),
                ],
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "foo",
                        models.ForeignKey(
                            "app.Foo", models.CASCADE, to_field="renamed_field"
                        ),
                    ),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["RenameField"])
        self.assertOperationAttributes(
            changes, "app", 0, 0, old_name="field", new_name="renamed_field"
        )
    def test_foreign_object_from_to_fields_list(self):
        """
        A ForeignObject with from_fields/to_fields given as lists compares
        equal to a deep copy of itself, so no changes are detected.
        """
        author_state = ModelState(
            "app",
            "Author",
            [("id", models.AutoField(primary_key=True))],
        )
        book_state = ModelState(
            "app",
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField()),
                ("author_id", models.IntegerField()),
                (
                    "author",
                    models.ForeignObject(
                        "app.Author",
                        models.CASCADE,
                        from_fields=["author_id"],
                        to_fields=["id"],
                    ),
                ),
            ],
        )
        book_state_copy = copy.deepcopy(book_state)
        changes = self.get_changes(
            [author_state, book_state],
            [author_state, book_state_copy],
        )
        self.assertEqual(changes, {})
    def test_rename_foreign_object_fields(self):
        """
        Renaming fields referenced by a ForeignObject's to_fields (on the
        target model) or from_fields (on the source model) is detected as
        RenameField operations on the model owning the renamed fields.
        """
        fields = ("first", "second")
        renamed_fields = ("first_renamed", "second_renamed")
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("first", models.IntegerField()),
                    ("second", models.IntegerField()),
                ],
                options={"unique_together": {fields}},
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("first", models.IntegerField()),
                    ("second", models.IntegerField()),
                    (
                        "foo",
                        models.ForeignObject(
                            "app.Foo",
                            models.CASCADE,
                            from_fields=fields,
                            to_fields=fields,
                        ),
                    ),
                ],
            ),
        ]
        # Case 1: to_fields renames.
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("first_renamed", models.IntegerField()),
                    ("second_renamed", models.IntegerField()),
                ],
                options={"unique_together": {renamed_fields}},
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("first", models.IntegerField()),
                    ("second", models.IntegerField()),
                    (
                        "foo",
                        models.ForeignObject(
                            "app.Foo",
                            models.CASCADE,
                            from_fields=fields,
                            to_fields=renamed_fields,
                        ),
                    ),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        # Renames happen on Foo (the to_fields side) plus the
        # unique_together update that tracks the new names.
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(
            changes, "app", 0, ["RenameField", "RenameField", "AlterUniqueTogether"]
        )
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            0,
            model_name="foo",
            old_name="first",
            new_name="first_renamed",
        )
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            1,
            model_name="foo",
            old_name="second",
            new_name="second_renamed",
        )
        # Case 2: from_fields renames.
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("first", models.IntegerField()),
                    ("second", models.IntegerField()),
                ],
                options={"unique_together": {fields}},
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("first_renamed", models.IntegerField()),
                    ("second_renamed", models.IntegerField()),
                    (
                        "foo",
                        models.ForeignObject(
                            "app.Foo",
                            models.CASCADE,
                            from_fields=renamed_fields,
                            to_fields=fields,
                        ),
                    ),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        # Renames happen on Bar (the from_fields side); Foo is untouched.
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["RenameField", "RenameField"])
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            0,
            model_name="bar",
            old_name="first",
            new_name="first_renamed",
        )
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            1,
            model_name="bar",
            old_name="second",
            new_name="second_renamed",
        )
    def test_rename_referenced_primary_key(self):
        """
        Renaming a primary key that an FK in the same app points at is
        detected as a single RenameField on the target model.
        """
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.CharField(primary_key=True, serialize=False)),
                ],
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("foo", models.ForeignKey("app.Foo", models.CASCADE)),
                ],
            ),
        ]
        after = [
            ModelState(
                "app",
                "Foo",
                [("renamed_id", models.CharField(primary_key=True, serialize=False))],
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("foo", models.ForeignKey("app.Foo", models.CASCADE)),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["RenameField"])
        self.assertOperationAttributes(
            changes, "app", 0, 0, old_name="id", new_name="renamed_id"
        )
    def test_rename_field_preserved_db_column(self):
        """
        RenameField is used if a field is renamed and db_column equal to the
        old field's column is added.
        """
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("field", models.IntegerField()),
                ],
            ),
        ]
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("renamed_field", models.IntegerField(db_column="field")),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        # An AlterField first pins the explicit db_column on the old name,
        # then the RenameField changes only the Python-level name.
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["AlterField", "RenameField"])
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            0,
            model_name="foo",
            name="field",
        )
        self.assertEqual(
            changes["app"][0].operations[0].field.deconstruct(),
            (
                "field",
                "django.db.models.IntegerField",
                [],
                {"db_column": "field"},
            ),
        )
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            1,
            model_name="foo",
            old_name="field",
            new_name="renamed_field",
        )
    def test_rename_related_field_preserved_db_column(self):
        """
        Renaming an FK while pinning its old column via db_column is detected
        as AlterField (recording the db_column) followed by RenameField.
        """
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("foo", models.ForeignKey("app.Foo", models.CASCADE)),
                ],
            ),
        ]
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            ModelState(
                "app",
                "Bar",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "renamed_foo",
                        models.ForeignKey(
                            "app.Foo", models.CASCADE, db_column="foo_id"
                        ),
                    ),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["AlterField", "RenameField"])
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            0,
            model_name="bar",
            name="foo",
        )
        self.assertEqual(
            changes["app"][0].operations[0].field.deconstruct(),
            (
                "foo",
                "django.db.models.ForeignKey",
                [],
                {"to": "app.foo", "on_delete": models.CASCADE, "db_column": "foo_id"},
            ),
        )
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            1,
            model_name="bar",
            old_name="foo",
            new_name="renamed_foo",
        )
    def test_rename_field_preserve_db_column_preserve_constraint(self):
        """
        Renaming a field that already had a db_column attribute and a
        constraint generates two no-op operations: RenameField and
        AlterConstraint.
        """
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("field", models.IntegerField(db_column="full_field1_name")),
                    ("field2", models.IntegerField()),
                ],
                options={
                    "constraints": [
                        models.UniqueConstraint(
                            fields=["field", "field2"],
                            name="unique_field",
                        ),
                    ],
                },
            ),
        ]
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "full_field1_name",
                        models.IntegerField(db_column="full_field1_name"),
                    ),
                    (
                        "field2",
                        models.IntegerField(),
                    ),
                ],
                options={
                    "constraints": [
                        models.UniqueConstraint(
                            fields=["full_field1_name", "field2"],
                            name="unique_field",
                        ),
                    ],
                },
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["RenameField", "AlterConstraint"])
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            1,
            model_name="foo",
            name="unique_field",
        )
        # The AlterConstraint carries the constraint spelled with the new
        # field name, as defined in `after`.
        self.assertEqual(
            changes["app"][0].operations[1].deconstruct(),
            (
                "AlterConstraint",
                [],
                {
                    "constraint": after[0].options["constraints"][0],
                    "model_name": "foo",
                    "name": "unique_field",
                },
            ),
        )
    def test_rename_field_without_db_column_recreate_constraint(self):
        """Renaming a field without given db_column recreates a constraint."""
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("field", models.IntegerField()),
                ],
                options={
                    "constraints": [
                        models.UniqueConstraint(
                            fields=["field"],
                            name="unique_field",
                        ),
                    ],
                },
            ),
        ]
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "full_field1_name",
                        models.IntegerField(),
                    ),
                ],
                options={
                    "constraints": [
                        models.UniqueConstraint(
                            fields=["full_field1_name"],
                            name="unique_field",
                        ),
                    ],
                },
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        self.assertNumberMigrations(changes, "app", 1)
        # The constraint is dropped and re-added around the rename.
        self.assertOperationTypes(
            changes, "app", 0, ["RemoveConstraint", "RenameField", "AddConstraint"]
        )
    def test_rename_field_preserve_db_column_recreate_constraint(self):
        """Removing a field from the constraint triggers recreation."""
        before = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("field1", models.IntegerField(db_column="field1")),
                    ("field2", models.IntegerField(db_column="field2")),
                ],
                options={
                    "constraints": [
                        models.UniqueConstraint(
                            fields=["field1", "field2"],
                            name="unique_fields",
                        ),
                    ],
                },
            ),
        ]
        after = [
            ModelState(
                "app",
                "Foo",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("renamed_field1", models.IntegerField(db_column="field1")),
                    ("renamed_field2", models.IntegerField(db_column="field2")),
                ],
                options={
                    "constraints": [
                        # Narrowed to a single field, so even with preserved
                        # db_columns the constraint cannot be kept as-is.
                        models.UniqueConstraint(
                            fields=["renamed_field1"],
                            name="unique_fields",
                        ),
                    ],
                },
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename": True})
        )
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(
            changes,
            "app",
            0,
            [
                "RemoveConstraint",
                "RenameField",
                "RenameField",
                "AddConstraint",
            ],
        )
    def test_rename_field_with_renamed_model(self):
        """
        Renaming a model and one of its fields in one step is detected as
        RenameModel followed by RenameField.
        """
        changes = self.get_changes(
            [self.author_name],
            [
                ModelState(
                    "testapp",
                    "RenamedAuthor",
                    [
                        ("id", models.AutoField(primary_key=True)),
                        ("renamed_name", models.CharField(max_length=200)),
                    ],
                ),
            ],
            MigrationQuestioner({"ask_rename_model": True, "ask_rename": True}),
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RenameModel", "RenameField"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            old_name="Author",
            new_name="RenamedAuthor",
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            1,
            old_name="name",
            new_name="renamed_name",
        )
def test_rename_model(self):
"""Tests autodetection of renamed models."""
changes = self.get_changes(
[self.author_with_book, self.book],
[self.author_renamed_with_book, self.book_with_author_renamed],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, old_name="Author", new_name="Writer"
)
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, "otherapp", 0)
    def test_rename_model_case(self):
        """
        Model name is case-insensitive. Changing case doesn't lead to any
        autodetected operations.
        """
        author_renamed = ModelState(
            "testapp",
            "author",
            [
                ("id", models.AutoField(primary_key=True)),
            ],
        )
        changes = self.get_changes(
            [self.author_empty, self.book],
            [author_renamed, self.book],
            questioner=MigrationQuestioner({"ask_rename_model": True}),
        )
        # No changes in either the renamed model's app or the FK holder's.
        self.assertNumberMigrations(changes, "testapp", 0)
        self.assertNumberMigrations(changes, "otherapp", 0)
    def test_renamed_referenced_m2m_model_case(self):
        """
        A case-only rename of a model referenced through an M2M relation
        detects no changes anywhere.
        """
        publisher_renamed = ModelState(
            "testapp",
            "publisher",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
        )
        changes = self.get_changes(
            [self.publisher, self.author_with_m2m],
            [publisher_renamed, self.author_with_m2m],
            questioner=MigrationQuestioner({"ask_rename_model": True}),
        )
        self.assertNumberMigrations(changes, "testapp", 0)
        self.assertNumberMigrations(changes, "otherapp", 0)
    def test_rename_m2m_through_model(self):
        """
        Tests autodetection of renamed models that are used in M2M relations
        as through models.
        """
        changes = self.get_changes(
            [self.author_with_m2m_through, self.publisher, self.contract],
            [
                self.author_with_renamed_m2m_through,
                self.publisher,
                self.contract_renamed,
            ],
            MigrationQuestioner({"ask_rename_model": True}),
        )
        # Right number/type of migrations? Just the RenameModel of the
        # through model.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, old_name="Contract", new_name="Deal"
        )
    def test_rename_model_with_renamed_rel_field(self):
        """
        Tests autodetection of renamed models while simultaneously renaming
        one of the fields that relate to the renamed model.
        """
        changes = self.get_changes(
            [self.author_with_book, self.book],
            [self.author_renamed_with_book, self.book_with_field_and_author_renamed],
            MigrationQuestioner({"ask_rename": True, "ask_rename_model": True}),
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, old_name="Author", new_name="Writer"
        )
        # Right number/type of migrations for related field rename?
        # Alter is already taken care of.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["RenameField"])
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, old_name="author", new_name="writer"
        )
    def test_rename_model_with_fks_in_different_position(self):
        """
        #24537 - The order of fields in a model does not influence
        the RenameModel detection.
        """
        before = [
            ModelState(
                "testapp",
                "EntityA",
                [
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            ModelState(
                "testapp",
                "EntityB",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("some_label", models.CharField(max_length=255)),
                    ("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
                ],
            ),
        ]
        # Same fields as EntityB, reordered, under a new model name.
        after = [
            ModelState(
                "testapp",
                "EntityA",
                [
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            ModelState(
                "testapp",
                "RenamedEntityB",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
                    ("some_label", models.CharField(max_length=255)),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename_model": True})
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, old_name="EntityB", new_name="RenamedEntityB"
        )
    def test_rename_model_reverse_relation_dependencies(self):
        """
        The migration to rename a model pointed to by a foreign key in
        another app must run after the other app's migration that adds the
        foreign key with model's original name. Therefore, the renaming
        migration has a dependency on that other migration.
        """
        before = [
            ModelState(
                "testapp",
                "EntityA",
                [
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            ModelState(
                "otherapp",
                "EntityB",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
                ],
            ),
        ]
        after = [
            ModelState(
                "testapp",
                "RenamedEntityA",
                [
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            ModelState(
                "otherapp",
                "EntityB",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "entity_a",
                        models.ForeignKey("testapp.RenamedEntityA", models.CASCADE),
                    ),
                ],
            ),
        ]
        changes = self.get_changes(
            before, after, MigrationQuestioner({"ask_rename_model": True})
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        # The rename depends on otherapp's initial migration (the FK adder).
        self.assertMigrationDependencies(
            changes, "testapp", 0, [("otherapp", "__first__")]
        )
        self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, old_name="EntityA", new_name="RenamedEntityA"
        )
    def test_fk_dependency(self):
        """Having a ForeignKey automatically adds a dependency."""
        # Note that testapp (author) has no dependencies,
        # otherapp (book) depends on testapp (author),
        # thirdapp (edition) depends on otherapp (book)
        changes = self.get_changes([], [self.author_name, self.book, self.edition])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertMigrationDependencies(changes, "testapp", 0, [])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
        self.assertMigrationDependencies(
            changes, "otherapp", 0, [("testapp", "auto_1")]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "thirdapp", 1)
        self.assertOperationTypes(changes, "thirdapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="Edition")
        self.assertMigrationDependencies(
            changes, "thirdapp", 0, [("otherapp", "auto_1")]
        )
    def test_proxy_fk_dependency(self):
        """
        FK dependencies still work on proxy models: the FK targets the proxy
        in thirdapp, so otherapp depends on thirdapp, which in turn depends
        on the concrete model's app.
        """
        # Note that testapp (author) has no dependencies,
        # otherapp (book) depends on testapp (authorproxy)
        changes = self.get_changes(
            [], [self.author_empty, self.author_proxy_third, self.book_proxy_fk]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertMigrationDependencies(changes, "testapp", 0, [])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
        self.assertMigrationDependencies(
            changes, "otherapp", 0, [("thirdapp", "auto_1")]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "thirdapp", 1)
        self.assertOperationTypes(changes, "thirdapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="AuthorProxy")
        self.assertMigrationDependencies(
            changes, "thirdapp", 0, [("testapp", "auto_1")]
        )
def test_same_app_no_fk_dependency(self):
"""
A migration with a FK between two models of the same app
does not have a dependency to itself.
"""
changes = self.get_changes([], [self.author_with_publisher, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author")
self.assertMigrationDependencies(changes, "testapp", 0, [])
    def test_circular_fk_dependency(self):
        """
        Having a circular ForeignKey dependency automatically
        resolves the situation into 2 migrations on one side and 1 on the
        other.
        """
        changes = self.get_changes(
            [], [self.author_with_book, self.book, self.publisher_with_book]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        self.assertMigrationDependencies(
            changes, "testapp", 0, [("otherapp", "auto_1")]
        )
        # Right number/type of migrations? The FK back to testapp is deferred
        # into a second otherapp migration via an AddField.
        self.assertNumberMigrations(changes, "otherapp", 2)
        self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
        self.assertOperationTypes(changes, "otherapp", 1, ["AddField"])
        self.assertMigrationDependencies(changes, "otherapp", 0, [])
        self.assertMigrationDependencies(
            changes, "otherapp", 1, [("otherapp", "auto_1"), ("testapp", "auto_1")]
        )
        # both split migrations should be `initial`
        self.assertTrue(changes["otherapp"][0].initial)
        self.assertTrue(changes["otherapp"][1].initial)
    def test_same_app_circular_fk_dependency(self):
        """
        A migration with a FK between two models of the same app does
        not have a dependency to itself.
        """
        changes = self.get_changes(
            [], [self.author_with_publisher, self.publisher_with_author]
        )
        # Right number/type of migrations? The in-app cycle is broken by
        # deferring one FK into a trailing AddField in the same migration.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["CreateModel", "CreateModel", "AddField"]
        )
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
        self.assertMigrationDependencies(changes, "testapp", 0, [])
    def test_same_app_circular_fk_dependency_with_unique_together_and_indexes(self):
        """
        #22275 - A migration with circular FK dependency does not try
        to create unique together constraint and indexes before creating all
        required fields first.
        """
        changes = self.get_changes([], [self.knight, self.rabbit])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "eggs", 1)
        self.assertOperationTypes(
            changes,
            "eggs",
            0,
            ["CreateModel", "CreateModel"],
        )
        # unique_together must not appear in the first CreateModel's options,
        # since the fields it covers are not all created yet at that point.
        self.assertNotIn("unique_together", changes["eggs"][0].operations[0].options)
        self.assertMigrationDependencies(changes, "eggs", 0, [])
    def test_alter_db_table_add(self):
        """Tests detection for adding db_table in model's options."""
        changes = self.get_changes(
            [self.author_empty], [self.author_with_db_table_options]
        )
        # Right number/type of migrations? Adding db_table yields a single
        # AlterModelTable with the new table name.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", table="author_one"
        )
    def test_alter_db_table_change(self):
        """Tests detection for changing db_table in model's options'."""
        changes = self.get_changes(
            [self.author_with_db_table_options], [self.author_with_new_db_table_options]
        )
        # Right number/type of migrations? Changing db_table yields a single
        # AlterModelTable with the new table name.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", table="author_two"
        )
    def test_alter_db_table_remove(self):
        """Tests detection for removing db_table in model's options."""
        changes = self.get_changes(
            [self.author_with_db_table_options], [self.author_empty]
        )
        # Right number/type of migrations? Removing db_table yields an
        # AlterModelTable with table=None (back to the default table name).
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", table=None
        )
    def test_alter_db_table_no_changes(self):
        """
        Alter_db_table doesn't generate a migration if no changes have been
        made.
        """
        # Identical from/to states: no migrations at all should be produced.
        changes = self.get_changes(
            [self.author_with_db_table_options], [self.author_with_db_table_options]
        )
        # Right number of migrations?
        self.assertEqual(len(changes), 0)
    def test_keep_db_table_with_model_change(self):
        """
        Tests when model changes but db_table stays as-is, autodetector must
        not create more than one operation.
        """
        # The questioner auto-confirms the model rename prompt.
        changes = self.get_changes(
            [self.author_with_db_table_options],
            [self.author_renamed_with_db_table_options],
            MigrationQuestioner({"ask_rename_model": True}),
        )
        # Right number/type of migrations? Only a RenameModel; no
        # AlterModelTable since db_table is unchanged.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor"
        )
    def test_alter_db_table_with_model_change(self):
        """
        Tests when model and db_table changes, autodetector must create two
        operations.
        """
        # The questioner auto-confirms the model rename prompt.
        changes = self.get_changes(
            [self.author_with_db_table_options],
            [self.author_renamed_with_new_db_table_options],
            MigrationQuestioner({"ask_rename_model": True}),
        )
        # Right number/type of migrations? Rename first, then retable.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["RenameModel", "AlterModelTable"]
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor"
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 1, name="newauthor", table="author_three"
        )
    def test_alter_db_table_comment_add(self):
        """Adding db_table_comment generates an AlterModelTableComment."""
        changes = self.get_changes(
            [self.author_empty], [self.author_with_db_table_comment]
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTableComment"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", table_comment="Table comment"
        )
    def test_alter_db_table_comment_change(self):
        """Changing db_table_comment generates an AlterModelTableComment."""
        author_with_new_db_table_comment = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
            ],
            {"db_table_comment": "New table comment"},
        )
        changes = self.get_changes(
            [self.author_with_db_table_comment],
            [author_with_new_db_table_comment],
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTableComment"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="author",
            table_comment="New table comment",
        )
def test_alter_db_table_comment_remove(self):
changes = self.get_changes(
[self.author_with_db_table_comment],
[self.author_empty],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTableComment"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="author", db_table_comment=None
)
    def test_alter_db_table_comment_no_changes(self):
        """An unchanged db_table_comment generates no migration."""
        changes = self.get_changes(
            [self.author_with_db_table_comment],
            [self.author_with_db_table_comment],
        )
        self.assertNumberMigrations(changes, "testapp", 0)
    def test_identical_regex_doesnt_alter(self):
        """
        A field validator whose compiled regex is equal to validate_slug's
        pattern is considered unchanged and generates no migration.
        """
        from_state = ModelState(
            "testapp",
            "model",
            [
                (
                    "id",
                    models.AutoField(
                        primary_key=True,
                        validators=[
                            RegexValidator(
                                re.compile("^[-a-zA-Z0-9_]+\\Z"),
                                "Enter a valid “slug” consisting of letters, numbers, "
                                "underscores or hyphens.",
                                "invalid",
                            )
                        ],
                    ),
                )
            ],
        )
        to_state = ModelState(
            "testapp",
            "model",
            [("id", models.AutoField(primary_key=True, validators=[validate_slug]))],
        )
        changes = self.get_changes([from_state], [to_state])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 0)
    def test_different_regex_does_alter(self):
        """
        A validator with a different regex pattern (and flags) than
        validate_slug's is a real change and generates an AlterField.
        """
        from_state = ModelState(
            "testapp",
            "model",
            [
                (
                    "id",
                    models.AutoField(
                        primary_key=True,
                        validators=[
                            RegexValidator(
                                re.compile("^[a-z]+\\Z", 32),
                                "Enter a valid “slug” consisting of letters, numbers, "
                                "underscores or hyphens.",
                                "invalid",
                            )
                        ],
                    ),
                )
            ],
        )
        to_state = ModelState(
            "testapp",
            "model",
            [("id", models.AutoField(primary_key=True, validators=[validate_slug]))],
        )
        changes = self.get_changes([from_state], [to_state])
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
    def test_alter_regex_string_to_compiled_regex(self):
        """
        Swapping a validator's regex from a plain string to a compiled
        pattern is detected as a change and generates an AlterField.
        """
        regex_string = "^[a-z]+$"
        from_state = ModelState(
            "testapp",
            "model",
            [
                (
                    "id",
                    models.AutoField(
                        primary_key=True, validators=[RegexValidator(regex_string)]
                    ),
                )
            ],
        )
        to_state = ModelState(
            "testapp",
            "model",
            [
                (
                    "id",
                    models.AutoField(
                        primary_key=True,
                        validators=[RegexValidator(re.compile(regex_string))],
                    ),
                )
            ],
        )
        changes = self.get_changes([from_state], [to_state])
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
    def test_empty_unique_together(self):
        """Empty unique_together shouldn't generate a migration."""
        # Explicitly testing for not specified, since this is the case after
        # a CreateModel operation w/o any definition on the original model
        model_state_not_specified = ModelState(
            "a", "model", [("id", models.AutoField(primary_key=True))]
        )
        # Explicitly testing for None, since this was the issue in #23452 after
        # an AlterUniqueTogether operation with e.g. () as value
        model_state_none = ModelState(
            "a",
            "model",
            [("id", models.AutoField(primary_key=True))],
            {
                "unique_together": None,
            },
        )
        # Explicitly testing for the empty set, since we now always have sets.
        # During removal (('col1', 'col2'),) --> () this becomes set([])
        model_state_empty = ModelState(
            "a",
            "model",
            [("id", models.AutoField(primary_key=True))],
            {
                "unique_together": set(),
            },
        )

        # Helper: any operation produced for a transition is a failure.
        def test(from_state, to_state, msg):
            changes = self.get_changes([from_state], [to_state])
            if changes:
                ops = ", ".join(
                    o.__class__.__name__ for o in changes["a"][0].operations
                )
                self.fail("Created operation(s) %s from %s" % (ops, msg))

        # Every pairwise transition among the three "empty" representations
        # must be a no-op.
        tests = (
            (
                model_state_not_specified,
                model_state_not_specified,
                '"not specified" to "not specified"',
            ),
            (model_state_not_specified, model_state_none, '"not specified" to "None"'),
            (
                model_state_not_specified,
                model_state_empty,
                '"not specified" to "empty"',
            ),
            (model_state_none, model_state_not_specified, '"None" to "not specified"'),
            (model_state_none, model_state_none, '"None" to "None"'),
            (model_state_none, model_state_empty, '"None" to "empty"'),
            (
                model_state_empty,
                model_state_not_specified,
                '"empty" to "not specified"',
            ),
            (model_state_empty, model_state_none, '"empty" to "None"'),
            (model_state_empty, model_state_empty, '"empty" to "empty"'),
        )
        for t in tests:
            test(*t)
    def test_create_model_with_indexes(self):
        """Test creation of new model with indexes already defined."""
        added_index = models.Index(
            fields=["name"], name="create_model_with_indexes_idx"
        )
        author = ModelState(
            "otherapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
            ],
            {
                "indexes": [added_index],
            },
        )
        changes = self.get_changes([], [author])
        # Right number of migrations?
        self.assertEqual(len(changes["otherapp"]), 1)
        # Right number of actions? The index is folded into CreateModel's
        # options rather than emitted as a separate AddIndex.
        migration = changes["otherapp"][0]
        self.assertEqual(len(migration.operations), 1)
        # Right actions order?
        self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Author")
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            0,
            name="Author",
            options={"indexes": [added_index]},
        )
    def test_add_indexes(self):
        """Test change detection of new indexes."""
        changes = self.get_changes(
            [self.author_empty, self.book], [self.author_empty, self.book_indexes]
        )
        # A new index on an existing model becomes a single AddIndex.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["AddIndex"])
        added_index = models.Index(
            fields=["author", "title"], name="book_title_author_idx"
        )
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, model_name="book", index=added_index
        )
    def test_remove_indexes(self):
        """Test change detection of removed indexes."""
        changes = self.get_changes(
            [self.author_empty, self.book_indexes], [self.author_empty, self.book]
        )
        # Right number/type of migrations? A dropped index becomes a single
        # RemoveIndex referencing the index by name.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["RemoveIndex"])
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, model_name="book", name="book_title_author_idx"
        )
    def test_remove_field_with_model_options(self):
        """
        Deleting models whose options include indexes/constraints removes
        those index/constraint operations before the fields and models.
        """
        before_state = [
            ModelState("testapp", "Animal", []),
            ModelState(
                "testapp",
                "Dog",
                fields=[
                    ("name", models.CharField(max_length=100)),
                    (
                        "animal",
                        models.ForeignKey("testapp.Animal", on_delete=models.CASCADE),
                    ),
                ],
                options={
                    "indexes": [
                        models.Index(fields=("animal", "name"), name="animal_name_idx")
                    ],
                    "constraints": [
                        models.UniqueConstraint(
                            fields=("animal", "name"), name="animal_name_idx"
                        ),
                    ],
                },
            ),
        ]
        changes = self.get_changes(before_state, [])
        # Right number/type of migrations? Index and constraint removals must
        # precede the RemoveField/DeleteModel operations they depend on.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            [
                "RemoveIndex",
                "RemoveConstraint",
                "RemoveField",
                "DeleteModel",
                "DeleteModel",
            ],
        )
    def test_remove_field_with_remove_index_or_constraint_dependency(self):
        """
        Removing a field that is covered by a constraint removes the
        constraint before the field.
        """
        before_state = [
            ModelState("testapp", "Category", []),
            ModelState(
                "testapp",
                "Model",
                fields=[
                    ("date", models.DateField(auto_now=True)),
                    (
                        "category",
                        models.ForeignKey(
                            "testapp.Category", models.SET_NULL, null=True
                        ),
                    ),
                ],
                options={
                    "constraints": [
                        models.UniqueConstraint(
                            fields=("date", "category"), name="unique_category_for_date"
                        ),
                    ]
                },
            ),
        ]
        changes = self.get_changes(
            before_state,
            [
                ModelState(
                    "testapp",
                    "Model",
                    fields=[
                        ("date", models.DateField(auto_now=True)),
                    ],
                ),
            ],
        )
        # Right number/type of migrations? RemoveConstraint must come before
        # the RemoveField for "category" that it depends on.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            ["RemoveConstraint", "RemoveField", "DeleteModel"],
        )
    def test_rename_indexes(self):
        """
        An index identical except for its name is detected as a RenameIndex
        rather than a remove/add pair.
        """
        book_renamed_indexes = ModelState(
            "otherapp",
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("author", models.ForeignKey("testapp.Author", models.CASCADE)),
                ("title", models.CharField(max_length=200)),
            ],
            {
                "indexes": [
                    models.Index(
                        fields=["author", "title"], name="renamed_book_title_author_idx"
                    )
                ],
            },
        )
        changes = self.get_changes(
            [self.author_empty, self.book_indexes],
            [self.author_empty, book_renamed_indexes],
        )
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["RenameIndex"])
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            0,
            model_name="book",
            new_name="renamed_book_title_author_idx",
            old_name="book_title_author_idx",
        )
    def test_order_fields_indexes(self):
        """Test change detection of reordering of fields in indexes."""
        changes = self.get_changes(
            [self.author_empty, self.book_indexes],
            [self.author_empty, self.book_unordered_indexes],
        )
        # Reordered fields are a different index: old one removed, new added.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["RemoveIndex", "AddIndex"])
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, model_name="book", name="book_title_author_idx"
        )
        added_index = models.Index(
            fields=["title", "author"], name="book_author_title_idx"
        )
        self.assertOperationAttributes(
            changes, "otherapp", 0, 1, model_name="book", index=added_index
        )
    def test_create_model_with_check_constraint(self):
        """Test creation of new model with constraints already defined."""
        author = ModelState(
            "otherapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
            ],
            {
                "constraints": [
                    models.CheckConstraint(
                        condition=models.Q(name__contains="Bob"),
                        name="name_contains_bob",
                    )
                ]
            },
        )
        changes = self.get_changes([], [author])
        constraint = models.CheckConstraint(
            condition=models.Q(name__contains="Bob"), name="name_contains_bob"
        )
        # Right number of migrations?
        self.assertEqual(len(changes["otherapp"]), 1)
        # Right number of actions? The constraint is folded into CreateModel's
        # options rather than emitted as a separate AddConstraint.
        migration = changes["otherapp"][0]
        self.assertEqual(len(migration.operations), 1)
        # Right actions order?
        self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            0,
            name="Author",
            options={"constraints": [constraint]},
        )
    def test_add_constraints(self):
        """Test change detection of new constraints."""
        changes = self.get_changes(
            [self.author_name], [self.author_name_check_constraint]
        )
        # A new constraint on an existing model becomes a single AddConstraint.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AddConstraint"])
        added_constraint = models.CheckConstraint(
            condition=models.Q(name__contains="Bob"), name="name_contains_bob"
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, model_name="author", constraint=added_constraint
        )
    def test_add_constraints_with_new_model(self):
        """
        A constraint spanning a FK to a newly created model is added only
        after the referenced model and the FK field exist.
        """
        book_with_unique_title_and_pony = ModelState(
            "otherapp",
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("title", models.CharField(max_length=200)),
                ("pony", models.ForeignKey("otherapp.Pony", models.CASCADE)),
            ],
            {
                "constraints": [
                    models.UniqueConstraint(
                        fields=["title", "pony"],
                        name="unique_title_pony",
                    )
                ]
            },
        )
        changes = self.get_changes(
            [self.book_with_no_author],
            [book_with_unique_title_and_pony, self.other_pony],
        )
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["CreateModel", "AddField", "AddConstraint"],
        )
    def test_add_constraints_with_dict_keys(self):
        """
        A CheckConstraint built from dict keys is equal to one built from
        the equivalent tuple, so no migration is generated.
        """
        book_types = {"F": "Fantasy", "M": "Mystery"}
        book_with_type = ModelState(
            "testapp",
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("type", models.CharField(max_length=1)),
            ],
            {
                "constraints": [
                    models.CheckConstraint(
                        condition=models.Q(type__in=book_types.keys()),
                        name="book_type_check",
                    ),
                ],
            },
        )
        book_with_resolved_type = ModelState(
            "testapp",
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("type", models.CharField(max_length=1)),
            ],
            {
                "constraints": [
                    models.CheckConstraint(
                        condition=models.Q(("type__in", tuple(book_types))),
                        name="book_type_check",
                    ),
                ],
            },
        )
        changes = self.get_changes([book_with_type], [book_with_resolved_type])
        self.assertEqual(len(changes), 0)
    def test_add_index_with_new_model(self):
        """
        An index spanning a FK to a newly created model is added only after
        the referenced model and the FK field exist.
        """
        book_with_index_title_and_pony = ModelState(
            "otherapp",
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("title", models.CharField(max_length=200)),
                ("pony", models.ForeignKey("otherapp.Pony", models.CASCADE)),
            ],
            {
                "indexes": [
                    models.Index(fields=["title", "pony"], name="index_title_pony"),
                ]
            },
        )
        changes = self.get_changes(
            [self.book_with_no_author],
            [book_with_index_title_and_pony, self.other_pony],
        )
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["CreateModel", "AddField", "AddIndex"],
        )
    def test_alter_constraint(self):
        """
        Changing only non-database attributes of a constraint
        (violation_error_code / violation_error_message) generates an
        AlterConstraint, one migration per affected app.
        """
        book_constraint = models.CheckConstraint(
            condition=models.Q(title__contains="title"),
            name="title_contains_title",
        )
        book_altered_constraint = models.CheckConstraint(
            condition=models.Q(title__contains="title"),
            name="title_contains_title",
            violation_error_code="error_code",
        )
        author_altered_constraint = models.CheckConstraint(
            condition=models.Q(name__contains="Bob"),
            name="name_contains_bob",
            violation_error_message="Name doesn't contain Bob",
        )
        # Copy the class-level fixtures so their options can be replaced
        # without mutating shared state.
        book_check_constraint = copy.deepcopy(self.book)
        book_check_constraint_with_error_message = copy.deepcopy(self.book)
        author_name_check_constraint_with_error_message = copy.deepcopy(
            self.author_name_check_constraint
        )
        book_check_constraint.options = {"constraints": [book_constraint]}
        book_check_constraint_with_error_message.options = {
            "constraints": [book_altered_constraint]
        }
        author_name_check_constraint_with_error_message.options = {
            "constraints": [author_altered_constraint]
        }
        changes = self.get_changes(
            [self.author_name_check_constraint, book_check_constraint],
            [
                author_name_check_constraint_with_error_message,
                book_check_constraint_with_error_message,
            ],
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterConstraint"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            model_name="author",
            name="name_contains_bob",
            constraint=author_altered_constraint,
        )
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["AlterConstraint"])
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            0,
            model_name="book",
            name="title_contains_title",
            constraint=book_altered_constraint,
        )
        # Book has a FK to Author, so otherapp still depends on testapp.
        self.assertMigrationDependencies(
            changes, "otherapp", 0, [("testapp", "auto_1")]
        )
    def test_remove_constraints(self):
        """Test change detection of removed constraints."""
        changes = self.get_changes(
            [self.author_name_check_constraint], [self.author_name]
        )
        # Right number/type of migrations? A dropped constraint becomes a
        # single RemoveConstraint referencing the constraint by name.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RemoveConstraint"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, model_name="author", name="name_contains_bob"
        )
    def test_constraint_dropped_and_recreated(self):
        """
        Changing a constraint's condition (here only the case of "Bob")
        requires a database change, so the constraint is dropped and
        recreated rather than altered in place.
        """
        altered_constraint = models.CheckConstraint(
            condition=models.Q(name__contains="bob"),
            name="name_contains_bob",
        )
        # Copy the fixture so its options can be replaced without mutating
        # shared class-level state.
        author_name_check_constraint_lowercased = copy.deepcopy(
            self.author_name_check_constraint
        )
        author_name_check_constraint_lowercased.options = {
            "constraints": [altered_constraint]
        }
        changes = self.get_changes(
            [self.author_name_check_constraint],
            [author_name_check_constraint_lowercased],
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["RemoveConstraint", "AddConstraint"]
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            model_name="author",
            name="name_contains_bob",
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            1,
            model_name="author",
            constraint=altered_constraint,
        )
    def test_add_unique_together(self):
        """Tests unique_together detection."""
        changes = self.get_changes(
            [self.author_empty, self.book],
            [self.author_empty, self.book_unique_together],
        )
        # Right number/type of migrations? Adding unique_together becomes a
        # single AlterUniqueTogether with the new value.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"])
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            0,
            name="book",
            unique_together={("author", "title")},
        )
    def test_remove_unique_together(self):
        """Tests unique_together detection."""
        changes = self.get_changes(
            [self.author_empty, self.book_unique_together],
            [self.author_empty, self.book],
        )
        # Right number/type of migrations? Removal is expressed as
        # AlterUniqueTogether with the empty set.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"])
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, name="book", unique_together=set()
        )
    def test_unique_together_remove_fk(self):
        """Tests unique_together and field removal detection & ordering"""
        changes = self.get_changes(
            [self.author_empty, self.book_unique_together],
            [self.author_empty, self.book_with_no_author],
        )
        # Right number/type of migrations? unique_together must be cleared
        # before the "author" field it covers is removed.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["AlterUniqueTogether", "RemoveField"],
        )
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, name="book", unique_together=set()
        )
        self.assertOperationAttributes(
            changes, "otherapp", 0, 1, model_name="book", name="author"
        )
    def test_unique_together_no_changes(self):
        """
        unique_together doesn't generate a migration if no
        changes have been made.
        """
        # Identical from/to states: no migrations at all should be produced.
        changes = self.get_changes(
            [self.author_empty, self.book_unique_together],
            [self.author_empty, self.book_unique_together],
        )
        # Right number of migrations?
        self.assertEqual(len(changes), 0)
    def test_unique_together_ordering(self):
        """
        unique_together also triggers on ordering changes.
        """
        changes = self.get_changes(
            [self.author_empty, self.book_unique_together],
            [self.author_empty, self.book_unique_together_2],
        )
        # Right number/type of migrations? ("author", "title") vs
        # ("title", "author") counts as a change.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["AlterUniqueTogether"],
        )
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            0,
            name="book",
            unique_together={("title", "author")},
        )
    def test_add_field_and_unique_together(self):
        """
        Added fields will be created before using them in unique_together.
        """
        changes = self.get_changes(
            [self.author_empty, self.book],
            [self.author_empty, self.book_unique_together_3],
        )
        # Right number/type of migrations? AddField for "newfield" must come
        # before the AlterUniqueTogether that references it.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["AddField", "AlterUniqueTogether"],
        )
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            1,
            name="book",
            unique_together={("title", "newfield")},
        )
    def test_create_model_and_unique_together(self):
        """
        unique_together involving a FK to a newly created model is applied
        only after the referenced model and the FK field exist.
        """
        author = ModelState(
            "otherapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
            ],
        )
        book_with_author = ModelState(
            "otherapp",
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("author", models.ForeignKey("otherapp.Author", models.CASCADE)),
                ("title", models.CharField(max_length=200)),
            ],
            {
                "unique_together": {("title", "author")},
            },
        )
        changes = self.get_changes(
            [self.book_with_no_author], [author, book_with_author]
        )
        # Right number of migrations?
        self.assertEqual(len(changes["otherapp"]), 1)
        # Right number of actions?
        migration = changes["otherapp"][0]
        self.assertEqual(len(migration.operations), 3)
        # Right actions order?
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["CreateModel", "AddField", "AlterUniqueTogether"],
        )
    def test_remove_field_and_unique_together(self):
        """
        Removed fields will be removed after updating unique_together.
        """
        changes = self.get_changes(
            [self.author_empty, self.book_unique_together_3],
            [self.author_empty, self.book_unique_together],
        )
        # Right number/type of migrations? unique_together is narrowed first,
        # then the no-longer-covered "newfield" is dropped.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["AlterUniqueTogether", "RemoveField"],
        )
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            0,
            name="book",
            unique_together={("author", "title")},
        )
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            1,
            model_name="book",
            name="newfield",
        )
    def test_alter_field_and_unique_together(self):
        """Fields are altered after deleting some unique_together."""
        initial_author = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
                ("age", models.IntegerField(db_index=True)),
            ],
            {
                "unique_together": {("name",)},
            },
        )
        # Swaps which field is unique and which is indexed, so both fields
        # change and unique_together moves from ("name",) to ("age",).
        author_reversed_constraints = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200, unique=True)),
                ("age", models.IntegerField()),
            ],
            {
                "unique_together": {("age",)},
            },
        )
        changes = self.get_changes([initial_author], [author_reversed_constraints])

        # Old unique_together is cleared first, fields are altered in between,
        # and the new unique_together is applied last.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            [
                "AlterUniqueTogether",
                "AlterField",
                "AlterField",
                "AlterUniqueTogether",
            ],
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="author",
            unique_together=set(),
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            1,
            model_name="author",
            name="age",
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            2,
            model_name="author",
            name="name",
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            3,
            name="author",
            unique_together={("age",)},
        )
    def test_partly_alter_unique_together_increase(self):
        """
        Growing unique_together (keeping existing tuples, adding new ones)
        results in a single AlterUniqueTogether with the full new value.
        """
        initial_author = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
                ("age", models.IntegerField()),
            ],
            {
                "unique_together": {("name",)},
            },
        )
        author_new_constraints = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
                ("age", models.IntegerField()),
            ],
            {
                "unique_together": {("name",), ("age",)},
            },
        )
        changes = self.get_changes([initial_author], [author_new_constraints])

        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            ["AlterUniqueTogether"],
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="author",
            unique_together={("name",), ("age",)},
        )
    def test_partly_alter_unique_together_decrease(self):
        """
        Shrinking unique_together (dropping some tuples, keeping others)
        results in a single AlterUniqueTogether with the reduced value.
        """
        initial_author = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
                ("age", models.IntegerField()),
            ],
            {
                "unique_together": {("name",), ("age",)},
            },
        )
        author_new_constraints = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
                ("age", models.IntegerField()),
            ],
            {
                "unique_together": {("name",)},
            },
        )
        changes = self.get_changes([initial_author], [author_new_constraints])

        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            ["AlterUniqueTogether"],
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="author",
            unique_together={("name",)},
        )
    def test_rename_field_and_unique_together(self):
        """Fields are renamed before updating unique_together."""
        # The questioner auto-confirms the field rename prompt.
        changes = self.get_changes(
            [self.author_empty, self.book_unique_together_3],
            [self.author_empty, self.book_unique_together_4],
            MigrationQuestioner({"ask_rename": True}),
        )
        # Right number/type of migrations? RenameField must precede the
        # AlterUniqueTogether that references the new field name.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes,
            "otherapp",
            0,
            ["RenameField", "AlterUniqueTogether"],
        )
        self.assertOperationAttributes(
            changes,
            "otherapp",
            0,
            1,
            name="book",
            unique_together={("title", "newfield2")},
        )
    def test_proxy(self):
        """The autodetector correctly deals with proxy models."""
        # First, we test adding a proxy model
        changes = self.get_changes(
            [self.author_empty], [self.author_empty, self.author_proxy]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="AuthorProxy",
            options={"proxy": True, "indexes": [], "constraints": []},
        )
        # Now, we test turning a proxy model into a non-proxy model
        # It should delete the proxy then make the real one
        changes = self.get_changes(
            [self.author_empty, self.author_proxy],
            [self.author_empty, self.author_proxy_notproxy],
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
        # The re-created concrete model has no proxy-related options.
        self.assertOperationAttributes(
            changes, "testapp", 0, 1, name="AuthorProxy", options={}
        )
    def test_proxy_non_model_parent(self):
        """
        A proxy model with a non-model mixin in its bases is created with
        those bases preserved in the CreateModel operation.
        """

        class Mixin:
            pass

        author_proxy_non_model_parent = ModelState(
            "testapp",
            "AuthorProxy",
            [],
            {"proxy": True},
            (Mixin, "testapp.author"),
        )
        changes = self.get_changes(
            [self.author_empty],
            [self.author_empty, author_proxy_non_model_parent],
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="AuthorProxy",
            options={"proxy": True, "indexes": [], "constraints": []},
            bases=(Mixin, "testapp.author"),
        )
    def test_proxy_custom_pk(self):
        """
        #23415 - The autodetector must correctly deal with custom FK on proxy
        models.
        """
        # First, we test the default pk field name
        changes = self.get_changes(
            [], [self.author_empty, self.author_proxy_third, self.book_proxy_fk]
        )
        # The model the FK is pointing from and to.
        self.assertEqual(
            changes["otherapp"][0].operations[0].fields[2][1].remote_field.model,
            "thirdapp.AuthorProxy",
        )
        # Now, we test the custom pk field name
        changes = self.get_changes(
            [], [self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk]
        )
        # The model the FK is pointing from and to. The FK must still
        # reference the proxy, not the concrete base.
        self.assertEqual(
            changes["otherapp"][0].operations[0].fields[2][1].remote_field.model,
            "thirdapp.AuthorProxy",
        )
    def test_proxy_to_mti_with_fk_to_proxy(self):
        """
        Converting a proxy model (pointed at by an FK) into an MTI subclass
        regenerates the model and re-points the FK's concrete target while
        keeping the FK reference string on the (former) proxy name.
        """
        # First, test the pk table and field name.
        to_state = self.make_project_state(
            [self.author_empty, self.author_proxy_third, self.book_proxy_fk],
        )
        changes = self.get_changes([], to_state)
        fk_field = changes["otherapp"][0].operations[0].fields[2][1]
        self.assertEqual(
            to_state.get_concrete_model_key(fk_field.remote_field.model),
            ("testapp", "author"),
        )
        self.assertEqual(fk_field.remote_field.model, "thirdapp.AuthorProxy")
        # Change AuthorProxy to use MTI.
        from_state = to_state.clone()
        to_state = self.make_project_state(
            [self.author_empty, self.author_proxy_third_notproxy, self.book_proxy_fk],
        )
        changes = self.get_changes(from_state, to_state)
        # Right number/type of migrations for the AuthorProxy model?
        self.assertNumberMigrations(changes, "thirdapp", 1)
        self.assertOperationTypes(
            changes, "thirdapp", 0, ["DeleteModel", "CreateModel"]
        )
        # Right number/type of migrations for the Book model with a FK to
        # AuthorProxy?
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"])
        # otherapp should depend on thirdapp.
        self.assertMigrationDependencies(
            changes, "otherapp", 0, [("thirdapp", "auto_1")]
        )
        # Now, test the pk table and field name.
        fk_field = changes["otherapp"][0].operations[0].field
        self.assertEqual(
            to_state.get_concrete_model_key(fk_field.remote_field.model),
            ("thirdapp", "authorproxy"),
        )
        self.assertEqual(fk_field.remote_field.model, "thirdapp.AuthorProxy")
    def test_proxy_to_mti_with_fk_to_proxy_proxy(self):
        """
        Like test_proxy_to_mti_with_fk_to_proxy, but the FK targets a proxy of
        a proxy: converting the middle proxy to MTI must update the concrete
        model resolution for the FK's target.
        """
        # First, test the pk table and field name.
        to_state = self.make_project_state(
            [
                self.author_empty,
                self.author_proxy,
                self.author_proxy_proxy,
                self.book_proxy_proxy_fk,
            ]
        )
        changes = self.get_changes([], to_state)
        fk_field = changes["otherapp"][0].operations[0].fields[1][1]
        self.assertEqual(
            to_state.get_concrete_model_key(fk_field.remote_field.model),
            ("testapp", "author"),
        )
        self.assertEqual(fk_field.remote_field.model, "testapp.AAuthorProxyProxy")
        # Change AuthorProxy to use MTI. FK still points to AAuthorProxyProxy,
        # a proxy of AuthorProxy.
        from_state = to_state.clone()
        to_state = self.make_project_state(
            [
                self.author_empty,
                self.author_proxy_notproxy,
                self.author_proxy_proxy,
                self.book_proxy_proxy_fk,
            ]
        )
        changes = self.get_changes(from_state, to_state)
        # Right number/type of migrations for the AuthorProxy model?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
        # Right number/type of migrations for the Book model with a FK to
        # AAuthorProxyProxy?
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"])
        # otherapp should depend on testapp.
        self.assertMigrationDependencies(
            changes, "otherapp", 0, [("testapp", "auto_1")]
        )
        # Now, test the pk table and field name.
        fk_field = changes["otherapp"][0].operations[0].field
        self.assertEqual(
            to_state.get_concrete_model_key(fk_field.remote_field.model),
            ("testapp", "authorproxy"),
        )
        self.assertEqual(fk_field.remote_field.model, "testapp.AAuthorProxyProxy")
    def test_unmanaged_create(self):
        """The autodetector correctly deals with managed models."""
        # First, we test adding an unmanaged model
        changes = self.get_changes(
            [self.author_empty], [self.author_empty, self.author_unmanaged]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        # The unmanaged flag is recorded as the managed=False Meta option.
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="AuthorUnmanaged", options={"managed": False}
        )
def test_unmanaged_delete(self):
changes = self.get_changes(
[self.author_empty, self.author_unmanaged], [self.author_empty]
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel"])
    def test_unmanaged_to_managed(self):
        """
        Turning an unmanaged model into a managed one produces an
        AlterModelOptions operation that clears the managed=False option.
        """
        # Now, we test turning an unmanaged model into a managed model
        changes = self.get_changes(
            [self.author_empty, self.author_unmanaged],
            [self.author_empty, self.author_unmanaged_managed],
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="authorunmanaged", options={}
        )
    def test_managed_to_unmanaged(self):
        """
        Turning a managed model into an unmanaged one produces an
        AlterModelOptions operation that sets managed=False.
        """
        # Now, we turn managed to unmanaged.
        changes = self.get_changes(
            [self.author_empty, self.author_unmanaged_managed],
            [self.author_empty, self.author_unmanaged],
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="authorunmanaged", options={"managed": False}
        )
    def test_unmanaged_custom_pk(self):
        """
        #23415 - The autodetector must correctly deal with custom FK on
        unmanaged models.
        """
        # First, we test the default pk field name
        changes = self.get_changes([], [self.author_unmanaged_default_pk, self.book])
        # The model the FK on the book model points to.
        # Regardless of pk field name, the FK reference string stays on the
        # Author model itself.
        fk_field = changes["otherapp"][0].operations[0].fields[2][1]
        self.assertEqual(fk_field.remote_field.model, "testapp.Author")
        # Now, we test the custom pk field name
        changes = self.get_changes([], [self.author_unmanaged_custom_pk, self.book])
        # The model the FK on the book model points to.
        fk_field = changes["otherapp"][0].operations[0].fields[2][1]
        self.assertEqual(fk_field.remote_field.model, "testapp.Author")
    @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
    def test_swappable(self):
        """
        A model with an FK to the swappable user model depends on the
        __setting__ placeholder rather than a concrete app migration.
        """
        with isolate_lru_cache(apps.get_swappable_settings_name):
            changes = self.get_changes(
                [self.custom_user], [self.custom_user, self.author_with_custom_user]
            )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertMigrationDependencies(
            changes, "testapp", 0, [("__setting__", "AUTH_USER_MODEL")]
        )
    def test_swappable_lowercase(self):
        """
        A lowercased reference to the swappable user model is still detected
        as swappable and produces a __setting__ dependency.
        """
        model_state = ModelState(
            "testapp",
            "Document",
            [
                ("id", models.AutoField(primary_key=True)),
                (
                    "owner",
                    models.ForeignKey(
                        settings.AUTH_USER_MODEL.lower(),
                        models.CASCADE,
                    ),
                ),
            ],
        )
        with isolate_lru_cache(apps.get_swappable_settings_name):
            changes = self.get_changes([], [model_state])
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Document")
        self.assertMigrationDependencies(
            changes,
            "testapp",
            0,
            [("__setting__", "AUTH_USER_MODEL")],
        )
    @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
    def test_swappable_many_to_many_model_case(self):
        """
        Changing only the case of a swappable M2M target reference is not a
        model change and yields no migrations.
        """
        document_lowercase = ModelState(
            "testapp",
            "Document",
            [
                ("id", models.AutoField(primary_key=True)),
                ("owners", models.ManyToManyField(settings.AUTH_USER_MODEL.lower())),
            ],
        )
        document = ModelState(
            "testapp",
            "Document",
            [
                ("id", models.AutoField(primary_key=True)),
                ("owners", models.ManyToManyField(settings.AUTH_USER_MODEL)),
            ],
        )
        with isolate_lru_cache(apps.get_swappable_settings_name):
            changes = self.get_changes(
                [self.custom_user, document_lowercase],
                [self.custom_user, document],
            )
        self.assertEqual(len(changes), 0)
    def test_swappable_changed(self):
        """
        Changing AUTH_USER_MODEL between the before and after states produces
        an AlterField re-pointing the FK at the new user model.
        """
        with isolate_lru_cache(apps.get_swappable_settings_name):
            before = self.make_project_state([self.custom_user, self.author_with_user])
            # The setting only differs for the "after" state, so the
            # autodetector is driven manually instead of via get_changes().
            with override_settings(AUTH_USER_MODEL="thirdapp.CustomUser"):
                after = self.make_project_state(
                    [self.custom_user, self.author_with_custom_user]
                )
            autodetector = MigrationAutodetector(before, after)
            changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, model_name="author", name="user"
        )
        fk_field = changes["testapp"][0].operations[0].field
        self.assertEqual(fk_field.remote_field.model, "thirdapp.CustomUser")
def test_add_field_with_default(self):
"""#22030 - Adding a field with a default should work."""
changes = self.get_changes([self.author_empty], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_custom_deconstructible(self):
"""
Two instances which deconstruct to the same value aren't considered a
change.
"""
changes = self.get_changes(
[self.author_name_deconstructible_1], [self.author_name_deconstructible_2]
)
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_deconstruct_field_kwarg(self):
"""Field instances are handled correctly by nested deconstruction."""
changes = self.get_changes(
[self.author_name_deconstructible_3], [self.author_name_deconstructible_4]
)
self.assertEqual(changes, {})
    def test_deconstructible_list(self):
        """Nested deconstruction descends into lists."""
        # When lists contain items that deconstruct to identical values, those
        # lists should be considered equal for the purpose of detecting state
        # changes (even if the original items are unequal).
        changes = self.get_changes(
            [self.author_name_deconstructible_list_1],
            [self.author_name_deconstructible_list_2],
        )
        self.assertEqual(changes, {})
        # Legitimate differences within the deconstructed lists should be
        # reported as a change
        changes = self.get_changes(
            [self.author_name_deconstructible_list_1],
            [self.author_name_deconstructible_list_3],
        )
        # Exactly one app has changes.
        self.assertEqual(len(changes), 1)
    def test_deconstructible_tuple(self):
        """Nested deconstruction descends into tuples."""
        # When tuples contain items that deconstruct to identical values, those
        # tuples should be considered equal for the purpose of detecting state
        # changes (even if the original items are unequal).
        changes = self.get_changes(
            [self.author_name_deconstructible_tuple_1],
            [self.author_name_deconstructible_tuple_2],
        )
        self.assertEqual(changes, {})
        # Legitimate differences within the deconstructed tuples should be
        # reported as a change
        changes = self.get_changes(
            [self.author_name_deconstructible_tuple_1],
            [self.author_name_deconstructible_tuple_3],
        )
        # Exactly one app has changes.
        self.assertEqual(len(changes), 1)
    def test_deconstructible_dict(self):
        """Nested deconstruction descends into dict values."""
        # When dicts contain items whose values deconstruct to identical
        # values, those dicts should be considered equal for the purpose of
        # detecting state changes (even if the original values are unequal).
        changes = self.get_changes(
            [self.author_name_deconstructible_dict_1],
            [self.author_name_deconstructible_dict_2],
        )
        self.assertEqual(changes, {})
        # Legitimate differences within the deconstructed dicts should be
        # reported as a change
        changes = self.get_changes(
            [self.author_name_deconstructible_dict_1],
            [self.author_name_deconstructible_dict_3],
        )
        # Exactly one app has changes.
        self.assertEqual(len(changes), 1)
    def test_nested_deconstructible_objects(self):
        """
        Nested deconstruction is applied recursively to the args/kwargs of
        deconstructed objects.
        """
        # If the items within a deconstructed object's args/kwargs have the
        # same deconstructed values - whether or not the items themselves are
        # different instances - then the object as a whole is regarded as
        # unchanged.
        changes = self.get_changes(
            [self.author_name_nested_deconstructible_1],
            [self.author_name_nested_deconstructible_2],
        )
        self.assertEqual(changes, {})
        # Differences that exist solely within the args list of a deconstructed
        # object should be reported as changes
        changes = self.get_changes(
            [self.author_name_nested_deconstructible_1],
            [self.author_name_nested_deconstructible_changed_arg],
        )
        self.assertEqual(len(changes), 1)
        # Additional args should also be reported as a change
        changes = self.get_changes(
            [self.author_name_nested_deconstructible_1],
            [self.author_name_nested_deconstructible_extra_arg],
        )
        self.assertEqual(len(changes), 1)
        # Differences that exist solely within the kwargs dict of a
        # deconstructed object should be reported as changes
        changes = self.get_changes(
            [self.author_name_nested_deconstructible_1],
            [self.author_name_nested_deconstructible_changed_kwarg],
        )
        self.assertEqual(len(changes), 1)
        # Additional kwargs should also be reported as a change
        changes = self.get_changes(
            [self.author_name_nested_deconstructible_1],
            [self.author_name_nested_deconstructible_extra_kwarg],
        )
        self.assertEqual(len(changes), 1)
    def test_deconstruct_type(self):
        """
        #22951 -- Uninstantiated classes with deconstruct are correctly
        returned by deep_deconstruct during serialization.
        """
        author = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                (
                    "name",
                    models.CharField(
                        max_length=200,
                        # IntegerField intentionally not instantiated.
                        default=models.IntegerField,
                    ),
                ),
            ],
        )
        changes = self.get_changes([], [author])
        # Right number/type of migrations? The class-valued default must not
        # crash change detection.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
    def test_replace_string_with_foreignkey(self):
        """
        #22300 - Adding an FK in the same "spot" as a deleted CharField should
        work.
        """
        changes = self.get_changes(
            [self.author_with_publisher_string],
            [self.author_with_publisher, self.publisher],
        )
        # Right number/type of migrations? The FK target model must be created
        # before the old field is removed and the FK added.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"]
        )
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="publisher_name")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
    def test_foreign_key_removed_before_target_model(self):
        """
        Removing an FK and the model it targets in the same change must remove
        the FK field before the model to maintain consistency.
        """
        changes = self.get_changes(
            [self.author_with_publisher, self.publisher], [self.author_name]
        )  # removes both the model and FK
        # Right number/type of migrations? RemoveField must precede
        # DeleteModel.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "DeleteModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="publisher")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
    @mock.patch(
        "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
        side_effect=AssertionError("Should not have prompted for not null addition"),
    )
    def test_add_many_to_many(self, mocked_ask_method):
        """
        #22435 - Adding a ManyToManyField should not prompt for a default.
        """
        # The mocked questioner raises if the not-null prompt is triggered.
        changes = self.get_changes(
            [self.author_empty, self.publisher], [self.author_with_m2m, self.publisher]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers")
def test_alter_many_to_many(self):
changes = self.get_changes(
[self.author_with_m2m, self.publisher],
[self.author_with_m2m_blank, self.publisher],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers")
    def test_create_with_through_model(self):
        """
        Adding a m2m with a through model and the models that use it should be
        ordered correctly.
        """
        changes = self.get_changes(
            [], [self.author_with_m2m_through, self.publisher, self.contract]
        )
        # Right number/type of migrations? All three models are created first,
        # then the m2m is added once its through model exists.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            [
                "CreateModel",
                "CreateModel",
                "CreateModel",
                "AddField",
            ],
        )
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="Contract")
        self.assertOperationAttributes(
            changes, "testapp", 0, 3, model_name="author", name="publishers"
        )
    def test_create_with_through_model_separate_apps(self):
        """
        When an m2m's through model and target live in different apps, the
        m2m field is split into a second migration that depends on all three
        apps' initial migrations.
        """
        author_with_m2m_through = ModelState(
            "authors",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                (
                    "publishers",
                    models.ManyToManyField(
                        "testapp.Publisher", through="contract.Contract"
                    ),
                ),
            ],
        )
        contract = ModelState(
            "contract",
            "Contract",
            [
                ("id", models.AutoField(primary_key=True)),
                ("author", models.ForeignKey("authors.Author", models.CASCADE)),
                ("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
            ],
        )
        changes = self.get_changes(
            [], [author_with_m2m_through, self.publisher, contract]
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertNumberMigrations(changes, "contract", 1)
        self.assertNumberMigrations(changes, "authors", 2)
        self.assertMigrationDependencies(
            changes,
            "authors",
            1,
            {("authors", "auto_1"), ("contract", "auto_1"), ("testapp", "auto_1")},
        )
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
        self.assertOperationTypes(changes, "contract", 0, ["CreateModel"])
        self.assertOperationAttributes(changes, "contract", 0, 0, name="Contract")
        self.assertOperationTypes(changes, "authors", 0, ["CreateModel"])
        self.assertOperationTypes(changes, "authors", 1, ["AddField"])
        self.assertOperationAttributes(changes, "authors", 0, 0, name="Author")
        self.assertOperationAttributes(
            changes, "authors", 1, 0, model_name="author", name="publishers"
        )
    def test_many_to_many_removed_before_through_model(self):
        """
        Removing a ManyToManyField and the "through" model in the same change
        must remove the field before the model to maintain consistency.
        """
        changes = self.get_changes(
            [
                self.book_with_multiple_authors_through_attribution,
                self.author_name,
                self.attribution,
            ],
            [self.book_with_no_author, self.author_name],
        )
        # Remove both the through model and ManyToMany
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes, "otherapp", 0, ["RemoveField", "DeleteModel"]
        )
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, name="authors", model_name="book"
        )
        self.assertOperationAttributes(changes, "otherapp", 0, 1, name="Attribution")
    def test_many_to_many_removed_before_through_model_2(self):
        """
        Removing a model that contains a ManyToManyField and the "through"
        model in the same change must remove the field before the model to
        maintain consistency.
        """
        changes = self.get_changes(
            [
                self.book_with_multiple_authors_through_attribution,
                self.author_name,
                self.attribution,
            ],
            [self.author_name],
        )
        # Remove both the through model and ManyToMany
        # Right number/type of migrations? Field first, then through model,
        # then the owning model.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(
            changes, "otherapp", 0, ["RemoveField", "DeleteModel", "DeleteModel"]
        )
        self.assertOperationAttributes(
            changes, "otherapp", 0, 0, name="authors", model_name="book"
        )
        self.assertOperationAttributes(changes, "otherapp", 0, 1, name="Attribution")
        self.assertOperationAttributes(changes, "otherapp", 0, 2, name="Book")
    def test_m2m_w_through_multistep_remove(self):
        """
        A model with a m2m field that specifies a "through" model cannot be
        removed in the same migration as that through model as the schema will
        pass through an inconsistent state. The autodetector should produce two
        migrations to avoid this issue.
        """
        changes = self.get_changes(
            [self.author_with_m2m_through, self.publisher, self.contract],
            [self.publisher],
        )
        # Right number/type of migrations? Both FKs on the through model are
        # removed before either model is deleted.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"],
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", model_name="contract"
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 1, name="publisher", model_name="contract"
        )
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 3, name="Contract")
    def test_concrete_field_changed_to_many_to_many(self):
        """
        #23938 - Changing a concrete field into a ManyToManyField
        first removes the concrete field and then adds the m2m field.
        """
        changes = self.get_changes(
            [self.author_with_former_m2m], [self.author_with_m2m, self.publisher]
        )
        # Right number/type of migrations? The m2m target model is created
        # first, then the old field is swapped for the m2m.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"]
        )
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher")
        self.assertOperationAttributes(
            changes, "testapp", 0, 1, name="publishers", model_name="author"
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 2, name="publishers", model_name="author"
        )
    def test_many_to_many_changed_to_concrete_field(self):
        """
        #23938 - Changing a ManyToManyField into a concrete field
        first removes the m2m field and then adds the concrete field.
        """
        changes = self.get_changes(
            [self.author_with_m2m, self.publisher], [self.author_with_former_m2m]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["RemoveField", "DeleteModel", "AddField"]
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="publishers", model_name="author"
        )
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        self.assertOperationAttributes(
            changes, "testapp", 0, 2, name="publishers", model_name="author"
        )
        # The replacement concrete field keeps its declared max_length.
        self.assertOperationFieldAttributes(changes, "testapp", 0, 2, max_length=100)
    def test_non_circular_foreignkey_dependency_removal(self):
        """
        If two models with a ForeignKey from one to the other are removed at
        the same time, the autodetector should remove them in the correct
        order.
        """
        changes = self.get_changes(
            [self.author_with_publisher, self.publisher_with_author], []
        )
        # Right number/type of migrations? The FK breaking the cycle is
        # removed first, then both models are deleted.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["RemoveField", "DeleteModel", "DeleteModel"]
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", model_name="publisher"
        )
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="Publisher")
    def test_alter_model_options(self):
        """Changing a model's options should make a change."""
        changes = self.get_changes([self.author_empty], [self.author_with_options])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            options={
                "permissions": [("can_hire", "Can hire")],
                "verbose_name": "Authi",
            },
        )
        # Changing them back to empty should also make a change
        changes = self.get_changes([self.author_with_options], [self.author_empty])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", options={}
        )
    def test_alter_model_options_proxy(self):
        """Changing a proxy model's options should also make a change."""
        changes = self.get_changes(
            [self.author_proxy, self.author_empty],
            [self.author_proxy_options, self.author_empty],
        )
        # Right number/type of migrations? Only the changed option appears;
        # proxy=True itself is not part of the AlterModelOptions payload.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="authorproxy",
            options={"verbose_name": "Super Author"},
        )
    def test_set_alter_order_with_respect_to(self):
        """Setting order_with_respect_to adds a field."""
        changes = self.get_changes(
            [self.book, self.author_with_book],
            [self.book, self.author_with_book_order_wrt],
        )
        # Right number/type of migrations? A single AlterOrderWithRespectTo
        # handles the implicit _order field.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterOrderWithRespectTo"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", order_with_respect_to="book"
        )
    def test_add_alter_order_with_respect_to(self):
        """
        Setting order_with_respect_to when adding the FK too does
        things in the right order.
        """
        changes = self.get_changes(
            [self.author_name], [self.book, self.author_with_book_order_wrt]
        )
        # Right number/type of migrations? The FK must exist before ordering
        # with respect to it can be set.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["AddField", "AlterOrderWithRespectTo"]
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, model_name="author", name="book"
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 1, name="author", order_with_respect_to="book"
        )
    def test_remove_alter_order_with_respect_to(self):
        """
        Removing order_with_respect_to when removing the FK too does
        things in the right order.
        """
        changes = self.get_changes(
            [self.book, self.author_with_book_order_wrt], [self.author_name]
        )
        # Right number/type of migrations? Ordering must be cleared before the
        # FK it refers to is removed.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["AlterOrderWithRespectTo", "RemoveField"]
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", order_with_respect_to=None
        )
        self.assertOperationAttributes(
            changes, "testapp", 0, 1, model_name="author", name="book"
        )
    def test_add_model_order_with_respect_to(self):
        """
        Setting order_with_respect_to when adding the whole model
        does things in the right order.
        """
        changes = self.get_changes([], [self.book, self.author_with_book_order_wrt])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="Author",
            options={"order_with_respect_to": "book"},
        )
        # The implicit _order column is handled by the option, not emitted as
        # an explicit field on the CreateModel.
        self.assertNotIn(
            "_order",
            [name for name, field in changes["testapp"][0].operations[0].fields],
        )
    def test_add_model_order_with_respect_to_unique_together(self):
        """
        A new model combining order_with_respect_to with a unique_together
        that references the implicit _order field is created in one
        CreateModel carrying both options.
        """
        changes = self.get_changes(
            [],
            [
                self.book,
                ModelState(
                    "testapp",
                    "Author",
                    [
                        ("id", models.AutoField(primary_key=True)),
                        ("name", models.CharField(max_length=200)),
                        ("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
                    ],
                    options={
                        "order_with_respect_to": "book",
                        "unique_together": {("id", "_order")},
                    },
                ),
            ],
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="Author",
            options={
                "order_with_respect_to": "book",
                "unique_together": {("id", "_order")},
            },
        )
    def test_add_model_order_with_respect_to_constraint(self):
        """
        A new model combining order_with_respect_to with a constraint on the
        implicit _order field is created in a single CreateModel carrying both
        options.
        """
        after = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
                ("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
            ],
            options={
                "order_with_respect_to": "book",
                "constraints": [
                    models.CheckConstraint(
                        condition=models.Q(_order__gt=1), name="book_order_gt_1"
                    ),
                ],
            },
        )
        changes = self.get_changes([], [self.book, after])
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes,
            "testapp",
            0,
            ["CreateModel"],
        )
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="Author",
            options={
                "order_with_respect_to": "book",
                "constraints": [
                    models.CheckConstraint(
                        condition=models.Q(_order__gt=1), name="book_order_gt_1"
                    )
                ],
            },
        )
    def test_add_model_order_with_respect_to_index(self):
        """
        A new model combining order_with_respect_to with an index on the
        implicit _order field is created in a single CreateModel carrying both
        options.
        """
        after = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=200)),
                ("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
            ],
            options={
                "order_with_respect_to": "book",
                "indexes": [models.Index(fields=["_order"], name="book_order_idx")],
            },
        )
        changes = self.get_changes([], [self.book, after])
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes,
            "testapp",
            0,
            0,
            name="Author",
            options={
                "order_with_respect_to": "book",
                "indexes": [models.Index(fields=["_order"], name="book_order_idx")],
            },
        )
    def test_set_alter_order_with_respect_to_index_constraint_unique_together(self):
        """
        When order_with_respect_to is set on an existing model together with
        an index/constraint/unique_together referencing the implicit _order
        field, AlterOrderWithRespectTo must come first so _order exists.
        """
        tests = [
            (
                "AddIndex",
                {
                    "indexes": [
                        models.Index(fields=["_order"], name="book_order_idx"),
                    ]
                },
            ),
            (
                "AddConstraint",
                {
                    "constraints": [
                        models.CheckConstraint(
                            condition=models.Q(_order__gt=1),
                            name="book_order_gt_1",
                        ),
                    ]
                },
            ),
            ("AlterUniqueTogether", {"unique_together": {("id", "_order")}}),
        ]
        for operation, extra_option in tests:
            with self.subTest(operation=operation):
                after = ModelState(
                    "testapp",
                    "Author",
                    [
                        ("id", models.AutoField(primary_key=True)),
                        ("name", models.CharField(max_length=200)),
                        ("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
                    ],
                    options={
                        "order_with_respect_to": "book",
                        **extra_option,
                    },
                )
                changes = self.get_changes(
                    [self.book, self.author_with_book],
                    [self.book, after],
                )
                self.assertNumberMigrations(changes, "testapp", 1)
                self.assertOperationTypes(
                    changes,
                    "testapp",
                    0,
                    [
                        "AlterOrderWithRespectTo",
                        operation,
                    ],
                )
    def test_alter_model_managers(self):
        """
        Changing the model managers adds a new operation.
        """
        changes = self.get_changes([self.other_pony], [self.other_pony_food])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["AlterModelManagers"])
        self.assertOperationAttributes(changes, "otherapp", 0, 0, name="pony")
        # Manager order and constructor args must round-trip through the
        # operation's serialized managers list.
        self.assertEqual(
            [name for name, mgr in changes["otherapp"][0].operations[0].managers],
            ["food_qs", "food_mgr", "food_mgr_kwargs"],
        )
        self.assertEqual(
            changes["otherapp"][0].operations[0].managers[1][1].args, ("a", "b", 1, 2)
        )
        self.assertEqual(
            changes["otherapp"][0].operations[0].managers[2][1].args, ("x", "y", 3, 4)
        )
def test_swappable_first_inheritance(self):
"""Swappable models get their CreateModel first."""
changes = self.get_changes([], [self.custom_user, self.aardvark])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "thirdapp", 1)
self.assertOperationTypes(
changes, "thirdapp", 0, ["CreateModel", "CreateModel"]
)
self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, "thirdapp", 0, 1, name="Aardvark")
    def test_default_related_name_option(self):
        """
        default_related_name is tracked as a model option: it appears on the
        CreateModel and is cleared via AlterModelOptions when removed.
        """
        model_state = ModelState(
            "app",
            "model",
            [
                ("id", models.AutoField(primary_key=True)),
            ],
            options={"default_related_name": "related_name"},
        )
        changes = self.get_changes([], [model_state])
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes,
            "app",
            0,
            0,
            name="model",
            options={"default_related_name": "related_name"},
        )
        altered_model_state = ModelState(
            "app",
            "Model",
            [
                ("id", models.AutoField(primary_key=True)),
            ],
        )
        changes = self.get_changes([model_state], [altered_model_state])
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(changes, "app", 0, 0, name="model", options={})
    @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
    def test_swappable_first_setting(self):
        """Swappable models get their CreateModel first."""
        with isolate_lru_cache(apps.get_swappable_settings_name):
            changes = self.get_changes([], [self.custom_user_no_inherit, self.aardvark])
        # Right number/type of migrations? The swappable CustomUser precedes
        # Aardvark even without an inheritance relationship.
        self.assertNumberMigrations(changes, "thirdapp", 1)
        self.assertOperationTypes(
            changes, "thirdapp", 0, ["CreateModel", "CreateModel"]
        )
        self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="CustomUser")
        self.assertOperationAttributes(changes, "thirdapp", 0, 1, name="Aardvark")
def test_bases_first(self):
"""Bases of other models come first."""
changes = self.get_changes(
[], [self.aardvark_based_on_author, self.author_name]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Aardvark")
def test_bases_first_mixed_case_app_label(self):
app_label = "MiXedCaseApp"
changes = self.get_changes(
[],
[
ModelState(
app_label,
"owner",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
app_label,
"place",
[
("id", models.AutoField(primary_key=True)),
(
"owner",
models.ForeignKey("MiXedCaseApp.owner", models.CASCADE),
),
],
),
ModelState(app_label, "restaurant", [], bases=("MiXedCaseApp.place",)),
],
)
self.assertNumberMigrations(changes, app_label, 1)
self.assertOperationTypes(
changes,
app_label,
0,
[
"CreateModel",
"CreateModel",
"CreateModel",
],
)
self.assertOperationAttributes(changes, app_label, 0, 0, name="owner")
self.assertOperationAttributes(changes, app_label, 0, 1, name="place")
self.assertOperationAttributes(changes, app_label, 0, 2, name="restaurant")
def test_multiple_bases(self):
"""
Inheriting models doesn't move *_ptr fields into AddField operations.
"""
A = ModelState("app", "A", [("a_id", models.AutoField(primary_key=True))])
B = ModelState("app", "B", [("b_id", models.AutoField(primary_key=True))])
C = ModelState("app", "C", [], bases=("app.A", "app.B"))
D = ModelState("app", "D", [], bases=("app.A", "app.B"))
E = ModelState("app", "E", [], bases=("app.A", "app.B"))
changes = self.get_changes([], [A, B, C, D, E])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(
changes,
"app",
0,
["CreateModel", "CreateModel", "CreateModel", "CreateModel", "CreateModel"],
)
self.assertOperationAttributes(changes, "app", 0, 0, name="A")
self.assertOperationAttributes(changes, "app", 0, 1, name="B")
self.assertOperationAttributes(changes, "app", 0, 2, name="C")
self.assertOperationAttributes(changes, "app", 0, 3, name="D")
self.assertOperationAttributes(changes, "app", 0, 4, name="E")
def test_proxy_bases_first(self):
"""Bases of proxies come first."""
changes = self.get_changes(
[], [self.author_empty, self.author_proxy, self.author_proxy_proxy]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["CreateModel", "CreateModel", "CreateModel"]
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy")
self.assertOperationAttributes(
changes, "testapp", 0, 2, name="AAuthorProxyProxy"
)
def test_pk_fk_included(self):
"""
A relation used as the primary key is kept as part of CreateModel.
"""
changes = self.get_changes([], [self.aardvark_pk_fk_author, self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Aardvark")
def test_first_dependency(self):
"""
A dependency to an app with no migrations uses __first__.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = {"migrations"}
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
self.assertMigrationDependencies(
changes, "otherapp", 0, [("migrations", "__first__")]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_last_dependency(self):
"""
A dependency to an app with existing migrations uses the
last migration of that app.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = {"migrations"}
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book")
self.assertMigrationDependencies(
changes, "otherapp", 0, [("migrations", "0002_second")]
)
def test_alter_fk_before_model_deletion(self):
"""
ForeignKeys are altered _before_ the model they used to
refer to are deleted.
"""
changes = self.get_changes(
[self.author_name, self.publisher_with_author],
[self.aardvark_testapp, self.publisher_with_aardvark_author],
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(
changes, "testapp", 0, ["CreateModel", "AlterField", "DeleteModel"]
)
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Aardvark")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
def test_fk_dependency_other_app(self):
"""
#23100 - ForeignKeys correctly depend on other apps' models.
"""
changes = self.get_changes(
[self.author_name, self.book], [self.author_with_book, self.book]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="book")
self.assertMigrationDependencies(
changes, "testapp", 0, [("otherapp", "__first__")]
)
def test_alter_unique_together_fk_to_m2m(self):
changes = self.get_changes(
[self.author_name, self.book_unique_together],
[
self.author_name,
ModelState(
"otherapp",
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ManyToManyField("testapp.Author")),
("title", models.CharField(max_length=200)),
],
),
],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(
changes, "otherapp", 0, ["AlterUniqueTogether", "RemoveField", "AddField"]
)
self.assertOperationAttributes(
changes, "otherapp", 0, 0, name="book", unique_together=set()
)
self.assertOperationAttributes(
changes, "otherapp", 0, 1, model_name="book", name="author"
)
self.assertOperationAttributes(
changes, "otherapp", 0, 2, model_name="book", name="author"
)
def test_alter_field_to_fk_dependency_other_app(self):
changes = self.get_changes(
[self.author_empty, self.book_with_no_author_fk],
[self.author_empty, self.book],
)
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"])
self.assertMigrationDependencies(
changes, "otherapp", 0, [("testapp", "__first__")]
)
def test_circular_dependency_mixed_addcreate(self):
"""
#23315 - The dependency resolver knows to put all CreateModel
before AddField and not become unsolvable.
"""
address = ModelState(
"a",
"Address",
[
("id", models.AutoField(primary_key=True)),
("country", models.ForeignKey("b.DeliveryCountry", models.CASCADE)),
],
)
person = ModelState(
"a",
"Person",
[
("id", models.AutoField(primary_key=True)),
],
)
apackage = ModelState(
"b",
"APackage",
[
("id", models.AutoField(primary_key=True)),
("person", models.ForeignKey("a.Person", models.CASCADE)),
],
)
country = ModelState(
"b",
"DeliveryCountry",
[
("id", models.AutoField(primary_key=True)),
],
)
changes = self.get_changes([], [address, person, apackage, country])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 2)
self.assertNumberMigrations(changes, "b", 1)
self.assertOperationTypes(changes, "a", 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, "a", 1, ["AddField"])
self.assertOperationTypes(changes, "b", 0, ["CreateModel", "CreateModel"])
@override_settings(AUTH_USER_MODEL="a.Tenant")
def test_circular_dependency_swappable(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
tenant = ModelState(
"a",
"Tenant",
[
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("b.Address", models.CASCADE)),
],
bases=(AbstractBaseUser,),
)
address = ModelState(
"b",
"Address",
[
("id", models.AutoField(primary_key=True)),
(
"tenant",
models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE),
),
],
)
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 2)
self.assertOperationTypes(changes, "a", 0, ["CreateModel"])
self.assertOperationTypes(changes, "a", 1, ["AddField"])
self.assertMigrationDependencies(changes, "a", 0, [])
self.assertMigrationDependencies(
changes, "a", 1, [("a", "auto_1"), ("b", "auto_1")]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "b", 1)
self.assertOperationTypes(changes, "b", 0, ["CreateModel"])
self.assertMigrationDependencies(
changes, "b", 0, [("__setting__", "AUTH_USER_MODEL")]
)
@override_settings(AUTH_USER_MODEL="b.Tenant")
def test_circular_dependency_swappable2(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models but with the swappable not being the first migrated
model.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
address = ModelState(
"a",
"Address",
[
("id", models.AutoField(primary_key=True)),
(
"tenant",
models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE),
),
],
)
tenant = ModelState(
"b",
"Tenant",
[
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("a.Address", models.CASCADE)),
],
bases=(AbstractBaseUser,),
)
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 2)
self.assertOperationTypes(changes, "a", 0, ["CreateModel"])
self.assertOperationTypes(changes, "a", 1, ["AddField"])
self.assertMigrationDependencies(changes, "a", 0, [])
self.assertMigrationDependencies(
changes, "a", 1, [("__setting__", "AUTH_USER_MODEL"), ("a", "auto_1")]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "b", 1)
self.assertOperationTypes(changes, "b", 0, ["CreateModel"])
self.assertMigrationDependencies(changes, "b", 0, [("a", "auto_1")])
@override_settings(AUTH_USER_MODEL="a.Person")
def test_circular_dependency_swappable_self(self):
"""
#23322 - The dependency resolver knows to explicitly resolve
swappable models.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
person = ModelState(
"a",
"Person",
[
("id", models.AutoField(primary_key=True)),
(
"parent1",
models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
related_name="children",
),
),
],
)
changes = self.get_changes([], [person])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "a", 1)
self.assertOperationTypes(changes, "a", 0, ["CreateModel"])
self.assertMigrationDependencies(changes, "a", 0, [])
@override_settings(AUTH_USER_MODEL="a.User")
def test_swappable_circular_multi_mti(self):
with isolate_lru_cache(apps.get_swappable_settings_name):
parent = ModelState(
"a",
"Parent",
[("user", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE))],
)
child = ModelState("a", "Child", [], bases=("a.Parent",))
user = ModelState("a", "User", [], bases=(AbstractBaseUser, "a.Child"))
changes = self.get_changes([], [parent, child, user])
self.assertNumberMigrations(changes, "a", 1)
self.assertOperationTypes(
changes, "a", 0, ["CreateModel", "CreateModel", "CreateModel", "AddField"]
)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition",
side_effect=AssertionError("Should not have prompted for not null addition"),
)
def test_add_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and blank `CharField` or `TextField`
without default should not prompt for a default.
"""
changes = self.get_changes(
[self.author_empty], [self.author_with_biography_blank]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0)
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition"
)
def test_add_non_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and non-blank `CharField` or `TextField`
without default should prompt for a default.
"""
changes = self.get_changes(
[self.author_empty], [self.author_with_biography_non_blank]
)
self.assertEqual(mocked_ask_method.call_count, 2)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0)
def test_mti_inheritance_model_removal(self):
Animal = ModelState(
"app",
"Animal",
[
("id", models.AutoField(primary_key=True)),
],
)
Dog = ModelState("app", "Dog", [], bases=("app.Animal",))
changes = self.get_changes([Animal, Dog], [Animal])
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "app", 0, 0, name="Dog")
def test_add_model_with_field_removed_from_base_model(self):
"""
Removing a base field takes place before adding a new inherited model
that has a field with the same name.
"""
before = [
ModelState(
"app",
"readable",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=200)),
],
),
]
after = [
ModelState(
"app",
"readable",
[
("id", models.AutoField(primary_key=True)),
],
),
ModelState(
"app",
"book",
[
("title", models.CharField(max_length=200)),
],
bases=("app.readable",),
),
]
changes = self.get_changes(before, after)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["RemoveField", "CreateModel"])
self.assertOperationAttributes(
changes, "app", 0, 0, name="title", model_name="readable"
)
self.assertOperationAttributes(changes, "app", 0, 1, name="book")
def test_parse_number(self):
tests = [
("no_number", None),
("0001_initial", 1),
("0002_model3", 2),
("0002_auto_20380101_1112", 2),
("0002_squashed_0003", 3),
("0002_model2_squashed_0003_other4", 3),
("0002_squashed_0003_squashed_0004", 4),
("0002_model2_squashed_0003_other4_squashed_0005_other6", 5),
("0002_custom_name_20380101_1112_squashed_0003_model", 3),
("2_squashed_4", 4),
]
for migration_name, expected_number in tests:
with self.subTest(migration_name=migration_name):
self.assertEqual(
MigrationAutodetector.parse_number(migration_name),
expected_number,
)
def test_add_custom_fk_with_hardcoded_to(self):
class HardcodedForeignKey(models.ForeignKey):
def __init__(self, *args, **kwargs):
kwargs["to"] = "testapp.Author"
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["to"]
return name, path, args, kwargs
book_hardcoded_fk_to = ModelState(
"testapp",
"Book",
[
("author", HardcodedForeignKey(on_delete=models.CASCADE)),
],
)
changes = self.get_changes(
[self.author_empty],
[self.author_empty, book_hardcoded_fk_to],
)
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Book")
@mock.patch(
"django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition"
)
def test_add_composite_pk(self, mocked_ask_method):
before = [
ModelState(
"app",
"foo",
[
("id", models.AutoField(primary_key=True)),
],
),
]
after = [
ModelState(
"app",
"foo",
[
("pk", models.CompositePrimaryKey("foo_id", "bar_id")),
("id", models.IntegerField()),
],
),
]
changes = self.get_changes(before, after)
self.assertEqual(mocked_ask_method.call_count, 0)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["AddField", "AlterField"])
self.assertOperationAttributes(
changes,
"app",
0,
0,
name="pk",
model_name="foo",
preserve_default=True,
)
self.assertOperationAttributes(
changes,
"app",
0,
1,
name="id",
model_name="foo",
preserve_default=True,
)
def test_remove_composite_pk(self):
before = [
ModelState(
"app",
"foo",
[
("pk", models.CompositePrimaryKey("foo_id", "bar_id")),
("id", models.IntegerField()),
],
),
]
after = [
ModelState(
"app",
"foo",
[
("id", models.AutoField(primary_key=True)),
],
),
]
changes = self.get_changes(before, after)
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, ["RemoveField", "AlterField"])
self.assertOperationAttributes(
changes,
"app",
0,
0,
name="pk",
model_name="foo",
)
self.assertOperationAttributes(
changes,
"app",
0,
1,
name="id",
model_name="foo",
preserve_default=True,
)
| AutodetectorTests |
python | getsentry__sentry | src/sentry/testutils/helpers/notifications.py | {
"start": 2027,
"end": 2214
} | class ____(DummyNotification):
def __init__(self, organization, some_value) -> None:
super().__init__(organization)
self.some_value = some_value
| AnotherDummyNotification |
python | django__django | tests/proxy_models/models.py | {
"start": 2800,
"end": 2978
} | class ____(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country, models.CASCADE)
def __str__(self):
return self.name
| State |
python | python-attrs__attrs | typing-examples/baseline.py | {
"start": 3150,
"end": 3602
} | class ____:
a: int
b: int
# NG versions of asdict/astuple
attrs.asdict(MatchArgs2(1, 2))
attrs.astuple(MatchArgs2(1, 2))
def accessing_from_attrs() -> None:
"""
Use a function to keep the ns clean.
"""
attrs.converters.optional
attrs.exceptions.FrozenError
attrs.filters.include
attrs.filters.exclude
attrs.setters.frozen
attrs.validators.and_
attrs.cmp_using
@attrs.define(unsafe_hash=True)
| MatchArgs2 |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 19899,
"end": 20135
} | class ____(AOTInput):
"""The seed for functionalized Philox RNG calls, specifically for forward graph."""
def expr(self) -> str:
return "__philox_forward_seed"
@dataclasses.dataclass(frozen=True)
| PhiloxForwardSeedAOTInput |
python | ray-project__ray | python/ray/llm/_internal/serve/core/protocol.py | {
"start": 639,
"end": 2265
} | class ____(DeploymentProtocol):
"""
This is the common interface between all the llm deployment. All llm deployments
need to implement a sync constructor, an async start method, and check_health method.
"""
def __init__(self):
"""
Constructor takes basic setup that doesn't require async operations.
"""
async def start(self) -> None:
"""
Start the underlying engine. This handles async initialization.
"""
async def chat(
self, request: "ChatCompletionRequest"
) -> AsyncGenerator[Union[str, "ChatCompletionResponse", "ErrorResponse"], None]:
"""
Inferencing to the engine for chat, and return the response.
"""
async def completions(
self, request: "CompletionRequest"
) -> AsyncGenerator[
Union[List[Union[str, "ErrorResponse"]], "CompletionResponse"], None
]:
"""
Inferencing to the engine for completion api, and return the response.
"""
async def check_health(self) -> None:
"""
Check the health of the replica. Does not return anything.
Raise error when the engine is dead and needs to be restarted.
"""
async def reset_prefix_cache(self) -> None:
"""Reset the prefix cache of the underlying engine"""
async def start_profile(self) -> None:
"""Start profiling"""
async def stop_profile(self) -> None:
"""Stop profiling"""
# TODO (Kourosh): This does not belong here.
async def llm_config(self) -> Optional["LLMConfig"]:
"""Get the LLM config"""
| LLMServerProtocol |
python | PyCQA__pylint | tests/functional/d/disallowed_name.py | {
"start": 102,
"end": 285
} | class ____(): # [disallowed-name]
pass
foo = {}.keys() # Should raise disallowed-name once _check_name() is refactored.
foo = 42 # [disallowed-name]
aaa = 42 # [invalid-name]
| foo |
python | apache__airflow | airflow-core/tests/unit/models/test_callback.py | {
"start": 6574,
"end": 7762
} | class ____:
def test_polymorphic_serde(self, session):
"""Test that ExecutorCallback can be serialized and deserialized"""
callback = ExecutorCallback(TEST_SYNC_CALLBACK, fetch_method=CallbackFetchMethod.IMPORT_PATH)
session.add(callback)
session.commit()
retrieved = session.query(Callback).filter_by(id=callback.id).one()
assert isinstance(retrieved, ExecutorCallback)
assert retrieved.fetch_method == CallbackFetchMethod.IMPORT_PATH
assert retrieved.data == TEST_SYNC_CALLBACK.serialize()
assert retrieved.state == CallbackState.PENDING.value
assert retrieved.output is None
assert retrieved.priority_weight == 1
assert retrieved.created_at is not None
assert retrieved.trigger_id is None
def test_queue(self):
callback = ExecutorCallback(TEST_SYNC_CALLBACK, fetch_method=CallbackFetchMethod.DAG_ATTRIBUTE)
assert callback.state == CallbackState.PENDING
callback.queue()
assert callback.state == CallbackState.QUEUED
# Note: class DagProcessorCallback is tested in airflow-core/tests/unit/dag_processing/test_manager.py
| TestExecutorCallback |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 107954,
"end": 111073
} | class ____:
def test_reproducibility(self):
x = special_ortho_group.rvs(3, random_state=np.random.default_rng(514))
expected = np.array([[-0.93200988, 0.01533561, -0.36210826],
[0.35742128, 0.20446501, -0.91128705],
[0.06006333, -0.97875374, -0.19604469]])
assert_array_almost_equal(x, expected)
def test_invalid_dim(self):
assert_raises(ValueError, special_ortho_group.rvs, None)
assert_raises(ValueError, special_ortho_group.rvs, (2, 2))
assert_raises(ValueError, special_ortho_group.rvs, -1)
assert_raises(ValueError, special_ortho_group.rvs, 2.5)
def test_frozen_matrix(self):
dim = 7
frozen = special_ortho_group(dim)
rvs1 = frozen.rvs(random_state=1234)
rvs2 = special_ortho_group.rvs(dim, random_state=1234)
assert_equal(rvs1, rvs2)
def test_det_and_ortho(self):
xs = [special_ortho_group.rvs(dim)
for dim in range(2,12)
for i in range(3)]
# Test that determinants are always +1
dets = [np.linalg.det(x) for x in xs]
assert_allclose(dets, [1.]*30, rtol=1e-13)
# Test that these are orthogonal matrices
for x in xs:
assert_array_almost_equal(np.dot(x, x.T),
np.eye(x.shape[0]))
def test_haar(self):
# Test that the distribution is constant under rotation
# Every column should have the same distribution
# Additionally, the distribution should be invariant under another rotation
# Generate samples
dim = 5
samples = 1000 # Not too many, or the test takes too long
ks_prob = .05
xs = special_ortho_group.rvs(
dim, size=samples, random_state=np.random.default_rng(513)
)
# Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
# effectively picking off entries in the matrices of xs.
# These projections should all have the same distribution,
# establishing rotational invariance. We use the two-sided
# KS test to confirm this.
# We could instead test that angles between random vectors
# are uniformly distributed, but the below is sufficient.
# It is not feasible to consider all pairs, so pick a few.
els = ((0,0), (0,2), (1,4), (2,3))
#proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
proj = {(er, ec): sorted([x[er][ec] for x in xs]) for er, ec in els}
pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
assert_array_less([ks_prob]*len(pairs), ks_tests)
def test_one_by_one(self):
# Test that the distribution is a delta function at the identity matrix
# when dim=1
assert_allclose(special_ortho_group.rvs(1, size=1000), 1, rtol=1e-13)
def test_zero_by_zero(self):
assert_equal(special_ortho_group.rvs(0, size=4).shape, (4, 0, 0))
| TestSpecialOrthoGroup |
python | huggingface__transformers | src/transformers/models/mllama/image_processing_mllama_fast.py | {
"start": 8291,
"end": 15628
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_pad = True
max_image_tiles = 4
valid_kwargs = MllamaImageProcessorKwargs
def __init__(self, **kwargs: Unpack[MllamaImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[MllamaImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _prepare_images_structure(self, images: ImageInput, expected_ndims: int = 3) -> ImageInput:
"""
Prepare a nested images structure for processing.
"""
images = self.fetch_images(images)
return make_nested_list_of_images(images, expected_ndims=expected_ndims)
def convert_to_rgb(
self,
image: ImageInput,
) -> ImageInput:
"""
Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
as is.
Args:
image (ImageInput):
The image to convert.
Returns:
ImageInput: The converted image.
"""
return convert_to_rgb(image)
def pad(
self,
image: "torch.Tensor",
size: dict[str, int],
aspect_ratio: tuple[int, int],
) -> "torch.Tensor":
"""
Pad an image to the `size` x `aspect_ratio`. For example, if size is {height: 224, width: 224} and aspect ratio is
(1, 2), the image will be padded to 224x448.
Args:
image (`torch.Tensor`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
aspect_ratio (`Tuple[int, int]`):
The aspect ratio of the image.
Returns:
`torch.Tensor`: The padded image.
"""
image_height, image_width = image.shape[-2:]
num_tiles_height, num_tiles_width = aspect_ratio
padded_height = num_tiles_height * size.height
padded_width = num_tiles_width * size.width
pad_size = (0, 0, padded_width - image_width, padded_height - image_height)
image = F.pad(
image,
pad_size,
fill=0,
)
return image
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
max_image_tiles: int,
interpolation: "F.InterpolationMode" = None,
antialias: bool = True,
) -> Union["torch.Tensor", tuple[int, int]]:
"""
Resizes an image to fit within a tiled canvas while maintaining its aspect ratio.
The optimal canvas size is calculated based on the maximum number of tiles and the tile size.
The function first determines the best tile arrangement for the image, then resizes the image
to fit within this canvas. The resized image and the number of tiles along the height and width
dimensions are returned.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
max_image_tiles (`int`):
The maximum number of tiles to split the image into.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
Returns:
`Union[np.ndarray, Tuple[int, int]]`: The resized image and a tuple containing the number of tiles
along the height and width dimensions.
"""
image_height, image_width = image.shape[-2:]
tile_size = size.height
canvas_height, canvas_width = get_optimal_tiled_canvas(
image_height=image_height,
image_width=image_width,
max_image_tiles=max_image_tiles,
tile_size=tile_size,
)
num_tiles_height = canvas_height // tile_size
num_tiles_width = canvas_width // tile_size
new_height, new_width = get_image_size_fit_to_canvas(
image_height=image_height,
image_width=image_width,
canvas_height=canvas_height,
canvas_width=canvas_width,
tile_size=tile_size,
)
image = F.resize(image, (new_height, new_width), interpolation=interpolation, antialias=antialias)
return image, (num_tiles_height, num_tiles_width)
def _preprocess(
self,
images: list["torch.Tensor"],
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
max_image_tiles: Optional[int],
return_tensors: Optional[Union[str, TensorType]],
disable_grouping: Optional[bool],
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(
images, is_nested=True, disable_grouping=disable_grouping
)
split_images_grouped = {}
aspect_ratio_grouped = {}
for shape, stacked_images in grouped_images.items():
stacked_images, aspect_ratio = self.resize(
image=stacked_images, size=size, interpolation=interpolation, max_image_tiles=max_image_tiles
)
stacked_images = self.pad(
image=stacked_images,
size=size,
aspect_ratio=aspect_ratio,
)
num_tiles_height, num_tiles_width = aspect_ratio
aspect_ratio_grouped[shape] = [aspect_ratio] * len(stacked_images)
# same aspect ratio for all images in the batch
split_images = split_to_tiles(stacked_images, num_tiles_height, num_tiles_width)
# Fused rescale and normalize
split_images = self.rescale_and_normalize(
split_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
split_images_grouped[shape] = split_images
split_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True)
aspect_ratios = reorder_images(aspect_ratio_grouped, grouped_images_index, is_nested=True)
split_images, num_tiles = pad_batches_and_tiles(split_images, max_image_tiles)
aspect_ratio_ids = convert_aspect_ratios_to_ids(aspect_ratios, max_image_tiles=max_image_tiles)
aspect_ratio_mask = build_aspect_ratio_mask(aspect_ratios, max_image_tiles=max_image_tiles)
encoded_inputs = BatchFeature(
data={
"pixel_values": split_images,
"aspect_ratio_ids": aspect_ratio_ids,
"aspect_ratio_mask": aspect_ratio_mask,
},
tensor_type=return_tensors,
)
encoded_inputs["num_tiles"] = num_tiles
return encoded_inputs
__all__ = ["MllamaImageProcessorFast"]
| MllamaImageProcessorFast |
python | huggingface__transformers | tests/models/phi4_multimodal/test_feature_extraction_phi4_multimodal.py | {
"start": 3447,
"end": 12960
} | class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = Phi4MultimodalFeatureExtractor
def setUp(self):
self.feat_extract_tester = Phi4MultimodalFeatureExtractionTester(self)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_from_pretrained_kwargs(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(
tmpdirname, feature_size=2 * self.feat_extract_dict["feature_size"]
)
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(2 * mel_1.shape[1] == mel_2.shape[1])
def test_call(self):
# Tests that all call wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
pt_speech_inputs = [torch.tensor(speech_input) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
max_audio_len = (1200 - feature_extractor.win_length) // feature_extractor.hop_length + 1
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
self.assertTrue(input_features.shape[-2] == max_audio_len)
# Test not batched input
encoded_sequences_1 = feature_extractor(pt_speech_inputs[0], return_tensors="np").audio_input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").audio_input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = feature_extractor(pt_speech_inputs, return_tensors="np").audio_input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
pt_speech_inputs = torch.tensor(speech_inputs)
encoded_sequences_1 = feature_extractor(pt_speech_inputs, return_tensors="np").audio_input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
def test_double_precision_pad(self):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"audio_input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.audio_input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"audio_input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.audio_input_features.dtype == torch.float32)
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def test_torch_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
6.5243, 7.2267, 8.0917, 8.0041, 6.8247, 6.3216, 5.9599, 5.6770,
5.7441, 5.6138, 6.6793, 6.8597, 5.5375, 6.5330, 5.4880, 7.3280,
9.0736, 9.7665, 9.8773, 10.0828, 10.0518, 10.1736, 10.0145, 9.2545,
11.0495, 11.6518, 10.8654, 10.2293, 9.1045, 9.4819,
]
)
# fmt: on
input_speech = self._load_datasamples(1)
feature_extractor = Phi4MultimodalFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").audio_input_features
self.assertEqual(input_features.shape, (1, 584, 80))
torch.testing.assert_close(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
@unittest.mock.patch(
"transformers.models.phi4_multimodal.feature_extraction_phi4_multimodal.is_torch_available", lambda: False
)
def test_numpy_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = np.array(
[
6.5242944, 7.226712, 8.091721, 8.004097, 6.824679, 6.3216243,
5.959894, 5.676975, 5.744051, 5.61384, 6.6793485, 6.8597484,
5.5374746, 6.532976, 5.4879804, 7.3279905, 9.073576, 9.766463,
9.877262, 10.082759, 10.051792, 10.173581, 10.0144825, 9.254548,
11.049487, 11.651841, 10.865354, 10.229329, 9.104464, 9.481946,
]
)
# fmt: on
input_speech = self._load_datasamples(1)
feature_extractor = Phi4MultimodalFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="np").audio_input_features
self.assertEqual(input_features.shape, (1, 584, 80))
self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
@require_torch
def test_torch_integration_batch(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
[
6.5243, 7.2267, 8.0917, 8.0041, 6.8247, 6.3216, 5.9599, 5.6770,
5.7441, 5.6138, 6.6793, 6.8597, 5.5375, 6.5330, 5.4880, 7.3280,
9.0736, 9.7665, 9.8773, 10.0828, 10.0518, 10.1736, 10.0145, 9.2545,
11.0495, 11.6518, 10.8654, 10.2293, 9.1045, 9.4819
],
[
7.5105, 7.9453, 8.6161, 7.7666, 7.2572, 6.8823, 6.3242, 6.1899,
6.9706, 8.0810, 7.3227, 5.8580, 5.4990, 7.7373, 8.5447, 7.7203,
6.3230, 7.1995, 7.1463, 7.3153, 7.4054, 7.2855, 6.9396, 7.0255,
7.3285, 7.2748, 8.0742, 7.3998, 6.4813, 6.7509
],
[
7.7932, 8.1604, 8.7653, 8.2080, 7.2630, 6.4537, 4.8394, 6.3153,
8.0207, 8.3379, 6.0896, 5.7369, 5.8601, 4.7598, 4.8850, 6.2529,
3.9354, 6.1577, 7.9921, 9.6577, 10.1449, 9.1414, 9.3361, 9.0022,
9.2533, 10.0548, 10.4372, 8.8550, 9.1266, 9.9013
]
]
)
# fmt: on
input_speech = self._load_datasamples(3)
feature_extractor = Phi4MultimodalFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").audio_input_features
self.assertEqual(input_features.shape, (3, 1247, 80))
print(input_features[:, 0, :30])
torch.testing.assert_close(input_features[:, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
| Phi4MultimodalFeatureExtractionTest |
python | tensorflow__tensorflow | tensorflow/python/summary/writer/event_file_writer.py | {
"start": 1015,
"end": 5622
} | class ____:
"""Writes `Event` protocol buffers to an event file.
The `EventFileWriter` class creates an event file in the specified directory,
and asynchronously writes Event protocol buffers to the file. The Event file
is encoded using the tfrecord format, which is similar to RecordIO.
This class is not thread-safe.
"""
def __init__(self, logdir, max_queue=10, flush_secs=120,
filename_suffix=None):
"""Creates a `EventFileWriter` and an event file to write to.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers, which are written to
disk via the add_event method.
The other arguments to the constructor control the asynchronous writes to
the event file:
* `flush_secs`: How often, in seconds, to flush the added summaries
and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
written to disk before one of the 'add' calls block.
Args:
logdir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
filename_suffix: A string. Every event file's name is suffixed with
`filename_suffix`.
"""
self._logdir = str(logdir)
gfile.MakeDirs(self._logdir)
self._max_queue = max_queue
self._flush_secs = flush_secs
self._flush_complete = threading.Event()
self._flush_sentinel = object()
self._close_sentinel = object()
self._ev_writer = _pywrap_events_writer.EventsWriter(
compat.as_bytes(os.path.join(self._logdir, "events")))
if filename_suffix:
self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))
self._initialize()
self._closed = False
def _initialize(self):
"""Initializes or re-initializes the queue and writer thread.
The EventsWriter itself does not need to be re-initialized explicitly,
because it will auto-initialize itself if used after being closed.
"""
self._event_queue = CloseableQueue(self._max_queue)
self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
self._flush_secs, self._flush_complete,
self._flush_sentinel,
self._close_sentinel)
self._worker.start()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self._logdir
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
if self._closed:
self._initialize()
self._closed = False
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
if not self._closed:
self._try_put(event)
def _try_put(self, item):
"""Attempts to enqueue an item to the event queue.
If the queue is closed, this will close the EventFileWriter and reraise the
exception that caused the queue closure, if one exists.
Args:
item: the item to enqueue
"""
try:
self._event_queue.put(item)
except QueueClosedError:
self._internal_close()
if self._worker.failure_exc_info:
_, exception, _ = self._worker.failure_exc_info
raise exception from None
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
if not self._closed:
# Request a flush operation by enqueuing a sentinel and then waiting for
# the writer thread to mark the flush as complete.
self._flush_complete.clear()
self._try_put(self._flush_sentinel)
self._flush_complete.wait()
if self._worker.failure_exc_info:
self._internal_close()
_, exception, _ = self._worker.failure_exc_info
raise exception
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
if not self._closed:
self.flush()
self._try_put(self._close_sentinel)
self._internal_close()
def _internal_close(self):
self._closed = True
self._worker.join()
self._ev_writer.Close()
| EventFileWriter |
python | google__python-fire | fire/test_components.py | {
"start": 5569,
"end": 5656
} | class ____:
def create(self):
x = {}
x['y'] = x
return x
| CircularReference |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/control_flow/py_func_test.py | {
"start": 16744,
"end": 18805
} | class ____(PyFuncTestBase):
"""Encapsulates tests shared between py_func and eager_py_func."""
def verifyPyFuncsNoIncrease(self, make_graph):
ops.reset_default_graph()
gc.collect()
gc.collect()
initial_size = script_ops._py_funcs.size()
for _ in range(1000):
make_graph()
ops.reset_default_graph()
gc.collect()
gc.collect()
self.assertEqual(initial_size, script_ops._py_funcs.size())
def testCleanup(self):
def make_graph():
g = ops.Graph()
with g.as_default():
c = constant_op.constant([1.], dtypes.float32)
_ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
_ = script_ops.eager_py_func(lambda x: x + 1, [c], [dtypes.float32])
# These ops have a reference to 'c' which has a reference to the
# graph.
# Checks if the functions are being deleted though the graph is
# referenced from them (see #18292).
script_ops.py_func(
lambda x: x + c.shape[0], [c], [dtypes.float32])
script_ops.eager_py_func(
lambda x: x + c.shape[0], [c], [dtypes.float32])
self.verifyPyFuncsNoIncrease(make_graph)
def testCleanupInTfFunction(self):
self.skipTest("b/144098211")
def make_graph():
g = ops.Graph()
with g.as_default():
@def_function.function
def fn():
c = constant_op.constant([1.], dtypes.float32)
_ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
_ = script_ops.eager_py_func(lambda x: x + 1, [c], [dtypes.float32])
# These ops have a reference to 'c' which has a reference to the
# graph.
# Checks if the functions are being deleted though the graph is
# referenced from them (see #18292).
script_ops.py_func(
lambda x: x + c.shape[0], [c], [dtypes.float32])
script_ops.eager_py_func(
lambda x: x + c.shape[0], [c], [dtypes.float32])
fn()
self.verifyPyFuncsNoIncrease(make_graph)
| PyFuncAndEagerPyFuncTest |
python | django-import-export__django-import-export | tests/core/admin.py | {
"start": 2401,
"end": 2497
} | class ____(ImportExportModelAdmin, ExportActionModelAdmin, admin.ModelAdmin):
pass
| AuthorAdmin |
python | Textualize__textual | tests/command_palette/test_interaction.py | {
"start": 349,
"end": 2190
} | class ____(App[None]):
COMMANDS = {SimpleSource}
def on_mount(self) -> None:
self.action_command_palette()
async def test_initial_list_no_highlight() -> None:
"""When the list initially appears, the first item is highlghted."""
async with CommandPaletteApp().run_test() as pilot:
assert CommandPalette.is_open(pilot.app)
assert pilot.app.screen.query_one(CommandList).visible is False
await pilot.press("a")
assert pilot.app.screen.query_one(CommandList).visible is True
assert pilot.app.screen.query_one(CommandList).highlighted == 0
async def test_down_arrow_selects_an_item() -> None:
"""Typing in a search value then pressing down should select a command."""
async with CommandPaletteApp().run_test() as pilot:
assert CommandPalette.is_open(pilot.app)
assert pilot.app.screen.query_one(CommandList).visible is False
await pilot.press("a")
assert pilot.app.screen.query_one(CommandList).visible is True
assert pilot.app.screen.query_one(CommandList).highlighted == 0
await pilot.press("down")
assert pilot.app.screen.query_one(CommandList).highlighted == 1
async def test_enter_selects_an_item() -> None:
"""Typing in a search value then pressing enter should dismiss the command palette."""
async with CommandPaletteApp().run_test() as pilot:
assert CommandPalette.is_open(pilot.app)
assert pilot.app.screen.query_one(CommandList).visible is False
await pilot.press("a")
assert pilot.app.screen.query_one(CommandList).visible is True
assert pilot.app.screen.query_one(CommandList).highlighted == 0
await pilot.press("enter")
assert not CommandPalette.is_open(pilot.app)
assert not pilot.app.screen.query(CommandList)
| CommandPaletteApp |
python | getsentry__sentry | src/sentry/auth/providers/saml2/provider.py | {
"start": 12781,
"end": 12863
} | class ____(TypedDict):
url: str
binding: NotRequired[str]
| _SamlConfigService |
python | realpython__materials | python-constants/file_handler/file_handler.py | {
"start": 56,
"end": 290
} | class ____:
def __init__(self, file, reader=DEFAULT_READER):
self._file = file
self._reader = reader
def read(self):
self._reader.read(self._file)
# FileHandler implementation goes here...
| FileHandler |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_repo_path_parsing.py | {
"start": 1671,
"end": 5183
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.integration, self.oi = self.create_provider_integration_for(
self.organization,
self.user,
provider="github",
name="getsentry",
external_id="1234",
metadata={"domain_name": "github.com/getsentry"},
)
self.repo = self.create_repo(
project=self.project,
name="getsentry/sentry",
provider="integrations:github",
integration_id=self.integration.id,
url="https://github.com/getsentry/sentry",
)
def test_basic(self) -> None:
serializer = PathMappingSerializer(
context={"organization_id": self.organization.id},
data={
"source_url": "https://github.com/getsentry/sentry/blob/random.py",
"stack_path": "/random.py",
},
)
assert serializer.is_valid()
assert serializer.data["stack_path"] == "/random.py"
assert serializer.data["source_url"] == "https://github.com/getsentry/sentry/blob/random.py"
def test_window_stack_path(self) -> None:
serializer = PathMappingSerializer(
context={"organization_id": self.organization.id},
data={
"source_url": "https://github.com/getsentry/sentry/blob/duck.py",
"stack_path": "C:\\duck.py",
},
)
assert serializer.is_valid()
assert serializer.data["stack_path"] == "C:\\duck.py"
assert serializer.data["source_url"] == "https://github.com/getsentry/sentry/blob/duck.py"
def test_wrong_file(self) -> None:
serializer = PathMappingSerializer(
context={"organization_id": self.organization.id},
data={
"source_url": "https://github.com/getsentry/sentry/blob/random.py",
"stack_path": "/badfile.py",
},
)
assert not serializer.is_valid()
assert (
serializer.errors["sourceUrl"][0]
== "Source code URL points to a different file than the stack trace"
)
def test_no_integration(self) -> None:
new_org = self.create_organization()
serializer = PathMappingSerializer(
context={"organization_id": new_org.id},
data={
"source_url": "https://github.com/getsentry/sentry/blob/capybaras_and_chameleons.py",
"stack_path": "/capybaras_and_chameleons.py",
},
)
assert not serializer.is_valid()
assert serializer.errors["sourceUrl"][0] == "Could not find integration"
def test_no_repo(self) -> None:
new_org = self.create_organization()
self.integration, self.oi = self.create_provider_integration_for(
new_org,
self.user,
provider="github",
name="getsentry",
external_id="1235",
metadata={"domain_name": "github.com/getsentry"},
)
serializer = PathMappingSerializer(
context={"organization_id": new_org.id},
data={
"source_url": "https://github.com/getsentry/sentry/blob/capybaras_and_chameleons.py",
"stack_path": "/capybaras_and_chameleons.py",
},
)
assert not serializer.is_valid()
assert serializer.errors["sourceUrl"][0] == "Could not find repo"
| PathMappingSerializerTest |
python | wepe__MachineLearning | DeepLearning Tutorials/FaceRecognition_CNN(olivettifaces)/train_CNN_olivettifaces.py | {
"start": 4739,
"end": 13916
} | class ____(object):
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
assert image_shape[1] == filter_shape[1]
self.input = input
fan_in = numpy.prod(filter_shape[1:])
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# 卷积
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# 子采样
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
#保存训练参数的函数
def save_params(param1,param2,param3,param4):
import cPickle
write_file = open('params.pkl', 'wb')
cPickle.dump(param1, write_file, -1)
cPickle.dump(param2, write_file, -1)
cPickle.dump(param3, write_file, -1)
cPickle.dump(param4, write_file, -1)
write_file.close()
"""
上面定义好了CNN的一些基本构件,下面的函数将CNN应用于olivettifaces这个数据集,CNN的模型基于LeNet。
采用的优化算法是批量随机梯度下降算法,minibatch SGD,所以下面很多参数都带有batch_size,比如image_shape=(batch_size, 1, 57, 47)
可以设置的参数有:
batch_size,但应注意n_train_batches、n_valid_batches、n_test_batches的计算都依赖于batch_size
nkerns=[5, 10]即第一二层的卷积核个数可以设置
全连接层HiddenLayer的输出神经元个数n_out可以设置,要同时更改分类器的输入n_in
另外,还有一个很重要的就是学习速率learning_rate.
"""
def evaluate_olivettifaces(learning_rate=0.05, n_epochs=200,
dataset='olivettifaces.gif',
nkerns=[5, 10], batch_size=40):
#随机数生成器,用于初始化参数
rng = numpy.random.RandomState(23455)
#加载数据
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
#计算各数据集的batch个数
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_valid_batches /= batch_size
n_test_batches /= batch_size
#定义几个变量,x代表人脸数据,作为layer0的输入
index = T.lscalar()
x = T.matrix('x')
y = T.ivector('y')
######################
#建立CNN模型:
#input+layer0(LeNetConvPoolLayer)+layer1(LeNetConvPoolLayer)+layer2(HiddenLayer)+layer3(LogisticRegression)
######################
print '... building the model'
# Reshape matrix of rasterized images of shape (batch_size, 57 * 47)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (57, 47) is the size of images.
layer0_input = x.reshape((batch_size, 1, 57, 47))
# 第一个卷积+maxpool层
# 卷积后得到:(57-5+1 , 47-5+1) = (53, 43)
# maxpooling后得到: (53/2, 43/2) = (26, 21),因为忽略了边界
# 4D output tensor is thus of shape (batch_size, nkerns[0], 26, 21)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 57, 47),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
# 第二个卷积+maxpool层,输入是上层的输出,即(batch_size, nkerns[0], 26, 21)
# 卷积后得到:(26-5+1 , 21-5+1) = (22, 17)
# maxpooling后得到: (22/2, 17/2) = (11, 8),因为忽略了边界
# 4D output tensor is thus of shape (batch_size, nkerns[1], 11, 8)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 26, 21),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
# HiddenLayer全连接层,它的输入的大小是(batch_size, num_pixels),也就是说要将每个样本经layer0、layer1后得到的特征图整成一个一维的长向量,
#有batch_size个样本,故输入的大小为(batch_size, num_pixels),每一行是一个样本的长向量
#因此将上一层的输出(batch_size, nkerns[1], 11, 8)转化为(batch_size, nkerns[1] * 11* 8),用flatten
layer2_input = layer1.output.flatten(2)
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 11 * 8,
n_out=2000, #全连接层输出神经元的个数,自己定义的,可以根据需要调节
activation=T.tanh
)
#分类器
layer3 = LogisticRegression(input=layer2.output, n_in=2000, n_out=40) #n_in等于全连接层的输出,n_out等于40个类别
###############
# 定义优化算法的一些基本要素:代价函数,训练、验证、测试model、参数更新规则(即梯度下降)
###############
# 代价函数
cost = layer3.negative_log_likelihood(y)
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# 所有参数
params = layer3.params + layer2.params + layer1.params + layer0.params
#各个参数的梯度
grads = T.grad(cost, params)
#参数更新规则
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
#train_model在训练过程中根据MSGD优化更新参数
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
###############
# 训练CNN阶段,寻找最优的参数。
###############
print '... training'
#在LeNet5中,batch_size=500,n_train_batches=50000/500=100,patience=10000
#在olivettifaces中,batch_size=40,n_train_batches=320/40=8, paticence可以相应地设置为800,这个可以根据实际情况调节,调大一点也无所谓
patience = 800
patience_increase = 2
improvement_threshold = 0.99
validation_frequency = min(n_train_batches, patience / 2)
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print 'training @ iter = ', iter
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
save_params(layer0.params,layer1.params,layer2.params,layer3.params)#保存参数
# test it on the test set
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
evaluate_olivettifaces()
| LeNetConvPoolLayer |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/service.py | {
"start": 17724,
"end": 26602
} | class ____:
region: Region | None
service_name: str
method_name: str
serial_arguments: ArgumentDict
@property
def address(self) -> str:
if self.region is None:
if not settings.SENTRY_CONTROL_ADDRESS:
raise RpcServiceSetupException(
self.service_name, self.method_name, "Control silo address is not configured"
)
return settings.SENTRY_CONTROL_ADDRESS
else:
if not self.region.address:
raise RpcServiceSetupException(
self.service_name,
self.method_name,
f"Address for region {self.region.name!r} is not configured",
)
return self.region.address
@property
def path(self) -> str:
return django.urls.reverse(
"sentry-api-0-rpc-service",
kwargs={"service_name": self.service_name, "method_name": self.method_name},
)
def dispatch(self, use_test_client: bool = False) -> Any:
serial_response = self._send_to_remote_silo(use_test_client)
return_value = serial_response["value"]
service, _ = _look_up_service_method(self.service_name, self.method_name)
return service.deserialize_rpc_response(self.method_name, return_value)
def _metrics_tags(self, **additional_tags: str | int) -> Mapping[str, str | int | None]:
return dict(
rpc_destination_region=self.region.name if self.region else "control",
rpc_method=f"{self.service_name}.{self.method_name}",
**additional_tags,
)
def get_method_retry_count(self) -> int:
retry_key = f"{self.service_name}.{self.method_name}"
try:
retry_counts_map = options.get("hybridcloud.rpc.method_retry_overrides")
assert isinstance(
retry_counts_map, dict
), "An invalid RPC retry override option was set"
if retry_key in retry_counts_map:
return int(retry_counts_map[retry_key])
except Exception:
# Either we don't have an override option set correctly, or the
# value set for the override is invalid
sentry_sdk.capture_exception()
return options.get("hybridcloud.rpc.retries")
def get_method_timeout(self) -> float:
timeout_key = f"{self.service_name}.{self.method_name}"
try:
timeout_overrides_map = options.get("hybridcloud.rpc.method_timeout_overrides")
assert isinstance(
timeout_overrides_map, dict
), "An invalid RPC timeout override option was set"
if timeout_key in timeout_overrides_map:
return float(timeout_overrides_map[timeout_key])
except Exception:
# Either we don't have an override option set correctly, or the
# value set for the override is invalid
sentry_sdk.capture_exception()
return settings.RPC_TIMEOUT
def _send_to_remote_silo(self, use_test_client: bool) -> Any:
request_body = {
"meta": {}, # reserved for future use
"args": self.serial_arguments,
}
data = json.dumps(request_body).encode(_RPC_CONTENT_CHARSET)
signature = generate_request_signature(self.path, data)
headers = {
"Content-Type": f"application/json; charset={_RPC_CONTENT_CHARSET}",
"Authorization": f"Rpcsignature {signature}",
}
with self._open_request_context():
self._check_disabled()
if use_test_client:
response = self._fire_test_request(headers, data)
else:
response = self._fire_request(headers, data)
metrics.incr(
"hybrid_cloud.dispatch_rpc.response_code",
tags=self._metrics_tags(status=response.status_code),
)
if response.status_code == 200:
return response.json()
self._raise_from_response_status_error(response)
@contextmanager
def _open_request_context(self) -> Generator[None]:
timer = metrics.timer("hybrid_cloud.dispatch_rpc.duration", tags=self._metrics_tags())
span = sentry_sdk.start_span(
op="hybrid_cloud.dispatch_rpc",
name=f"rpc to {self.service_name}.{self.method_name}",
)
with span, timer:
yield
def _remote_exception(self, message: str) -> RpcRemoteException:
return RpcRemoteException(self.service_name, self.method_name, message)
def _raise_from_response_status_error(self, response: requests.Response) -> NoReturn:
rpc_method = f"{self.service_name}.{self.method_name}"
scope = sentry_sdk.get_isolation_scope()
scope.set_tag("rpc_method", rpc_method)
scope.set_tag("rpc_status_code", response.status_code)
if in_test_environment():
if response.status_code == 500:
raise self._remote_exception(
f"Error invoking rpc at {self.path!r}: check error logs for more details"
)
detail = response.json()["detail"]
raise self._remote_exception(
f"Error ({response.status_code} status) invoking rpc at {self.path!r}: {detail}"
)
# Careful not to reveal too much information in production
if response.status_code == 403:
raise self._remote_exception("Unauthorized service access")
if response.status_code == 400:
logger.warning(
"rpc.bad_request",
extra={
"rpc_method": rpc_method,
"error": response.content.decode("utf8"),
},
)
raise self._remote_exception("Invalid service request")
raise self._remote_exception(f"Service unavailable ({response.status_code} status)")
def _fire_test_request(self, headers: Mapping[str, str], data: bytes) -> Any:
from django.test import Client
from sentry.db.postgres.transactions import in_test_assert_no_transaction
in_test_assert_no_transaction(
f"remote service method to {self.path} called inside transaction! Move service calls to outside of transactions."
)
if self.region:
target_mode = SiloMode.REGION
else:
target_mode = SiloMode.CONTROL
with (
SingleProcessSiloModeState.exit(),
SingleProcessSiloModeState.enter(target_mode, self.region),
):
extra: Mapping[str, Any] = {
f"HTTP_{k.replace('-', '_').upper()}": v for k, v in headers.items()
}
return Client().post(self.path, data, headers["Content-Type"], **extra)
def _fire_request(self, headers: MutableMapping[str, str], data: bytes) -> requests.Response:
retry_count = self.get_method_retry_count()
retry_adapter = HTTPAdapter(
max_retries=Retry(
total=retry_count,
backoff_factor=0.1,
status_forcelist=[503],
allowed_methods=["POST"],
)
)
http = requests.Session()
http.mount("http://", retry_adapter)
http.mount("https://", retry_adapter)
# TODO: Performance considerations (persistent connections, pooling, etc.)?
url = self.address + self.path
timeout = self.get_method_timeout()
try:
return http.post(url, headers=headers, data=data, timeout=timeout)
except requests.exceptions.ConnectionError as e:
metrics.incr(
"hybrid_cloud.dispatch_rpc.failure",
tags=self._metrics_tags(kind="connectionerror"),
)
raise self._remote_exception("RPC Connection failed") from e
except requests.exceptions.RetryError as e:
metrics.incr(
"hybrid_cloud.dispatch_rpc.failure",
tags=self._metrics_tags(kind="retryerror"),
)
raise self._remote_exception("RPC failed, max retries reached.") from e
except requests.exceptions.Timeout as e:
metrics.incr(
"hybrid_cloud.dispatch_rpc.failure",
tags=self._metrics_tags(kind="timeout"),
)
raise self._remote_exception(f"Timeout of {settings.RPC_TIMEOUT} exceeded") from e
def _check_disabled(self) -> None:
if disabled_service_methods := options.get("hybrid_cloud.rpc.disabled-service-methods"):
service_method = f"{self.service_name}.{self.method_name}"
if service_method in disabled_service_methods:
raise RpcDisabledException(f"RPC {service_method} disabled")
| _RemoteSiloCall |
python | ray-project__ray | python/ray/util/spark/databricks_hook.py | {
"start": 2652,
"end": 8866
} | class ____(RayOnSparkStartHook):
def get_default_temp_root_dir(self):
return _DATABRICKS_DEFAULT_TMP_ROOT_DIR
def on_ray_dashboard_created(self, port):
display_databricks_driver_proxy_url(
get_spark_session().sparkContext, port, "Ray Cluster Dashboard"
)
def on_cluster_created(self, ray_cluster_handler):
db_api_entry = get_db_entry_point()
if self.is_global:
# Disable auto shutdown if
# 1) autoscaling enabled
# because in autoscaling mode, background spark job will be killed
# automatically when ray cluster is idle.
# 2) global mode cluster
# Because global mode cluster is designed to keep running until
# user request to shut down it, and global mode cluster is shared
# by other users, the code here cannot track usage from other users
# so that we don't know whether it is safe to shut down the global
# cluster automatically.
auto_shutdown_minutes = 0
else:
auto_shutdown_minutes = float(
os.environ.get(DATABRICKS_RAY_ON_SPARK_AUTOSHUTDOWN_MINUTES, "30")
)
if auto_shutdown_minutes == 0:
_logger.info(
"The Ray cluster will keep running until you manually detach the "
"Databricks notebook or call "
"`ray.util.spark.shutdown_ray_cluster()`."
)
return
if auto_shutdown_minutes < 0:
raise ValueError(
"You must set "
f"'{DATABRICKS_RAY_ON_SPARK_AUTOSHUTDOWN_MINUTES}' "
"to a value >= 0."
)
try:
db_api_entry.getIdleTimeMillisSinceLastNotebookExecution()
except Exception:
_logger.warning(
"Failed to retrieve idle time since last notebook execution, "
"so that we cannot automatically shut down Ray cluster when "
"Databricks notebook is inactive for the specified minutes. "
"You need to manually detach Databricks notebook "
"or call `ray.util.spark.shutdown_ray_cluster()` to shut down "
"Ray cluster on spark."
)
return
_logger.info(
"The Ray cluster will be shut down automatically if you don't run "
"commands on the Databricks notebook for "
f"{auto_shutdown_minutes} minutes. You can change the "
"auto-shutdown minutes by setting "
f"'{DATABRICKS_RAY_ON_SPARK_AUTOSHUTDOWN_MINUTES}' environment "
"variable, setting it to 0 means that the Ray cluster keeps running "
"until you manually call `ray.util.spark.shutdown_ray_cluster()` or "
"detach Databricks notebook."
)
def auto_shutdown_watcher():
auto_shutdown_millis = auto_shutdown_minutes * 60 * 1000
while True:
if ray_cluster_handler.is_shutdown:
# The cluster is shut down. The watcher thread exits.
return
idle_time = db_api_entry.getIdleTimeMillisSinceLastNotebookExecution()
if idle_time > auto_shutdown_millis:
from ray.util.spark import cluster_init
with cluster_init._active_ray_cluster_rwlock:
if ray_cluster_handler is cluster_init._active_ray_cluster:
cluster_init.shutdown_ray_cluster()
return
time.sleep(DATABRICKS_AUTO_SHUTDOWN_POLL_INTERVAL_SECONDS)
threading.Thread(target=auto_shutdown_watcher, daemon=True).start()
def on_spark_job_created(self, job_group_id):
db_api_entry = get_db_entry_point()
db_api_entry.registerBackgroundSparkJobGroup(job_group_id)
def custom_environment_variables(self):
conf = {
**super().custom_environment_variables(),
# Hardcode `GLOO_SOCKET_IFNAME` to `eth0` for Databricks runtime.
# Torch on DBR does not reliably detect the correct interface to use,
# and ends up selecting the loopback interface, breaking cross-node
# commnication.
"GLOO_SOCKET_IFNAME": "eth0",
# 'DISABLE_MLFLOW_INTEGRATION' is the environmental variable to disable
# huggingface transformers MLflow integration,
# it doesn't work well in Databricks runtime,
# So disable it by default.
"DISABLE_MLFLOW_INTEGRATION": "TRUE",
}
if verify_databricks_auth_env():
conf[DATABRICKS_HOST] = os.environ[DATABRICKS_HOST]
if DATABRICKS_TOKEN in os.environ:
# PAT auth
conf[DATABRICKS_TOKEN] = os.environ[DATABRICKS_TOKEN]
else:
# OAuth
conf[DATABRICKS_CLIENT_ID] = os.environ[DATABRICKS_CLIENT_ID]
conf[DATABRICKS_CLIENT_SECRET] = os.environ[DATABRICKS_CLIENT_SECRET]
else:
warn_msg = (
"MLflow support is not correctly configured within Ray tasks."
"To enable MLflow integration, "
"you need to set environmental variables DATABRICKS_HOST + "
"DATABRICKS_TOKEN, or set environmental variables "
"DATABRICKS_HOST + DATABRICKS_CLIENT_ID + DATABRICKS_CLIENT_SECRET "
"before calling `ray.util.spark.setup_ray_cluster`, these variables "
"are used to set up authentication with Databricks MLflow "
"service. For details, you can refer to Databricks documentation at "
"<a href='https://docs.databricks.com/en/dev-tools/auth/pat.html'>"
"Databricks PAT auth</a> or "
"<a href='https://docs.databricks.com/en/dev-tools/auth/"
"oauth-m2m.html'>Databricks OAuth</a>."
)
get_databricks_display_html_function()(
f"<b style='color:red;'>{warn_msg}<br></b>"
)
return conf
| DefaultDatabricksRayOnSparkStartHook |
python | redis__redis-py | tests/test_scenario/maint_notifications_helpers.py | {
"start": 2004,
"end": 13386
} | class ____:
@staticmethod
def get_cluster_nodes_info(
fault_injector: FaultInjectorClient,
endpoint_config: Dict[str, Any],
timeout: int = 60,
) -> Dict[str, Any]:
"""Get cluster nodes information from Redis Enterprise."""
try:
# Use rladmin status to get node information
bdb_id = endpoint_config.get("bdb_id")
get_status_action = ActionRequest(
action_type=ActionType.EXECUTE_RLADMIN_COMMAND,
parameters={
"rladmin_command": "status",
"bdb_id": bdb_id,
},
)
trigger_action_result = fault_injector.trigger_action(get_status_action)
action_id = trigger_action_result.get("action_id")
if not action_id:
raise ValueError(
f"Failed to trigger get cluster status action for bdb_id {bdb_id}: {trigger_action_result}"
)
action_status_check_response = fault_injector.get_operation_result(
action_id, timeout=timeout
)
logging.info(
f"Completed cluster nodes info reading: {action_status_check_response}"
)
return action_status_check_response
except Exception as e:
pytest.fail(f"Failed to get cluster nodes info: {e}")
@staticmethod
def find_target_node_and_empty_node(
fault_injector: FaultInjectorClient,
endpoint_config: Dict[str, Any],
) -> Tuple[str, str]:
"""Find the node with master shards and the node with no shards.
Returns:
tuple: (target_node, empty_node) where target_node has master shards
and empty_node has no shards
"""
cluster_info = ClusterOperations.get_cluster_nodes_info(
fault_injector, endpoint_config
)
output = cluster_info.get("output", {}).get("output", "")
if not output:
raise ValueError("No cluster status output found")
# Parse the sections to find nodes with master shards and nodes with no shards
lines = output.split("\n")
shards_section_started = False
nodes_section_started = False
# Get all node IDs from CLUSTER NODES section
all_nodes = set()
nodes_with_any_shards = set() # Nodes with shards from ANY database
nodes_with_target_db_shards = set() # Nodes with shards from target database
master_nodes = set() # Master nodes for target database only
for line in lines:
line = line.strip()
# Start of CLUSTER NODES section
if line.startswith("CLUSTER NODES:"):
nodes_section_started = True
continue
elif line.startswith("DATABASES:"):
nodes_section_started = False
continue
elif nodes_section_started and line and not line.startswith("NODE:ID"):
# Parse node line: node:1 master 10.0.101.206 ... (ignore the role)
parts = line.split()
if len(parts) >= 1:
node_id = parts[0].replace("*", "") # Remove * prefix if present
all_nodes.add(node_id)
# Start of SHARDS section - only care about shard roles here
if line.startswith("SHARDS:"):
shards_section_started = True
continue
elif shards_section_started and line.startswith("DB:ID"):
continue
elif shards_section_started and line and not line.startswith("ENDPOINTS:"):
# Parse shard line: db:1 m-standard redis:1 node:2 master 0-8191 1.4MB OK
parts = line.split()
if len(parts) >= 5:
db_id = parts[0] # db:1, db:2, etc.
node_id = parts[3] # node:2
shard_role = parts[4] # master/slave - this is what matters
# Track ALL nodes with shards (for finding truly empty nodes)
nodes_with_any_shards.add(node_id)
# Only track master nodes for the specific database we're testing
bdb_id = endpoint_config.get("bdb_id")
if db_id == f"db:{bdb_id}":
nodes_with_target_db_shards.add(node_id)
if shard_role == "master":
master_nodes.add(node_id)
elif line.startswith("ENDPOINTS:") or not line:
shards_section_started = False
# Find empty node (node with no shards from ANY database)
nodes_with_no_shards_target_bdb = all_nodes - nodes_with_target_db_shards
logging.debug(f"All nodes: {all_nodes}")
logging.debug(f"Nodes with shards from any database: {nodes_with_any_shards}")
logging.debug(
f"Nodes with target database shards: {nodes_with_target_db_shards}"
)
logging.debug(f"Master nodes (target database only): {master_nodes}")
logging.debug(
f"Nodes with no shards from target database: {nodes_with_no_shards_target_bdb}"
)
if not nodes_with_no_shards_target_bdb:
raise ValueError("All nodes have shards from target database")
if not master_nodes:
raise ValueError("No nodes with master shards from target database found")
# Return the first available empty node and master node (numeric part only)
empty_node = next(iter(nodes_with_no_shards_target_bdb)).split(":")[
1
] # node:1 -> 1
target_node = next(iter(master_nodes)).split(":")[1] # node:2 -> 2
return target_node, empty_node
@staticmethod
def find_endpoint_for_bind(
fault_injector: FaultInjectorClient,
endpoint_config: Dict[str, Any],
endpoint_name: str,
timeout: int = 60,
) -> str:
"""Find the endpoint ID from cluster status.
Returns:
str: The endpoint ID (e.g., "1:1")
"""
cluster_info = ClusterOperations.get_cluster_nodes_info(
fault_injector, endpoint_config, timeout
)
output = cluster_info.get("output", {}).get("output", "")
if not output:
raise ValueError("No cluster status output found")
# Parse the ENDPOINTS section to find endpoint ID
lines = output.split("\n")
endpoints_section_started = False
for line in lines:
line = line.strip()
# Start of ENDPOINTS section
if line.startswith("ENDPOINTS:"):
endpoints_section_started = True
continue
elif line.startswith("SHARDS:"):
endpoints_section_started = False
break
elif endpoints_section_started and line and not line.startswith("DB:ID"):
# Parse endpoint line: db:1 m-standard endpoint:1:1 node:2 single No
parts = line.split()
if len(parts) >= 3 and parts[1] == endpoint_name:
endpoint_full = parts[2] # endpoint:1:1
if endpoint_full.startswith("endpoint:"):
endpoint_id = endpoint_full.replace("endpoint:", "") # 1:1
return endpoint_id
raise ValueError(f"No endpoint ID for {endpoint_name} found in cluster status")
@staticmethod
def execute_failover(
fault_injector: FaultInjectorClient,
endpoint_config: Dict[str, Any],
timeout: int = 60,
) -> Dict[str, Any]:
"""Execute failover command and wait for completion."""
try:
bdb_id = endpoint_config.get("bdb_id")
failover_action = ActionRequest(
action_type=ActionType.FAILOVER,
parameters={
"bdb_id": bdb_id,
},
)
trigger_action_result = fault_injector.trigger_action(failover_action)
action_id = trigger_action_result.get("action_id")
if not action_id:
raise ValueError(
f"Failed to trigger fail over action for bdb_id {bdb_id}: {trigger_action_result}"
)
action_status_check_response = fault_injector.get_operation_result(
action_id, timeout=timeout
)
logging.info(
f"Completed cluster nodes info reading: {action_status_check_response}"
)
return action_status_check_response
except Exception as e:
pytest.fail(f"Failed to get cluster nodes info: {e}")
@staticmethod
def execute_rladmin_migrate(
fault_injector: FaultInjectorClient,
endpoint_config: Dict[str, Any],
target_node: str,
empty_node: str,
) -> str:
"""Execute rladmin migrate command and wait for completion."""
command = f"migrate node {target_node} all_shards target_node {empty_node}"
# Get bdb_id from endpoint configuration
bdb_id = endpoint_config.get("bdb_id")
try:
# Correct parameter format for fault injector
parameters = {
"bdb_id": bdb_id,
"rladmin_command": command, # Just the command without "rladmin" prefix
}
logging.debug(f"Executing rladmin_command with parameter: {parameters}")
action = ActionRequest(
action_type=ActionType.EXECUTE_RLADMIN_COMMAND, parameters=parameters
)
result = fault_injector.trigger_action(action)
logging.debug(f"Migrate command action result: {result}")
action_id = result.get("action_id")
if not action_id:
raise Exception(f"Failed to trigger migrate action: {result}")
return action_id
except Exception as e:
raise Exception(f"Failed to execute rladmin migrate: {e}")
@staticmethod
def execute_rladmin_bind_endpoint(
fault_injector: FaultInjectorClient,
endpoint_config: Dict[str, Any],
endpoint_id: str,
) -> str:
"""Execute rladmin bind endpoint command and wait for completion."""
command = f"bind endpoint {endpoint_id} policy single"
bdb_id = endpoint_config.get("bdb_id")
try:
parameters = {
"rladmin_command": command, # Just the command without "rladmin" prefix
"bdb_id": bdb_id,
}
logging.info(f"Executing rladmin_command with parameter: {parameters}")
action = ActionRequest(
action_type=ActionType.EXECUTE_RLADMIN_COMMAND, parameters=parameters
)
result = fault_injector.trigger_action(action)
logging.info(
f"Migrate command {command} with parameters {parameters} trigger result: {result}"
)
action_id = result.get("action_id")
if not action_id:
raise Exception(f"Failed to trigger bind endpoint action: {result}")
return action_id
except Exception as e:
raise Exception(f"Failed to execute rladmin bind endpoint: {e}")
| ClusterOperations |
python | fluentpython__example-code | attic/concurrency/wikipedia/orig/potd_tests.py | {
"start": 31,
"end": 2921
} | class ____(unittest.TestCase):
def setUp(self):
self.thumb_url = ("""http://upload.wikimedia.org/wikipedia/"""
"""commons/thumb/f/fe/Orthographic_projection_SW.jpg/350px"""
"""-Orthographic_projection_SW.jpg""")
def test_buid_page_url(self):
date = '2014-05-01'
result = potd.build_page_url(date)
self.assertEqual(result, 'http://en.wikipedia.org/wiki/Template:POTD/2014-05-01')
def test_fetch_status_code(self):
date = '2014-05-02'
url = potd.build_page_url(date)
response = potd.fetch(url)
self.assertEqual(response.status_code, 200)
def test_fetch_status_code_not_found(self):
date = '2100-01-01'
url = potd.build_page_url(date)
response = potd.fetch(url)
self.assertEqual(response.status_code, 404)
def test_extract_image_url(self):
image_url = potd.extract_image_url(HTML)
self.assertEqual(image_url, self.thumb_url)
def test_fetch_image_jpeg(self):
response = potd.fetch(self.thumb_url)
self.assertEqual(response.headers['content-type'], 'image/jpeg')
def test_list_days_of_month(self):
year = 2014
month = 5
days = potd.list_days_of_month(year, month)
self.assertEqual(len(days), 31)
self.assertEqual('2014-05-01', days[0])
self.assertEqual('2014-05-31', days[-1])
def test_list_days_of_february(self):
year = 2014
month = 2
days = potd.list_days_of_month(year, month)
self.assertEqual(len(days), 28)
self.assertEqual('2014-02-01', days[0])
self.assertEqual('2014-02-28', days[-1])
def test_format_date(self):
year = 2014
month = 2
day = 1
a_date = '2014-02-01'
date = potd.format_date(year, month, day)
self.assertEqual(a_date, date)
self.assertEqual(potd.format_date(2010, 11, 12), '2010-11-12')
def test_build_save_path(self):
date = '2014-06-04'
path = potd.SAVE_DIR + date + '_350px-Orthographic_projection_SW.jpg'
self.assertEqual(path, potd.build_save_path(date, self.thumb_url))
HTML = (
'''<td><a href="/wiki/File:Orthographic_projection_SW.jpg" class="image"
title="Orthographic projection"><img alt="Orthographic projection"
src="//upload.wikimedia.org/wikipedia/commons/thumb/f/fe/O'''
'''rthographic_projection_SW.jpg/350px-Orthographic_projection_SW.jpg"
width="350" height="350" srcset="//upload.wikimedia.org/wikipedia/comm'''
'''ons/thumb/f/fe/Orthographic_projection_SW.jpg/525px-
Orthographic_projection_SW.jpg 1.5x, //upload.wikimedia.org/wikipedia/
commons/thumb/f/fe/Orthographic_projection_SW.jpg/700px-
Orthographic_projection_SW.jpg 2x" data-file-width="2058" data-file-
height="2058"></a></td>
''')
if __name__ == '__main__':
unittest.main()
| TestSequenceFunctions |
python | getsentry__sentry | src/sentry/middleware/integrations/tasks.py | {
"start": 4272,
"end": 5891
} | class ____(_AsyncRegionDispatcher):
@property
def log_code(self) -> str:
return IntegrationProviderSlug.DISCORD.value
def unpack_payload(self, response: Response) -> Any:
# Region will return a response assuming it's meant to go directly to Discord. Since we're
# handling the request asynchronously, we extract only the data, and post it to the webhook
# that discord provides.
# https://discord.com/developers/docs/interactions/receiving-and-responding#followup-messages
return orjson.loads(response.content).get("data")
@instrumented_task(
name="sentry.middleware.integrations.tasks.convert_to_async_discord_response",
namespace=integrations_control_tasks,
retry=Retry(times=2, delay=5),
silo_mode=SiloMode.CONTROL,
)
def convert_to_async_discord_response(
region_names: list[str],
payload: dict[str, Any],
response_url: str,
) -> None:
"""
This task asks relevant region silos for response data to send asynchronously to Discord. It
assumes Discord has received a callback of type:5 (DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE).
(See https://discord.com/developers/docs/interactions/receiving-and-responding#interaction-response-object-interaction-callback-type)
In the event this task finishes prior to returning the above type, the outbound post will fail.
"""
response = _AsyncDiscordDispatcher(payload, response_url).dispatch(region_names)
if response is not None and response.status_code == status.HTTP_404_NOT_FOUND:
raise Exception("Discord hook is not ready.")
| _AsyncDiscordDispatcher |
python | huggingface__transformers | src/transformers/models/llava_onevision/image_processing_llava_onevision.py | {
"start": 1573,
"end": 3866
} | class ____(ImagesKwargs, total=False):
r"""
image_grid_pinpoints (`list[list[int]]`, *optional*):
A list of possible resolutions to use for processing high resolution images. The best resolution is selected
based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess`
method.
"""
image_grid_pinpoints: list[list[int]]
# Copied from transformers.models.llava_next.image_processing_llava_next.divide_to_patches
def divide_to_patches(image: np.ndarray, patch_size: int, input_data_format) -> list[np.ndarray]:
"""
Divides an image into patches of a specified size.
Args:
image (`np.ndarray`):
The input image.
patch_size (`int`):
The size of each patch.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
list: A list of np.ndarray representing the patches.
"""
patches = []
height, width = get_image_size(image, channel_dim=input_data_format)
for i in range(0, height, patch_size):
for j in range(0, width, patch_size):
if input_data_format == ChannelDimension.LAST:
patch = image[i : i + patch_size, j : j + patch_size]
else:
patch = image[:, i : i + patch_size, j : j + patch_size]
patches.append(patch)
return patches
# Copied from transformers.models.llava_next.image_processing_llava_next.expand_to_square
def expand_to_square(image: np.ndarray, background_color, input_data_format) -> np.ndarray:
"""
Expands an image to a square by adding a background color.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
if width == height:
return image
elif width > height:
result = np.ones((width, width, image.shape[2]), dtype=image.dtype) * background_color
result[(width - height) // 2 : (width - height) // 2 + height, :] = image
return result
else:
result = np.ones((height, height, image.shape[2]), dtype=image.dtype) * background_color
result[:, (height - width) // 2 : (height - width) // 2 + width] = image
return result
| LlavaOnevisionImageProcessorKwargs |
python | astropy__astropy | astropy/units/tests/test_structured.py | {
"start": 29212,
"end": 30758
} | class ____(StructuredTestBaseWithUnits):
"""Somewhat minimal tests. Conversion is most stringent."""
def setup_class(self):
super().setup_class()
self.qpv = self.pv << self.pv_unit
self.pv_mask = np.array(
[
(True, False),
(False, False),
(False, True),
],
[("p", bool), ("v", bool)],
)
self.mpv = Masked(self.qpv, mask=self.pv_mask)
def test_init(self):
assert isinstance(self.mpv, Masked)
assert isinstance(self.mpv, Quantity)
assert_array_equal(self.mpv.unmasked, self.qpv)
assert_array_equal(self.mpv.mask, self.pv_mask)
def test_slicing(self):
mp = self.mpv["p"]
assert isinstance(mp, Masked)
assert isinstance(mp, Quantity)
assert_array_equal(mp.unmasked, self.qpv["p"])
assert_array_equal(mp.mask, self.pv_mask["p"])
def test_conversion(self):
mpv = self.mpv.to("AU,AU/day")
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.to("AU,AU/day"))
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
def test_si(self):
mpv = self.mpv.si
assert isinstance(mpv, Masked)
assert isinstance(mpv, Quantity)
assert_array_equal(mpv.unmasked, self.qpv.si)
assert_array_equal(mpv.mask, self.pv_mask)
assert np.all(mpv == self.mpv)
| TestStructuredMaskedQuantity |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/state.py | {
"start": 2377,
"end": 2904
} | class ____(Protocol[_O]):
"""used at result loading time to install a _LoaderCallable callable
upon a specific InstanceState, which will be used to populate an
attribute when that attribute is accessed.
Concrete examples are per-instance deferred column loaders and
relationship lazy loaders.
"""
def __call__(
self,
state: InstanceState[_O],
dict_: _InstanceDict,
row: Row[Unpack[TupleAny]],
) -> None: ...
@inspection._self_inspects
| _InstallLoaderCallableProto |
python | numba__numba | numba/core/types/containers.py | {
"start": 2931,
"end": 3238
} | class ____(Buffer):
"""
Type class for memoryview objects.
"""
def is_homogeneous(*tys):
"""Are the types homogeneous?
"""
if tys:
first, tys = tys[0], tys[1:]
return not any(t != first for t in tys)
else:
# *tys* is empty.
return False
| MemoryView |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_direct_caller.py | {
"start": 193,
"end": 256
} | class ____(BaseModel):
type: Literal["direct"]
| BetaDirectCaller |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.