language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__pip | tests/unit/test_req_install.py | {
"start": 1847,
"end": 4503
} | class ____:
def test_install_req_from_string_invalid_requirement(self) -> None:
"""
Requirement strings that cannot be parsed by
packaging.requirements.Requirement raise an InstallationError.
"""
with pytest.raises(InstallationError) as excinfo:
install_req_from_req_string("http:/this/is/invalid")
assert str(excinfo.value) == (
"Invalid requirement: 'http:/this/is/invalid': "
"Expected end or semicolon (after name and no valid version specifier)\n"
" http:/this/is/invalid\n"
" ^"
)
def test_install_req_from_string_without_comes_from(self) -> None:
"""
Test to make sure that install_req_from_string succeeds
when called with URL (PEP 508) but without comes_from.
"""
# Test with a PEP 508 url install string:
wheel_url = (
"https://download.pytorch.org/whl/cu90/"
"torch-1.0.0-cp36-cp36m-win_amd64.whl"
)
install_str = "torch@ " + wheel_url
install_req = install_req_from_req_string(install_str)
assert isinstance(install_req, InstallRequirement)
assert install_req.link is not None
assert install_req.link.url == wheel_url
assert install_req.req is not None
assert install_req.req.url == wheel_url
assert install_req.comes_from is None
assert install_req.is_wheel
def test_install_req_from_string_with_comes_from_without_link(self) -> None:
"""
Test to make sure that install_req_from_string succeeds
when called with URL (PEP 508) and comes_from
does not have a link.
"""
# Test with a PEP 508 url install string:
wheel_url = (
"https://download.pytorch.org/whl/cu90/"
"torch-1.0.0-cp36-cp36m-win_amd64.whl"
)
install_str = "torch@ " + wheel_url
# Dummy numpy "comes_from" requirement without link:
comes_from = InstallRequirement(Requirement("numpy>=1.15.0"), comes_from=None)
# Attempt install from install string comes:
install_req = install_req_from_req_string(install_str, comes_from=comes_from)
assert isinstance(install_req, InstallRequirement)
assert isinstance(install_req.comes_from, InstallRequirement)
assert install_req.comes_from.link is None
assert install_req.link is not None
assert install_req.link.url == wheel_url
assert install_req.req is not None
assert install_req.req.url == wheel_url
assert install_req.is_wheel
| TestInstallRequirementFrom |
python | openai__openai-python | src/openai/types/responses/parsed_response.py | {
"start": 2121,
"end": 2364
} | class ____(ResponseOutputMessage, GenericModel, Generic[ContentType]):
if TYPE_CHECKING:
content: List[ParsedContent[ContentType]] # type: ignore[assignment]
else:
content: List[ParsedContent]
| ParsedResponseOutputMessage |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_flakiness.py | {
"start": 4827,
"end": 6216
} | class ____(Exception):
pass
@composite
def single_bool_lists(draw):
n = draw(integers(0, 20))
result = [False] * (n + 1)
result[n] = True
return result
@xfail_on_crosshair(Why.nested_given)
@example([True, False, False, False], [3], None)
@example([False, True, False, False], [3], None)
@example([False, False, True, False], [3], None)
@example([False, False, False, True], [3], None)
@settings(deadline=None, suppress_health_check=[HealthCheck.nested_given])
@given(lists(booleans()) | single_bool_lists(), lists(integers(1, 3)), random_module())
def test_failure_sequence_inducing(building, testing, rnd):
buildit = iter(building)
testit = iter(testing)
def build(x):
try:
assume(not next(buildit))
except StopIteration:
pass
return x
@given(integers().map(build))
@settings(
verbosity=Verbosity.quiet,
database=None,
suppress_health_check=list(HealthCheck),
phases=no_shrink,
)
def test(x):
try:
i = next(testit)
except StopIteration:
return
if i == 1:
return
elif i == 2:
reject()
else:
raise Nope
try:
test()
except (Nope, Flaky, Unsatisfiable):
pass
except UnsatisfiedAssumption:
raise SatisfyMe from None
| SatisfyMe |
python | geekcomputers__Python | brickout-game/brickout-game.py | {
"start": 5316,
"end": 10210
} | class ____(pygame.sprite.Group):
def __init__(self, screen, x, y, width, height):
self.__screen = screen
self._x = x
self._y = y
self._width = width
self._height = height
self._bricks = []
X = x
Y = y
for i in range(3):
for j in range(4):
self._bricks.append(Brick(screen, width, height, X, Y))
X += width + (width / 7.0)
Y += height + (height / 7.0)
X = x
def add(self, brick):
"""
adds a brick to this BrickWall (group)
"""
self._bricks.append(brick)
def remove(self, brick):
"""
removes a brick from this BrickWall (group)
"""
self._bricks.remove(brick)
def draw(self):
"""
draws all bricks onto screen.
"""
for brick in self._bricks:
if brick != None:
brick.draw()
def update(self, ball):
"""
checks collision between ball and bricks.
"""
for i in range(len(self._bricks)):
if (self._bricks[i] != None) and self._bricks[i].collide(ball):
self._bricks[i] = None
# removes the None-elements from the brick list.
for brick in self._bricks:
if brick is None:
self._bricks.remove(brick)
def hasWin(self):
"""
Has player win the game?
"""
return len(self._bricks) == 0
def collide(self, ball):
"""
check collisions between the ball and
any of the bricks.
"""
for brick in self._bricks:
if brick.collide(ball):
return True
return False
# The game objects ball, paddle and brick wall
ball = Ball(screen, 25, random.randint(1, 700), 250)
paddle = Paddle(screen, 100, 20, 250, 450)
brickWall = BrickWall(screen, 25, 25, 150, 50)
isGameOver = False # determines whether game is lose
gameStatus = True # game is still running
score = 0 # score for the game.
pygame.display.set_caption("Brickout-game")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# for displaying text in the game
pygame.font.init() # you have to call this at the start,
# if you want to use this module.
# message for game over
mgGameOver = pygame.font.SysFont("Comic Sans MS", 40)
# message for winning the game.
mgWin = pygame.font.SysFont("Comic Sans MS", 40)
# message for score
mgScore = pygame.font.SysFont("Comic Sans MS", 40)
textsurfaceGameOver = mgGameOver.render("Game Over!", False, (0, 0, 0))
textsurfaceWin = mgWin.render("You win!", False, (0, 0, 0))
textsurfaceScore = mgScore.render("score: " + str(score), False, (0, 0, 0))
# -------- Main Program Loop -----------
while not done:
# --- Main event loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# --- Game logic should go here
# --- Screen-clearing code goes here
# Here, we clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
# If you want a background image, replace this clear with blit'ing the
# background image.
screen.fill(WHITE)
# --- Drawing code should go here
"""
Because I use OOP in the game logic and the drawing code,
are both in the same section.
"""
if gameStatus:
# first draws ball for appropriate displaying the score.
brickWall.draw()
# for counting and displaying the score
if brickWall.collide(ball):
score += 10
textsurfaceScore = mgScore.render("score: " + str(score), False, (0, 0, 0))
screen.blit(textsurfaceScore, (300, 0))
# after scoring. because hit bricks are removed in the update-method
brickWall.update(ball)
paddle.draw()
paddle.update()
if ball.update(paddle, brickWall):
isGameOver = True
gameStatus = False
if brickWall.hasWin():
gameStatus = False
ball.draw()
else: # game isn't running.
if isGameOver: # player lose
screen.blit(textsurfaceGameOver, (0, 0))
textsurfaceScore = mgScore.render("score: " + str(score), False, (0, 0, 0))
screen.blit(textsurfaceScore, (300, 0))
elif brickWall.hasWin(): # player win
screen.blit(textsurfaceWin, (0, 0))
textsurfaceScore = mgScore.render("score: " + str(score), False, (0, 0, 0))
screen.blit(textsurfaceScore, (300, 0))
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
clock.tick(60)
# Close the window and quit.
pygame.quit()
| BrickWall |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image02.py | {
"start": 315,
"end": 898
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"D7", self.image_dir + "yellow.png", {"x_offset": 1, "y_offset": 2}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/search-a-2d-matrix-ii.py | {
"start": 33,
"end": 580
} | class ____(object):
# @param {integer[][]} matrix
# @param {integer} target
# @return {boolean}
def searchMatrix(self, matrix, target):
m = len(matrix)
if m == 0:
return False
n = len(matrix[0])
if n == 0:
return False
i, j = 0, n - 1
while i < m and j >= 0:
if matrix[i][j] == target:
return True
elif matrix[i][j] > target:
j -= 1
else:
i += 1
return False
| Solution |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 8743,
"end": 9246
} | class ____:
def setup(self):
N = 100_000
data = np.random.randn(N, 2)
mi = MultiIndex.from_arrays(
[
np.arange(N),
date_range("1970-01-01", periods=N, freq="ms"),
]
)
self.df = DataFrame(data)
self.df_mi = DataFrame(data, index=mi)
def time_to_records(self):
self.df.to_records(index=True)
def time_to_records_multiindex(self):
self.df_mi.to_records(index=True)
| ToRecords |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_config_command.py | {
"start": 22565,
"end": 25292
} | class ____:
@conf_vars({("core", "executor"): "SequentialExecutor"})
def test_update_config_all_options_dry_run(self, tmp_path, monkeypatch, capsys):
cfg_file = tmp_path / "airflow.cfg"
initial_config = "[core]\nexecutor = SequentialExecutor\n"
cfg_file.write_text(initial_config)
monkeypatch.setattr(config_command, "AIRFLOW_CONFIG", str(cfg_file))
def fake_write_custom_config(file, **kwargs):
file.write("updated_config_dry_run")
monkeypatch.setattr(conf, "write_custom_config", fake_write_custom_config)
parser = cli_parser.get_parser()
args = parser.parse_args(
[
"config",
"update",
"--all-recommendations",
]
)
config_command.update_config(args)
output = capsys.readouterr().out
assert "Dry-run mode enabled" in output
assert "The following are the changes in airflow config:" in output
current_cfg = cfg_file.read_text()
assert initial_config in current_cfg, "Dry-run should not modify the config file."
@conf_vars({("core", "executor"): "SequentialExecutor"})
def test_update_config_all_options_fix(self, tmp_path, monkeypatch, capsys):
cfg_file = tmp_path / "airflow.cfg"
initial_config = "[core]\nexecutor = SequentialExecutor\n"
cfg_file.write_text(initial_config)
monkeypatch.setattr(config_command, "AIRFLOW_CONFIG", str(cfg_file))
def fake_write_custom_config(file, **kwargs):
file.write("updated_config")
monkeypatch.setattr(conf, "write_custom_config", fake_write_custom_config)
def fake_copy2(src, dst):
with open(dst, "w") as f:
f.write("backup_config")
monkeypatch.setattr(shutil, "copy2", fake_copy2)
parser = cli_parser.get_parser()
args = parser.parse_args(
[
"config",
"update",
"--fix",
"--all-recommendations",
]
)
config_command.update_config(args)
output = capsys.readouterr().out
assert "Backup saved as" in output
assert "The following are the changes in airflow config:" in output
updated_cfg = cfg_file.read_text()
assert "updated_config" in updated_cfg, "Fix mode should update the configuration file."
backup_path = str(cfg_file) + ".bak"
assert os.path.exists(backup_path), "Backup file should be created."
backup_content = open(backup_path).read()
assert "backup_config" in backup_content, "Backup file should contain the original content."
| TestCliConfigUpdate |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 46815,
"end": 48781
} | class ____(TestCase):
"""Tests for ``split_before()``"""
def test_starts_with_sep(self):
actual = list(mi.split_before('xooxoo', lambda c: c == 'x'))
expected = [['x', 'o', 'o'], ['x', 'o', 'o']]
self.assertEqual(actual, expected)
def test_ends_with_sep(self):
actual = list(mi.split_before('ooxoox', lambda c: c == 'x'))
expected = [['o', 'o'], ['x', 'o', 'o'], ['x']]
self.assertEqual(actual, expected)
def test_no_sep(self):
actual = list(mi.split_before('ooo', lambda c: c == 'x'))
expected = [['o', 'o', 'o']]
self.assertEqual(actual, expected)
def test_empty_collection(self):
actual = list(mi.split_before([], lambda c: bool(c)))
expected = []
self.assertEqual(actual, expected)
def test_max_split(self):
for args, expected in [
(
('a,b,c,d', lambda c: c == ',', -1),
[['a'], [',', 'b'], [',', 'c'], [',', 'd']],
),
(
('a,b,c,d', lambda c: c == ',', 0),
[['a', ',', 'b', ',', 'c', ',', 'd']],
),
(
('a,b,c,d', lambda c: c == ',', 1),
[['a'], [',', 'b', ',', 'c', ',', 'd']],
),
(
('a,b,c,d', lambda c: c == ',', 2),
[['a'], [',', 'b'], [',', 'c', ',', 'd']],
),
(
('a,b,c,d', lambda c: c == ',', 10),
[['a'], [',', 'b'], [',', 'c'], [',', 'd']],
),
(
('a,b,c,d', lambda c: c == '@', 2),
[['a', ',', 'b', ',', 'c', ',', 'd']],
),
(
('a,b,c,d', lambda c: c != ',', 2),
[['a', ','], ['b', ','], ['c', ',', 'd']],
),
]:
actual = list(mi.split_before(*args))
self.assertEqual(actual, expected)
| SplitBeforeTest |
python | numpy__numpy | numpy/matrixlib/tests/test_defmatrix.py | {
"start": 10101,
"end": 10341
} | class ____:
def test_basic(self):
x = asmatrix(np.zeros((3, 2), float))
y = np.zeros((3, 1), float)
y[:, 0] = [0.8, 0.2, 0.3]
x[:, 1] = y > 0.5
assert_equal(x, [[0, 1], [0, 0], [0, 0]])
| TestIndexing |
python | pydata__xarray | xarray/core/coordinates.py | {
"start": 6049,
"end": 29960
} | class ____(AbstractCoordinates):
"""Dictionary like container for Xarray coordinates (variables + indexes).
This collection is a mapping of coordinate names to
:py:class:`~xarray.DataArray` objects.
It can be passed directly to the :py:class:`~xarray.Dataset` and
:py:class:`~xarray.DataArray` constructors via their `coords` argument. This
will add both the coordinates variables and their index.
Coordinates are either:
- returned via the :py:attr:`Dataset.coords`, :py:attr:`DataArray.coords`,
and :py:attr:`DataTree.coords` properties,
- built from Xarray or Pandas index objects
(e.g., :py:meth:`Coordinates.from_xindex` or
:py:meth:`Coordinates.from_pandas_multiindex`),
- built manually from input coordinate data and Xarray ``Index`` objects via
:py:meth:`Coordinates.__init__` (beware that no consistency check is done
on those inputs).
To create new coordinates from an existing Xarray ``Index`` object, use
:py:meth:`Coordinates.from_xindex` instead of
:py:meth:`Coordinates.__init__`. The latter is useful, e.g., for creating
coordinates with no default index.
Parameters
----------
coords: dict-like, optional
Mapping where keys are coordinate names and values are objects that
can be converted into a :py:class:`~xarray.Variable` object
(see :py:func:`~xarray.as_variable`). If another
:py:class:`~xarray.Coordinates` object is passed, its indexes
will be added to the new created object.
indexes: dict-like, optional
Mapping where keys are coordinate names and values are
:py:class:`~xarray.indexes.Index` objects. If None (default),
pandas indexes will be created for each dimension coordinate.
Passing an empty dictionary will skip this default behavior.
Examples
--------
Create a dimension coordinate with a default (pandas) index:
>>> xr.Coordinates({"x": [1, 2]})
Coordinates:
* x (x) int64 16B 1 2
Create a dimension coordinate with no index:
>>> xr.Coordinates(coords={"x": [1, 2]}, indexes={})
Coordinates:
x (x) int64 16B 1 2
Create a new Coordinates object from existing dataset coordinates
(indexes are passed):
>>> ds = xr.Dataset(coords={"x": [1, 2]})
>>> xr.Coordinates(ds.coords)
Coordinates:
* x (x) int64 16B 1 2
Create indexed coordinates from a ``pandas.MultiIndex`` object:
>>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]])
>>> xr.Coordinates.from_pandas_multiindex(midx, "x")
Coordinates:
* x (x) object 32B MultiIndex
* x_level_0 (x) object 32B 'a' 'a' 'b' 'b'
* x_level_1 (x) int64 32B 0 1 0 1
Create a new Dataset object by passing a Coordinates object:
>>> midx_coords = xr.Coordinates.from_pandas_multiindex(midx, "x")
>>> xr.Dataset(coords=midx_coords)
<xarray.Dataset> Size: 96B
Dimensions: (x: 4)
Coordinates:
* x (x) object 32B MultiIndex
* x_level_0 (x) object 32B 'a' 'a' 'b' 'b'
* x_level_1 (x) int64 32B 0 1 0 1
Data variables:
*empty*
"""
_data: DataWithCoords
__slots__ = ("_data",)
def __init__(
self,
coords: Mapping[Any, Any] | None = None,
indexes: Mapping[Any, Index] | None = None,
) -> None:
# When coordinates are constructed directly, an internal Dataset is
# created so that it is compatible with the DatasetCoordinates and
# DataArrayCoordinates classes serving as a proxy for the data.
# TODO: refactor DataArray / Dataset so that Coordinates store the data.
from xarray.core.dataset import Dataset
if coords is None:
coords = {}
variables: dict[Hashable, Variable]
default_indexes: dict[Hashable, PandasIndex] = {}
coords_obj_indexes: dict[Hashable, Index] = {}
if isinstance(coords, Coordinates):
if indexes is not None:
raise ValueError(
"passing both a ``Coordinates`` object and a mapping of indexes "
"to ``Coordinates.__init__`` is not allowed "
"(this constructor does not support merging them)"
)
variables = {k: v.copy() for k, v in coords.variables.items()}
coords_obj_indexes = dict(coords.xindexes)
else:
variables = {}
for name, data in coords.items():
var = as_variable(data, name=name, auto_convert=False)
if var.dims == (name,) and indexes is None:
index, index_vars = create_default_index_implicit(var, list(coords))
default_indexes.update(dict.fromkeys(index_vars, index))
variables.update(index_vars)
else:
variables[name] = var
if indexes is None:
indexes = {}
else:
indexes = dict(indexes)
indexes.update(default_indexes)
indexes.update(coords_obj_indexes)
no_coord_index = set(indexes) - set(variables)
if no_coord_index:
raise ValueError(
f"no coordinate variables found for these indexes: {no_coord_index}"
)
for k, idx in indexes.items():
if not isinstance(idx, Index):
raise TypeError(f"'{k}' is not an `xarray.indexes.Index` object")
# maybe convert to base variable
for k, v in variables.items():
if k not in indexes:
variables[k] = v.to_base_variable()
self._data = Dataset._construct_direct(
coord_names=set(variables), variables=variables, indexes=indexes
)
@classmethod
def _construct_direct(
cls,
coords: dict[Any, Variable],
indexes: dict[Any, Index],
dims: dict[Any, int] | None = None,
) -> Self:
from xarray.core.dataset import Dataset
obj = object.__new__(cls)
obj._data = Dataset._construct_direct(
coord_names=set(coords),
variables=coords,
indexes=indexes,
dims=dims,
)
return obj
@classmethod
def from_xindex(cls, index: Index) -> Self:
"""Create Xarray coordinates from an existing Xarray index.
Parameters
----------
index : Index
Xarray index object. The index must support generating new
coordinate variables from itself.
Returns
-------
coords : Coordinates
A collection of Xarray indexed coordinates created from the index.
"""
variables = index.create_variables()
if not variables:
raise ValueError(
"`Coordinates.from_xindex()` only supports index objects that can generate "
"new coordinate variables from scratch. The given index (shown below) did not "
f"create any coordinate.\n{index!r}"
)
indexes = dict.fromkeys(variables, index)
return cls(coords=variables, indexes=indexes)
@classmethod
def from_pandas_multiindex(cls, midx: pd.MultiIndex, dim: Hashable) -> Self:
"""Wrap a pandas multi-index as Xarray coordinates (dimension + levels).
The returned coordinate variables can be directly assigned to a
:py:class:`~xarray.Dataset` or :py:class:`~xarray.DataArray` via the
``coords`` argument of their constructor.
Parameters
----------
midx : :py:class:`pandas.MultiIndex`
Pandas multi-index object.
dim : str
Dimension name.
Returns
-------
coords : Coordinates
A collection of Xarray indexed coordinates created from the multi-index.
"""
xr_idx = PandasMultiIndex(midx, dim)
variables = xr_idx.create_variables()
indexes = dict.fromkeys(variables, xr_idx)
return cls(coords=variables, indexes=indexes)
@property
def _names(self) -> set[Hashable]:
return self._data._coord_names
@property
def dims(self) -> Frozen[Hashable, int] | tuple[Hashable, ...]:
"""Mapping from dimension names to lengths or tuple of dimension names."""
return self._data.dims
@property
def sizes(self) -> Frozen[Hashable, int]:
"""Mapping from dimension names to lengths."""
return self._data.sizes
@property
def dtypes(self) -> Frozen[Hashable, np.dtype]:
"""Mapping from coordinate names to dtypes.
Cannot be modified directly.
See Also
--------
Dataset.dtypes
"""
return Frozen({n: v.dtype for n, v in self._data.variables.items()})
@property
def variables(self) -> Mapping[Hashable, Variable]:
"""Low level interface to Coordinates contents as dict of Variable objects.
This dictionary is frozen to prevent mutation.
"""
return self._data.variables
def to_dataset(self) -> Dataset:
"""Convert these coordinates into a new Dataset."""
names = [name for name in self._data._variables if name in self._names]
return self._data._copy_listed(names)
def __getitem__(self, key: Hashable) -> DataArray:
return self._data[key]
def __delitem__(self, key: Hashable) -> None:
# redirect to DatasetCoordinates.__delitem__
del self._data.coords[key]
def equals(self, other: Self) -> bool:
"""Two Coordinates objects are equal if they have matching variables,
all of which are equal.
See Also
--------
Coordinates.identical
"""
if not isinstance(other, Coordinates):
return False
return self.to_dataset().equals(other.to_dataset())
def identical(self, other: Self) -> bool:
"""Like equals, but also checks all variable attributes.
See Also
--------
Coordinates.equals
"""
if not isinstance(other, Coordinates):
return False
return self.to_dataset().identical(other.to_dataset())
def _update_coords(
self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
) -> None:
# redirect to DatasetCoordinates._update_coords
self._data.coords._update_coords(coords, indexes)
def _drop_coords(self, coord_names):
# redirect to DatasetCoordinates._drop_coords
self._data.coords._drop_coords(coord_names)
def _merge_raw(self, other, reflexive):
"""For use with binary arithmetic."""
if other is None:
variables = dict(self.variables)
indexes = dict(self.xindexes)
else:
coord_list = [self, other] if not reflexive else [other, self]
variables, indexes = merge_coordinates_without_align(coord_list)
return variables, indexes
@contextmanager
def _merge_inplace(self, other):
"""For use with in-place binary arithmetic."""
if other is None:
yield
else:
# don't include indexes in prioritized, because we didn't align
# first and we want indexes to be checked
prioritized = {
k: (v, None)
for k, v in self.variables.items()
if k not in self.xindexes
}
variables, indexes = merge_coordinates_without_align(
[self, other], prioritized
)
yield
self._update_coords(variables, indexes)
def merge(self, other: Mapping[Any, Any] | None) -> Dataset:
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : dict-like, optional
A :py:class:`Coordinates` object or any mapping that can be turned
into coordinates.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from xarray.core.dataset import Dataset
if other is None:
return self.to_dataset()
if not isinstance(other, Coordinates):
other = Dataset(coords=other).coords
coords, indexes = merge_coordinates_without_align([self, other])
coord_names = set(coords)
return Dataset._construct_direct(
variables=coords, coord_names=coord_names, indexes=indexes
)
def __or__(self, other: Mapping[Any, Any] | None) -> Coordinates:
"""Merge two sets of coordinates to create a new Coordinates object
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : dict-like, optional
A :py:class:`Coordinates` object or any mapping that can be turned
into coordinates.
Returns
-------
merged : Coordinates
A new Coordinates object with merged coordinates.
See Also
--------
Coordinates.merge
"""
return self.merge(other).coords
def __setitem__(self, key: Hashable, value: Any) -> None:
self.update({key: value})
def update(self, other: Mapping[Any, Any]) -> None:
"""Update this Coordinates variables with other coordinate variables."""
if not len(other):
return
other_coords: Coordinates
if isinstance(other, Coordinates):
# Coordinates object: just pass it (default indexes won't be created)
other_coords = other
else:
other_coords = create_coords_with_default_indexes(
getattr(other, "variables", other)
)
# Discard original indexed coordinates prior to merge allows to:
# - fail early if the new coordinates don't preserve the integrity of existing
# multi-coordinate indexes
# - drop & replace coordinates without alignment (note: we must keep indexed
# coordinates extracted from the DataArray objects passed as values to
# `other` - if any - as those are still used for aligning the old/new coordinates)
coords_to_align = drop_indexed_coords(set(other_coords) & set(other), self)
coords, indexes = merge_coords(
[coords_to_align, other_coords],
priority_arg=1,
indexes=coords_to_align.xindexes,
)
# special case for PandasMultiIndex: updating only its dimension coordinate
# is still allowed but depreciated.
# It is the only case where we need to actually drop coordinates here (multi-index levels)
# TODO: remove when removing PandasMultiIndex's dimension coordinate.
self._drop_coords(self._names - coords_to_align._names)
self._update_coords(coords, indexes)
def assign(self, coords: Mapping | None = None, **coords_kwargs: Any) -> Self:
"""Assign new coordinates (and indexes) to a Coordinates object, returning
a new object with all the original coordinates in addition to the new ones.
Parameters
----------
coords : mapping of dim to coord, optional
A mapping whose keys are the names of the coordinates and values are the
coordinates to assign. The mapping will generally be a dict or
:class:`Coordinates`.
* If a value is a standard data value — for example, a ``DataArray``,
scalar, or array — the data is simply assigned as a coordinate.
* A coordinate can also be defined and attached to an existing dimension
using a tuple with the first element the dimension name and the second
element the values for this new coordinate.
**coords_kwargs
The keyword arguments form of ``coords``.
One of ``coords`` or ``coords_kwargs`` must be provided.
Returns
-------
new_coords : Coordinates
A new Coordinates object with the new coordinates (and indexes)
in addition to all the existing coordinates.
Examples
--------
>>> coords = xr.Coordinates()
>>> coords
Coordinates:
*empty*
>>> coords.assign(x=[1, 2])
Coordinates:
* x (x) int64 16B 1 2
>>> midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]])
>>> coords.assign(xr.Coordinates.from_pandas_multiindex(midx, "y"))
Coordinates:
* y (y) object 32B MultiIndex
* y_level_0 (y) object 32B 'a' 'a' 'b' 'b'
* y_level_1 (y) int64 32B 0 1 0 1
"""
# TODO: this doesn't support a callable, which is inconsistent with `DataArray.assign_coords`
coords = either_dict_or_kwargs(coords, coords_kwargs, "assign")
new_coords = self.copy()
new_coords.update(coords)
return new_coords
def _overwrite_indexes(
self,
indexes: Mapping[Any, Index],
variables: Mapping[Any, Variable] | None = None,
) -> Self:
results = self.to_dataset()._overwrite_indexes(indexes, variables)
# TODO: remove cast once we get rid of DatasetCoordinates
# and DataArrayCoordinates (i.e., Dataset and DataArray encapsulate Coordinates)
return cast(Self, results.coords)
def _reindex_callback(
self,
aligner: Aligner,
dim_pos_indexers: dict[Hashable, Any],
variables: dict[Hashable, Variable],
indexes: dict[Hashable, Index],
fill_value: Any,
exclude_dims: frozenset[Hashable],
exclude_vars: frozenset[Hashable],
) -> Self:
"""Callback called from ``Aligner`` to create a new reindexed Coordinate."""
aligned = self.to_dataset()._reindex_callback(
aligner,
dim_pos_indexers,
variables,
indexes,
fill_value,
exclude_dims,
exclude_vars,
)
# TODO: remove cast once we get rid of DatasetCoordinates
# and DataArrayCoordinates (i.e., Dataset and DataArray encapsulate Coordinates)
return cast(Self, aligned.coords)
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython."""
return self._data._ipython_key_completions_()
def copy(
self,
deep: bool = False,
memo: dict[int, Any] | None = None,
) -> Self:
"""Return a copy of this Coordinates object."""
# do not copy indexes (may corrupt multi-coordinate indexes)
# TODO: disable variables deepcopy? it may also be problematic when they
# encapsulate index objects like pd.Index
variables = {
k: v._copy(deep=deep, memo=memo) for k, v in self.variables.items()
}
# TODO: getting an error with `self._construct_direct`, possibly because of how
# a subclass implements `_construct_direct`. (This was originally the same
# runtime code, but we switched the type definitions in #8216, which
# necessitates the cast.)
return cast(
Self,
Coordinates._construct_direct(
coords=variables, indexes=dict(self.xindexes), dims=dict(self.sizes)
),
)
def drop_vars(
self,
names: str
| Iterable[Hashable]
| Callable[
[Coordinates | Dataset | DataArray | DataTree],
str | Iterable[Hashable],
],
*,
errors: ErrorOptions = "raise",
) -> Self:
"""Drop variables from this Coordinates object.
Note that indexes that depend on these variables will also be dropped.
Parameters
----------
names : hashable or iterable or callable
Name(s) of variables to drop. If a callable, this is object is passed as its
only argument and its result is used.
errors : {"raise", "ignore"}, default: "raise"
Error treatment.
- ``'raise'``: raises a :py:class:`ValueError` error if any of the variable
passed are not in the dataset
- ``'ignore'``: any given names that are in the dataset are dropped and no
error is raised.
"""
return cast(Self, self.to_dataset().drop_vars(names, errors=errors).coords)
def drop_dims(
self,
drop_dims: str | Iterable[Hashable],
*,
errors: ErrorOptions = "raise",
) -> Self:
"""Drop dimensions and associated variables from this dataset.
Parameters
----------
drop_dims : str or Iterable of Hashable
Dimension or dimensions to drop.
errors : {"raise", "ignore"}, default: "raise"
If 'raise', raises a ValueError error if any of the
dimensions passed are not in the dataset. If 'ignore', any given
dimensions that are in the dataset are dropped and no error is raised.
Returns
-------
obj : Coordinates
Coordinates object without the given dimensions (or any coordinates
containing those dimensions).
"""
return cast(Self, self.to_dataset().drop_dims(drop_dims, errors=errors).coords)
def rename_dims(
self,
dims_dict: Mapping[Any, Hashable] | None = None,
**dims: Hashable,
) -> Self:
"""Returns a new object with renamed dimensions only.
Parameters
----------
dims_dict : dict-like, optional
Dictionary whose keys are current dimension names and
whose values are the desired names. The desired names must
not be the name of an existing dimension or Variable in the Coordinates.
**dims : optional
Keyword form of ``dims_dict``.
One of dims_dict or dims must be provided.
Returns
-------
renamed : Coordinates
Coordinates object with renamed dimensions.
"""
return cast(Self, self.to_dataset().rename_dims(dims_dict, **dims).coords)
def rename_vars(
self,
name_dict: Mapping[Any, Hashable] | None = None,
**names: Hashable,
) -> Coordinates:
"""Returns a new object with renamed variables.
Parameters
----------
name_dict : dict-like, optional
Dictionary whose keys are current variable or coordinate names and
whose values are the desired names.
**names : optional
Keyword form of ``name_dict``.
One of name_dict or names must be provided.
Returns
-------
renamed : Coordinates
Coordinates object with renamed variables
"""
return cast(Self, self.to_dataset().rename_vars(name_dict, **names).coords)
| Coordinates |
python | doocs__leetcode | solution/1100-1199/1105.Filling Bookcase Shelves/Solution.py | {
"start": 0,
"end": 474
} | class ____:
def minHeightShelves(self, books: List[List[int]], shelfWidth: int) -> int:
n = len(books)
f = [0] * (n + 1)
for i, (w, h) in enumerate(books, 1):
f[i] = f[i - 1] + h
for j in range(i - 1, 0, -1):
w += books[j - 1][0]
if w > shelfWidth:
break
h = max(h, books[j - 1][1])
f[i] = min(f[i], f[j - 1] + h)
return f[n]
| Solution |
python | gevent__gevent | src/gevent/_util.py | {
"start": 277,
"end": 5100
} | class ____(object):
"""
A special object you must never pass to any gevent API.
Used as a marker object for keyword arguments that cannot have the
builtin None (because that might be a valid value).
"""
__slots__ = ()
def __repr__(self):
return '<default value>'
_NONE = _NONE()
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
'__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned=WRAPPER_ASSIGNMENTS,
updated=WRAPPER_UPDATES):
"""
Based on code from the standard library ``functools``, but
doesn't perform any of the troublesome imports.
functools imports RLock from _thread for purposes of the
``lru_cache``, making it problematic to use from gevent.
The other imports are somewhat heavy: abc, collections, types.
"""
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Issue #17482: set __wrapped__ last so we don't inadvertently copy it
# from the wrapped function when updating __dict__
wrapper.__wrapped__ = wrapped
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def copy_globals(source,
globs,
only_names=None,
ignore_missing_names=False,
names_to_ignore=(),
dunder_names_to_keep=('__implements__', '__all__', '__imports__'),
cleanup_globs=True):
"""
Copy attributes defined in ``source.__dict__`` to the dictionary
in globs (which should be the caller's :func:`globals`).
Names that start with ``__`` are ignored (unless they are in
*dunder_names_to_keep*). Anything found in *names_to_ignore* is
also ignored.
If *only_names* is given, only those attributes will be
considered. In this case, *ignore_missing_names* says whether or
not to raise an :exc:`AttributeError` if one of those names can't
be found.
If *cleanup_globs* has a true value, then common things imported but
not used at runtime are removed, including this function.
Returns a list of the names copied; this should be assigned to ``__imports__``.
"""
if only_names:
if ignore_missing_names:
items = ((k, getattr(source, k, _NONE)) for k in only_names)
else:
items = ((k, getattr(source, k)) for k in only_names)
else:
items = iteritems(source.__dict__)
copied = []
for key, value in items:
if value is _NONE:
continue
if key in names_to_ignore:
continue
if key.startswith("__") and key not in dunder_names_to_keep:
continue
globs[key] = value
copied.append(key)
if cleanup_globs:
if 'copy_globals' in globs:
del globs['copy_globals']
return copied
def import_c_accel(globs, cname):
"""
Import the C-accelerator for the *cname*
and copy its globals.
The *cname* should be hardcoded to match the expected
C accelerator module.
Unless PURE_PYTHON is set (in the environment or automatically
on PyPy), then the C-accelerator is required.
"""
if not cname.startswith('gevent._gevent_c'):
# Old module code that hasn't been updated yet.
cname = cname.replace('gevent._',
'gevent._gevent_c')
name = globs.get('__name__')
if not name or name == cname:
# Do nothing if we're being exec'd as a file (no name)
# or we're running from the C extension
return
from gevent._compat import pure_python_module
if pure_python_module(name):
return
import importlib
import warnings
with warnings.catch_warnings():
# Python 3.7 likes to produce
# "ImportWarning: can't resolve
# package from __spec__ or __package__, falling back on
# __name__ and __path__"
# when we load cython compiled files. This is probably a bug in
# Cython, but it doesn't seem to have any consequences, it's
# just annoying to see and can mess up our unittests.
warnings.simplefilter('ignore', ImportWarning)
mod = importlib.import_module(cname)
# By adopting the entire __dict__, we get a more accurate
# __file__ and module repr, plus we don't leak any imported
# things we no longer need.
globs.clear()
globs.update(mod.__dict__)
if 'import_c_accel' in globs:
del globs['import_c_accel']
| _NONE |
python | scipy__scipy | scipy/stats/tests/test_generation/reference_distributions.py | {
"start": 14537,
"end": 14851
} | class ____(ReferenceDistribution):
def __init__(self, *, skew):
super().__init__(skew=skew)
def _pdf(self, x, skew):
b = 2 / skew
a = b**2
c = -b
res = abs(b)/mp.gamma(a) * (b*(x-c))**(a-1) * mp.exp(-b*(x-c))
return res if abs(res.real) == res else 0
| Pearson3 |
python | langchain-ai__langchain | libs/core/langchain_core/outputs/llm_result.py | {
"start": 361,
"end": 3894
} | class ____(BaseModel):
"""A container for results of an LLM call.
Both chat models and LLMs generate an LLMResult object. This object contains the
generated outputs and any additional information that the model provider wants to
return.
"""
generations: list[
list[Generation | ChatGeneration | GenerationChunk | ChatGenerationChunk]
]
"""Generated outputs.
The first dimension of the list represents completions for different input prompts.
The second dimension of the list represents different candidate generations for a
given prompt.
- When returned from **an LLM**, the type is `list[list[Generation]]`.
- When returned from a **chat model**, the type is `list[list[ChatGeneration]]`.
ChatGeneration is a subclass of Generation that has a field for a structured chat
message.
"""
llm_output: dict | None = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on accessing
relevant information from standardized fields present in AIMessage.
"""
run: list[RunInfo] | None = None
"""List of metadata info for model call for each input.
See `langchain_core.outputs.run_info.RunInfo` for details.
"""
type: Literal["LLMResult"] = "LLMResult"
"""Type is used exclusively for serialization purposes."""
def flatten(self) -> list[LLMResult]:
"""Flatten generations into a single list.
Unpack list[list[Generation]] -> list[LLMResult] where each returned LLMResult
contains only a single Generation. If token usage information is available,
it is kept only for the LLMResult corresponding to the top-choice
Generation, to avoid over-counting of token usage downstream.
Returns:
List of LLMResults where each returned LLMResult contains a single
Generation.
"""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = deepcopy(self.llm_output)
llm_output["token_usage"] = {}
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
"""Check for `LLMResult` equality by ignoring any metadata related to runs.
Args:
other: Another `LLMResult` object to compare against.
Returns:
`True` if the generations and `llm_output` are equal, `False` otherwise.
"""
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
__hash__ = None # type: ignore[assignment]
| LLMResult |
python | imageio__imageio | imageio/plugins/example.py | {
"start": 238,
"end": 5499
} | class ____(Format):
"""The dummy format is an example format that does nothing.
It will never indicate that it can read or write a file. When
explicitly asked to read, it will simply read the bytes. When
explicitly asked to write, it will raise an error.
This documentation is shown when the user does ``help('thisformat')``.
Parameters for reading
----------------------
Specify arguments in numpy doc style here.
Parameters for saving
---------------------
Specify arguments in numpy doc style here.
"""
def _can_read(self, request):
# This method is called when the format manager is searching
# for a format to read a certain image. Return True if this format
# can do it.
#
# The format manager is aware of the extensions and the modes
# that each format can handle. It will first ask all formats
# that *seem* to be able to read it whether they can. If none
# can, it will ask the remaining formats if they can: the
# extension might be missing, and this allows formats to provide
# functionality for certain extensions, while giving preference
# to other plugins.
#
# If a format says it can, it should live up to it. The format
# would ideally check the request.firstbytes and look for a
# header of some kind.
#
# The request object has:
# request.filename: a representation of the source (only for reporting)
# request.firstbytes: the first 256 bytes of the file.
# request.mode[0]: read or write mode
if request.extension in self.extensions:
return True
def _can_write(self, request):
# This method is called when the format manager is searching
# for a format to write a certain image. It will first ask all
# formats that *seem* to be able to write it whether they can.
# If none can, it will ask the remaining formats if they can.
#
# Return True if the format can do it.
# In most cases, this code does suffice:
if request.extension in self.extensions:
return True
# -- reader
class Reader(Format.Reader):
def _open(self, some_option=False, length=1):
# Specify kwargs here. Optionally, the user-specified kwargs
# can also be accessed via the request.kwargs object.
#
# The request object provides two ways to get access to the
# data. Use just one:
# - Use request.get_file() for a file object (preferred)
# - Use request.get_local_filename() for a file on the system
self._fp = self.request.get_file()
self._length = length # passed as an arg in this case for testing
self._data = None
def _close(self):
# Close the reader.
# Note that the request object will close self._fp
pass
def _get_length(self):
# Return the number of images. Can be np.inf
return self._length
def _get_data(self, index):
# Return the data and meta data for the given index
if index >= self._length:
raise IndexError("Image index %i > %i" % (index, self._length))
# Read all bytes
if self._data is None:
self._data = self._fp.read()
# Put in a numpy array
im = np.frombuffer(self._data, "uint8")
im.shape = len(im), 1
# Return array and dummy meta data
return im, {}
def _get_meta_data(self, index):
# Get the meta data for the given index. If index is None, it
# should return the global meta data.
return {} # This format does not support meta data
# -- writer
class Writer(Format.Writer):
def _open(self, flags=0):
# Specify kwargs here. Optionally, the user-specified kwargs
# can also be accessed via the request.kwargs object.
#
# The request object provides two ways to write the data.
# Use just one:
# - Use request.get_file() for a file object (preferred)
# - Use request.get_local_filename() for a file on the system
self._fp = self.request.get_file()
def _close(self):
# Close the reader.
# Note that the request object will close self._fp
pass
def _append_data(self, im, meta):
# Process the given data and meta data.
raise RuntimeError("The dummy format cannot write image data.")
def set_meta_data(self, meta):
# Process the given meta data (global for all images)
# It is not mandatory to support this.
raise RuntimeError("The dummy format cannot write meta data.")
# Register. You register an *instance* of a Format class. Here specify:
format = DummyFormat(
"dummy", # short name
"An example format that does nothing.", # one line descr.
".foobar .nonexistentext", # list of extensions
"iI", # modes, characters in iIvV
)
formats.add_format(format)
| DummyFormat |
python | django__django | tests/string_lookup/models.py | {
"start": 482,
"end": 612
} | class ____(models.Model):
parent = models.OneToOneField("Base", models.CASCADE)
name = models.CharField(max_length=50)
| Child |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/operators/sql.py | {
"start": 4024,
"end": 7086
} | class ____(BaseOperator):
"""
This is a base class for generic SQL Operator to get a DB Hook.
The provided method is .get_db_hook(). The default behavior will try to
retrieve the DB hook based on connection type.
You can customize the behavior by overriding the .get_db_hook() method.
:param conn_id: reference to a specific database
"""
conn_id_field = "conn_id"
template_fields: Sequence[str] = ("conn_id", "database", "hook_params")
def __init__(
self,
*,
conn_id: str | None = None,
database: str | None = None,
hook_params: dict | None = None,
retry_on_failure: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.conn_id = conn_id
self.database = database
self.hook_params = hook_params or {}
self.retry_on_failure = retry_on_failure
@classmethod
# TODO: can be removed once Airflow min version for this provider is 3.0.0 or higher
def get_hook(cls, conn_id: str, hook_params: dict | None = None) -> BaseHook:
"""
Return default hook for this connection id.
:param conn_id: connection id
:param hook_params: hook parameters
:return: default hook for this connection
"""
hook_params = hook_params or {}
connection = BaseHook.get_connection(conn_id)
conn_params = connection.extra_dejson
for conn_param in conn_params:
if conn_param not in hook_params:
hook_params[conn_param] = conn_params[conn_param]
return connection.get_hook(hook_params=hook_params)
@cached_property
def _hook(self):
"""Get DB Hook based on connection type."""
conn_id = getattr(self, self.conn_id_field)
self.log.debug("Get connection for %s", conn_id)
hook = self.get_hook(conn_id=conn_id, hook_params=self.hook_params)
if not isinstance(hook, DbApiHook):
raise AirflowException(
f"You are trying to use `common-sql` with {hook.__class__.__name__},"
" but its provider does not support it. Please upgrade the provider to a version that"
" supports `common-sql`. The hook class should be a subclass of"
" `airflow.providers.common.sql.hooks.sql.DbApiHook`."
f" Got {hook.__class__.__name__} Hook with class hierarchy: {hook.__class__.mro()}"
)
if self.database:
if hook.conn_type == "postgres":
hook.database = self.database
else:
hook.schema = self.database
return hook
def get_db_hook(self) -> DbApiHook:
"""
Get the database hook for the connection.
:return: the database hook object.
"""
return self._hook
def _raise_exception(self, exception_string: str) -> NoReturn:
if self.retry_on_failure:
raise AirflowException(exception_string)
raise AirflowFailException(exception_string)
| BaseSQLOperator |
python | apache__airflow | providers/standard/src/airflow/providers/standard/operators/python.py | {
"start": 23845,
"end": 40617
} | class ____(_BasePythonVirtualenvOperator):
"""
Run a function in a virtualenv that is created and destroyed automatically.
The function (has certain caveats) must be defined using def, and not be
part of a class. All imports must happen inside the function
and no variables outside the scope may be referenced. A global scope
variable named virtualenv_string_args will be available (populated by
string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
can use a return value.
Note that if your virtualenv runs in a different Python major version than Airflow,
you cannot use return values, op_args, op_kwargs, or use any macros that are being provided to
Airflow through plugins. You can use string_args though.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonVirtualenvOperator`
:param python_callable: A python function with no references to outside variables,
defined with def, which will be run in a virtual environment.
:param requirements: Either a list of requirement strings, or a (templated)
"requirements file" as specified by pip.
:param python_version: The Python version to run the virtual environment with. Note that
both 2 and 2.7 are acceptable forms.
:param serializer: Which serializer use to serialize the args and result. It can be one of the following:
- ``"pickle"``: (default) Use pickle for serialization. Included in the Python Standard Library.
- ``"cloudpickle"``: Use cloudpickle for serialize more complex types,
this requires to include cloudpickle in your requirements.
- ``"dill"``: Use dill for serialize more complex types,
this requires to include dill in your requirements.
:param system_site_packages: Whether to include
system_site_packages in your virtual environment.
See virtualenv documentation for more information.
:param pip_install_options: a list of pip install options when installing requirements
See 'pip install -h' for available options
:param op_args: A list of positional arguments to pass to python_callable.
:param op_kwargs: A dict of keyword arguments to pass to python_callable.
:param string_args: Strings that are present in the global var virtualenv_string_args,
available to python_callable at runtime as a list[str]. Note that args are split
by newline.
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:param expect_airflow: expect Airflow to be installed in the target environment. If true, the operator
will raise warning if Airflow is not installed, and it will attempt to load Airflow
macros when starting.
:param skip_on_exit_code: If python_callable exits with this exit code, leave the task
in ``skipped`` state (default: None). If set to ``None``, any non-zero
exit code will be treated as a failure.
:param index_urls: an optional list of index urls to load Python packages from.
If not provided the system pip conf will be used to source packages from.
:param index_urls_from_connection_ids: An optional list of ``PackageIndex`` connection IDs.
Will be appended to ``index_urls``.
:param venv_cache_path: Optional path to the virtual environment parent folder in which the
virtual environment will be cached, creates a sub-folder venv-{hash} whereas hash will be replaced
with a checksum of requirements. If not provided the virtual environment will be created and deleted
in a temp folder for every execution.
:param env_vars: A dictionary containing additional environment variables to set for the virtual
environment when it is executed.
:param inherit_env: Whether to inherit the current environment variables when executing the virtual
environment. If set to ``True``, the virtual environment will inherit the environment variables
of the parent process (``os.environ``). If set to ``False``, the virtual environment will be
executed with a clean environment.
"""
template_fields: Sequence[str] = tuple(
{"requirements", "index_urls", "index_urls_from_connection_ids", "venv_cache_path"}.union(
PythonOperator.template_fields
)
)
template_ext: Sequence[str] = (".txt",)
def __init__(
self,
*,
python_callable: Callable,
requirements: None | Iterable[str] | str = None,
python_version: str | None = None,
serializer: _SerializerTypeDef | None = None,
system_site_packages: bool = True,
pip_install_options: list[str] | None = None,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
string_args: Iterable[str] | None = None,
templates_dict: dict | None = None,
templates_exts: list[str] | None = None,
expect_airflow: bool = True,
skip_on_exit_code: int | Container[int] | None = None,
index_urls: None | Collection[str] | str = None,
index_urls_from_connection_ids: None | Collection[str] | str = None,
venv_cache_path: None | os.PathLike[str] = None,
env_vars: dict[str, str] | None = None,
inherit_env: bool = True,
**kwargs,
):
if (
python_version
and str(python_version)[0] != str(sys.version_info.major)
and (op_args or op_kwargs)
):
raise AirflowException(
"Passing op_args or op_kwargs is not supported across different Python "
"major versions for PythonVirtualenvOperator. Please use string_args."
f"Sys version: {sys.version_info}. Virtual environment version: {python_version}"
)
if python_version is not None and not isinstance(python_version, str):
raise AirflowException(
"Passing non-string types (e.g. int or float) as python_version not supported"
)
if not requirements:
self.requirements: list[str] = []
elif isinstance(requirements, str):
self.requirements = [requirements]
else:
self.requirements = list(requirements)
self.python_version = python_version
self.system_site_packages = system_site_packages
self.pip_install_options = pip_install_options
if isinstance(index_urls, str):
self.index_urls: list[str] | None = [index_urls]
elif isinstance(index_urls, Collection):
self.index_urls = list(index_urls)
else:
self.index_urls = None
if isinstance(index_urls_from_connection_ids, str):
self.index_urls_from_connection_ids: list[str] | None = [index_urls_from_connection_ids]
elif isinstance(index_urls_from_connection_ids, Collection):
self.index_urls_from_connection_ids = list(index_urls_from_connection_ids)
else:
self.index_urls_from_connection_ids = None
self.venv_cache_path = venv_cache_path
super().__init__(
python_callable=python_callable,
serializer=serializer,
op_args=op_args,
op_kwargs=op_kwargs,
string_args=string_args,
templates_dict=templates_dict,
templates_exts=templates_exts,
expect_airflow=expect_airflow,
skip_on_exit_code=skip_on_exit_code,
env_vars=env_vars,
inherit_env=inherit_env,
**kwargs,
)
def _requirements_list(self, exclude_cloudpickle: bool = False) -> list[str]:
"""Prepare a list of requirements that need to be installed for the virtual environment."""
requirements = [str(dependency) for dependency in self.requirements]
if not self.system_site_packages:
if (
self.serializer == "cloudpickle"
and not exclude_cloudpickle
and "cloudpickle" not in requirements
):
requirements.append("cloudpickle")
elif self.serializer == "dill" and "dill" not in requirements:
requirements.append("dill")
requirements.sort() # Ensure a hash is stable
return requirements
def _prepare_venv(self, venv_path: Path) -> None:
"""Prepare the requirements and installs the virtual environment."""
requirements_file = venv_path / "requirements.txt"
requirements_file.write_text("\n".join(self._requirements_list()))
prepare_virtualenv(
venv_directory=str(venv_path),
python_bin=f"python{self.python_version}" if self.python_version else "python",
system_site_packages=self.system_site_packages,
requirements_file_path=str(requirements_file),
pip_install_options=self.pip_install_options,
index_urls=self.index_urls,
)
def _calculate_cache_hash(self, exclude_cloudpickle: bool = False) -> tuple[str, str]:
"""
Generate the hash of the cache folder to use.
The following factors are used as input for the hash:
- (sorted) list of requirements
- pip install options
- flag of system site packages
- python version
- Variable to override the hash with a cache key
- Index URLs
Returns a hash and the data dict which is the base for the hash as text.
"""
hash_dict = {
"requirements_list": self._requirements_list(exclude_cloudpickle=exclude_cloudpickle),
"pip_install_options": self.pip_install_options,
"index_urls": self.index_urls,
"cache_key": str(Variable.get("PythonVirtualenvOperator.cache_key", "")),
"python_version": self.python_version,
"system_site_packages": self.system_site_packages,
}
hash_text = json.dumps(hash_dict, sort_keys=True)
hash_object = hashlib_wrapper.md5(hash_text.encode())
requirements_hash = hash_object.hexdigest()
return requirements_hash[:8], hash_text
def _ensure_venv_cache_exists(self, venv_cache_path: Path) -> Path:
"""Ensure a valid virtual environment is set up and will create inplace."""
cache_hash, hash_data = self._calculate_cache_hash()
venv_path = venv_cache_path / f"venv-{cache_hash}"
self.log.info("Python virtual environment will be cached in %s", venv_path)
venv_path.parent.mkdir(parents=True, exist_ok=True)
with open(f"{venv_path}.lock", "w") as f:
# Ensure that cache is not build by parallel workers
import fcntl
fcntl.flock(f, fcntl.LOCK_EX)
hash_marker = venv_path / "install_complete_marker.json"
try:
if venv_path.exists():
if hash_marker.exists():
previous_hash_data = hash_marker.read_text(encoding="utf8")
if previous_hash_data == hash_data:
self.log.info("Reusing cached Python virtual environment in %s", venv_path)
return venv_path
_, hash_data_before_upgrade = self._calculate_cache_hash(exclude_cloudpickle=True)
if previous_hash_data == hash_data_before_upgrade:
self.log.warning(
"Found a previous virtual environment in with outdated dependencies %s, "
"deleting and re-creating.",
venv_path,
)
else:
self.log.error(
"Unicorn alert: Found a previous virtual environment in %s "
"with the same hash but different parameters. Previous setup: '%s' / "
"Requested venv setup: '%s'. Please report a bug to airflow!",
venv_path,
previous_hash_data,
hash_data,
)
else:
self.log.warning(
"Found a previous (probably partial installed) virtual environment in %s, "
"deleting and re-creating.",
venv_path,
)
shutil.rmtree(venv_path)
venv_path.mkdir(parents=True)
self._prepare_venv(venv_path)
hash_marker.write_text(hash_data, encoding="utf8")
except Exception as e:
shutil.rmtree(venv_path)
raise AirflowException(f"Unable to create new virtual environment in {venv_path}") from e
self.log.info("New Python virtual environment created in %s", venv_path)
return venv_path
def _cleanup_python_pycache_dir(self, cache_dir_path: Path) -> None:
try:
shutil.rmtree(cache_dir_path)
self.log.debug("The directory %s has been deleted.", cache_dir_path)
except FileNotFoundError:
self.log.warning("Fail to delete %s. The directory does not exist.", cache_dir_path)
except PermissionError:
self.log.warning("Permission denied to delete the directory %s.", cache_dir_path)
def _retrieve_index_urls_from_connection_ids(self):
"""Retrieve index URLs from Package Index connections."""
if self.index_urls is None:
self.index_urls = []
for conn_id in self.index_urls_from_connection_ids:
conn_url = PackageIndexHook(conn_id).get_connection_url()
self.index_urls.append(conn_url)
def execute_callable(self):
if self.index_urls_from_connection_ids:
self._retrieve_index_urls_from_connection_ids()
if self.venv_cache_path:
venv_path = self._ensure_venv_cache_exists(Path(self.venv_cache_path))
python_path = venv_path / "bin" / "python"
return self._execute_python_callable_in_subprocess(python_path)
with TemporaryDirectory(prefix="venv") as tmp_dir:
tmp_path = Path(tmp_dir)
custom_pycache_prefix = Path(sys.pycache_prefix or "")
r_path = tmp_path.relative_to(tmp_path.anchor)
venv_python_cache_dir = Path.cwd() / custom_pycache_prefix / r_path
self._prepare_venv(tmp_path)
python_path = tmp_path / "bin" / "python"
result = self._execute_python_callable_in_subprocess(python_path)
self._cleanup_python_pycache_dir(venv_python_cache_dir)
return result
def _iter_serializable_context_keys(self):
yield from self.BASE_SERIALIZABLE_CONTEXT_KEYS
found_airflow = found_pendulum = False
if self.system_site_packages:
# If we're using system packages, assume both are present
found_airflow = found_pendulum = True
else:
for raw_str in chain.from_iterable(req.splitlines() for req in self.requirements):
line = raw_str.strip()
# Skip blank lines and full‐line comments
if not line or line.startswith("#"):
continue
# Strip off any inline comment
# e.g. turn "foo==1.2.3 # comment" → "foo==1.2.3"
req_str = re.sub(r"#.*$", "", line).strip()
try:
req = Requirement(req_str)
except (InvalidRequirement, InvalidSpecifier, InvalidVersion) as e:
raise ValueError(f"Invalid requirement '{raw_str}': {e}") from e
if req.name == "apache-airflow":
found_airflow = found_pendulum = True
break
elif req.name == "pendulum":
found_pendulum = True
if found_airflow:
yield from self.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
elif found_pendulum:
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
| PythonVirtualenvOperator |
python | getsentry__sentry | src/sentry/feedback/migrations/0005_feedback_fk_not_db_contstr.py | {
"start": 348,
"end": 2028
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("feedback", "0001_squashed_0004_index_together"),
("sentry", "0980_integrations_json_field"),
]
operations = [
migrations.AlterField(
model_name="feedback",
name="environment",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="sentry.environment",
),
),
SafeDeleteModel(
name="feedback",
deletion_action=DeletionAction.MOVE_TO_PENDING,
),
]
| Migration |
python | PyCQA__pyflakes | pyflakes/test/test_undefined_names.py | {
"start": 107,
"end": 23099
} | class ____(TestCase):
def test_undefined(self):
self.flakes('bar', m.UndefinedName)
def test_definedInListComp(self):
self.flakes('[a for a in range(10) if a]')
def test_undefinedInListComp(self):
self.flakes('''
[a for a in range(10)]
a
''',
m.UndefinedName)
def test_undefinedExceptionName(self):
"""Exception names can't be used after the except: block.
The exc variable is unused inside the exception handler."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
pass
exc
''', m.UndefinedName, m.UnusedVariable)
def test_namesDeclaredInExceptBlocks(self):
"""Locals declared in except: blocks can be used after the block.
This shows the example in test_undefinedExceptionName is
different."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
e = exc
e
''')
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringLocalVariable(self):
"""Exception names obscure locals, can't be used after.
Last line will raise UnboundLocalError on Python 3 after exiting
the except: block. Note next two examples for false positives to
watch out for."""
self.flakes('''
exc = 'Original value'
try:
raise ValueError('ve')
except ValueError as exc:
pass
exc
''',
m.UndefinedName)
def test_undefinedExceptionNameObscuringLocalVariable2(self):
"""Exception names are unbound after the `except:` block.
Last line will raise UnboundLocalError.
The exc variable is unused inside the exception handler.
"""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
pass
print(exc)
exc = 'Original value'
''', m.UndefinedName, m.UnusedVariable)
def test_undefinedExceptionNameObscuringLocalVariableFalsePositive1(self):
"""Exception names obscure locals, can't be used after. Unless.
Last line will never raise UnboundLocalError because it's only
entered if no exception was raised."""
self.flakes('''
exc = 'Original value'
try:
raise ValueError('ve')
except ValueError as exc:
print('exception logged')
raise
exc
''', m.UnusedVariable)
def test_delExceptionInExcept(self):
"""The exception name can be deleted in the except: block."""
self.flakes('''
try:
pass
except Exception as exc:
del exc
''')
def test_undefinedExceptionNameObscuringLocalVariableFalsePositive2(self):
"""Exception names obscure locals, can't be used after. Unless.
Last line will never raise UnboundLocalError because `error` is
only falsy if the `except:` block has not been entered."""
self.flakes('''
exc = 'Original value'
error = None
try:
raise ValueError('ve')
except ValueError as exc:
error = 'exception logged'
if error:
print(error)
else:
exc
''', m.UnusedVariable)
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringGlobalVariable(self):
"""Exception names obscure globals, can't be used after.
Last line will raise UnboundLocalError because the existence of that
exception name creates a local scope placeholder for it, obscuring any
globals, etc."""
self.flakes('''
exc = 'Original value'
def func():
try:
pass # nothing is raised
except ValueError as exc:
pass # block never entered, exc stays unbound
exc
''',
m.UndefinedLocal)
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringGlobalVariable2(self):
"""Exception names obscure globals, can't be used after.
Last line will raise NameError on Python 3 because the name is
locally unbound after the `except:` block, even if it's
nonlocal. We should issue an error in this case because code
only working correctly if an exception isn't raised, is invalid.
Unless it's explicitly silenced, see false positives below."""
self.flakes('''
exc = 'Original value'
def func():
global exc
try:
raise ValueError('ve')
except ValueError as exc:
pass # block never entered, exc stays unbound
exc
''',
m.UndefinedLocal)
def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive1(self):
"""Exception names obscure globals, can't be used after. Unless.
Last line will never raise NameError because it's only entered
if no exception was raised."""
self.flakes('''
exc = 'Original value'
def func():
global exc
try:
raise ValueError('ve')
except ValueError as exc:
print('exception logged')
raise
exc
''', m.UnusedVariable)
def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive2(self):
"""Exception names obscure globals, can't be used after. Unless.
Last line will never raise NameError because `error` is only
falsy if the `except:` block has not been entered."""
self.flakes('''
exc = 'Original value'
def func():
global exc
error = None
try:
raise ValueError('ve')
except ValueError as exc:
error = 'exception logged'
if error:
print(error)
else:
exc
''', m.UnusedVariable)
def test_functionsNeedGlobalScope(self):
self.flakes('''
class a:
def b():
fu
fu = 1
''')
def test_builtins(self):
self.flakes('range(10)')
def test_builtinWindowsError(self):
"""
C{WindowsError} is sometimes a builtin name, so no warning is emitted
for using it.
"""
self.flakes('WindowsError')
def test_moduleAnnotations(self):
"""
Use of the C{__annotations__} in module scope should not emit
an undefined name warning when version is greater than or equal to 3.6.
"""
self.flakes('__annotations__')
def test_magicGlobalsFile(self):
"""
Use of the C{__file__} magic global should not emit an undefined name
warning.
"""
self.flakes('__file__')
def test_magicGlobalsBuiltins(self):
"""
Use of the C{__builtins__} magic global should not emit an undefined
name warning.
"""
self.flakes('__builtins__')
def test_magicGlobalsName(self):
"""
Use of the C{__name__} magic global should not emit an undefined name
warning.
"""
self.flakes('__name__')
def test_magicGlobalsPath(self):
"""
Use of the C{__path__} magic global should not emit an undefined name
warning, if you refer to it from a file called __init__.py.
"""
self.flakes('__path__', m.UndefinedName)
self.flakes('__path__', filename='package/__init__.py')
def test_magicModuleInClassScope(self):
"""
Use of the C{__module__} magic builtin should not emit an undefined
name warning if used in class scope.
"""
self.flakes('__module__', m.UndefinedName)
self.flakes('''
class Foo:
__module__
''')
self.flakes('''
class Foo:
def bar(self):
__module__
''', m.UndefinedName)
def test_magicQualnameInClassScope(self):
"""
Use of the C{__qualname__} magic builtin should not emit an undefined
name warning if used in class scope.
"""
self.flakes('__qualname__', m.UndefinedName)
self.flakes('''
class Foo:
__qualname__
''')
self.flakes('''
class Foo:
def bar(self):
__qualname__
''', m.UndefinedName)
def test_globalImportStar(self):
"""Can't find undefined names with import *."""
self.flakes('from fu import *; bar',
m.ImportStarUsed, m.ImportStarUsage)
def test_definedByGlobal(self):
"""
"global" can make an otherwise undefined name in another function
defined.
"""
self.flakes('''
def a(): global fu; fu = 1
def b(): fu
''')
self.flakes('''
def c(): bar
def b(): global bar; bar = 1
''')
def test_definedByGlobalMultipleNames(self):
"""
"global" can accept multiple names.
"""
self.flakes('''
def a(): global fu, bar; fu = 1; bar = 2
def b(): fu; bar
''')
def test_globalInGlobalScope(self):
"""
A global statement in the global scope is ignored.
"""
self.flakes('''
global x
def foo():
print(x)
''', m.UndefinedName)
def test_global_reset_name_only(self):
"""A global statement does not prevent other names being undefined."""
# Only different undefined names are reported.
# See following test that fails where the same name is used.
self.flakes('''
def f1():
s
def f2():
global m
''', m.UndefinedName, m.UnusedIndirectAssignment)
@skip("todo")
def test_unused_global(self):
"""An unused global statement does not define the name."""
self.flakes('''
def f1():
m
def f2():
global m
''', m.UndefinedName)
def test_del(self):
"""Del deletes bindings."""
self.flakes('a = 1; del a; a', m.UndefinedName)
def test_delGlobal(self):
"""Del a global binding from a function."""
self.flakes('''
a = 1
def f():
global a
del a
a
''')
def test_delUndefined(self):
"""Del an undefined name."""
self.flakes('del a', m.UndefinedName)
def test_delConditional(self):
"""
Ignores conditional bindings deletion.
"""
self.flakes('''
context = None
test = True
if False:
del(test)
assert(test)
''')
def test_delConditionalNested(self):
"""
Ignored conditional bindings deletion even if they are nested in other
blocks.
"""
self.flakes('''
context = None
test = True
if False:
with context():
del(test)
assert(test)
''')
def test_delWhile(self):
"""
Ignore bindings deletion if called inside the body of a while
statement.
"""
self.flakes('''
def test():
foo = 'bar'
while False:
del foo
assert(foo)
''')
def test_delWhileTestUsage(self):
"""
Ignore bindings deletion if called inside the body of a while
statement and name is used inside while's test part.
"""
self.flakes('''
def _worker():
o = True
while o is not True:
del o
o = False
''')
def test_delWhileNested(self):
"""
Ignore bindings deletions if node is part of while's test, even when
del is in a nested block.
"""
self.flakes('''
context = None
def _worker():
o = True
while o is not True:
while True:
with context():
del o
o = False
''')
def test_globalFromNestedScope(self):
"""Global names are available from nested scopes."""
self.flakes('''
a = 1
def b():
def c():
a
''')
def test_laterRedefinedGlobalFromNestedScope(self):
"""
Test that referencing a local name that shadows a global, before it is
defined, generates a warning.
"""
self.flakes('''
a = 1
def fun():
a
a = 2
return a
''', m.UndefinedLocal)
def test_laterRedefinedGlobalFromNestedScope2(self):
"""
Test that referencing a local name in a nested scope that shadows a
global declared in an enclosing scope, before it is defined, generates
a warning.
"""
self.flakes('''
a = 1
def fun():
global a
def fun2():
a
a = 2
return a
''', m.UndefinedLocal, m.UnusedIndirectAssignment)
def test_intermediateClassScopeIgnored(self):
"""
If a name defined in an enclosing scope is shadowed by a local variable
and the name is used locally before it is bound, an unbound local
warning is emitted, even if there is a class scope between the enclosing
scope and the local scope.
"""
self.flakes('''
def f():
x = 1
class g:
def h(self):
a = x
x = None
print(x, a)
print(x)
''', m.UndefinedLocal)
def test_doubleNestingReportsClosestName(self):
"""
Test that referencing a local name in a nested scope that shadows a
variable declared in two different outer scopes before it is defined
in the innermost scope generates an UnboundLocal warning which
refers to the nearest shadowed name.
"""
exc = self.flakes('''
def a():
x = 1
def b():
x = 2 # line 5
def c():
x
x = 3
return x
return x
return x
''', m.UndefinedLocal).messages[0]
# _DoctestMixin.flakes adds two lines preceding the code above.
expected_line_num = 7 if self.withDoctest else 5
self.assertEqual(exc.message_args, ('x', expected_line_num))
def test_laterRedefinedGlobalFromNestedScope3(self):
"""
Test that referencing a local name in a nested scope that shadows a
global, before it is defined, generates a warning.
"""
self.flakes('''
def fun():
a = 1
def fun2():
a
a = 1
return a
return a
''', m.UndefinedLocal)
def test_undefinedAugmentedAssignment(self):
self.flakes(
'''
def f(seq):
a = 0
seq[a] += 1
seq[b] /= 2
c[0] *= 2
a -= 3
d += 4
e[any] = 5
''',
m.UndefinedName, # b
m.UndefinedName, # c
m.UndefinedName, m.UnusedVariable, # d
m.UndefinedName, # e
)
def test_nestedClass(self):
"""Nested classes can access enclosing scope."""
self.flakes('''
def f(foo):
class C:
bar = foo
def f(self):
return foo
return C()
f(123).f()
''')
def test_badNestedClass(self):
"""Free variables in nested classes must bind at class creation."""
self.flakes('''
def f():
class C:
bar = foo
foo = 456
return foo
f()
''', m.UndefinedName)
def test_definedAsStarArgs(self):
"""Star and double-star arg names are defined."""
self.flakes('''
def f(a, *b, **c):
print(a, b, c)
''')
def test_definedAsStarUnpack(self):
"""Star names in unpack are defined."""
self.flakes('''
a, *b = range(10)
print(a, b)
''')
self.flakes('''
*a, b = range(10)
print(a, b)
''')
self.flakes('''
a, *b, c = range(10)
print(a, b, c)
''')
def test_usedAsStarUnpack(self):
"""
Star names in unpack are used if RHS is not a tuple/list literal.
"""
self.flakes('''
def f():
a, *b = range(10)
''')
self.flakes('''
def f():
(*a, b) = range(10)
''')
self.flakes('''
def f():
[a, *b, c] = range(10)
''')
def test_unusedAsStarUnpack(self):
"""
Star names in unpack are unused if RHS is a tuple/list literal.
"""
self.flakes('''
def f():
a, *b = any, all, 4, 2, 'un'
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
(*a, b) = [bool, int, float, complex]
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
[a, *b, c] = 9, 8, 7, 6, 5, 4
''', m.UnusedVariable, m.UnusedVariable, m.UnusedVariable)
def test_keywordOnlyArgs(self):
"""Keyword-only arg names are defined."""
self.flakes('''
def f(*, a, b=None):
print(a, b)
''')
self.flakes('''
import default_b
def f(*, a, b=default_b):
print(a, b)
''')
def test_keywordOnlyArgsUndefined(self):
"""Typo in kwonly name."""
self.flakes('''
def f(*, a, b=default_c):
print(a, b)
''', m.UndefinedName)
def test_annotationUndefined(self):
"""Undefined annotations."""
self.flakes('''
from abc import note1, note2, note3, note4, note5
def func(a: note1, *args: note2,
b: note3=12, **kw: note4) -> note5: pass
''')
self.flakes('''
def func():
d = e = 42
def func(a: {1, d}) -> (lambda c: e): pass
''')
def test_metaClassUndefined(self):
self.flakes('''
from abc import ABCMeta
class A(metaclass=ABCMeta): pass
''')
def test_definedInGenExp(self):
"""
Using the loop variable of a generator expression results in no
warnings.
"""
self.flakes('(a for a in [1, 2, 3] if a)')
self.flakes('(b for b in (a for a in [1, 2, 3] if a) if b)')
def test_undefinedInGenExpNested(self):
"""
The loop variables of generator expressions nested together are
not defined in the other generator.
"""
self.flakes('(b for b in (a for a in [1, 2, 3] if b) if b)',
m.UndefinedName)
self.flakes('(b for b in (a for a in [1, 2, 3] if a) if a)',
m.UndefinedName)
def test_undefinedWithErrorHandler(self):
"""
Some compatibility code checks explicitly for NameError.
It should not trigger warnings.
"""
self.flakes('''
try:
socket_map
except NameError:
socket_map = {}
''')
self.flakes('''
try:
_memoryview.contiguous
except (NameError, AttributeError):
raise RuntimeError("Python >= 3.3 is required")
''')
# If NameError is not explicitly handled, generate a warning
self.flakes('''
try:
socket_map
except:
socket_map = {}
''', m.UndefinedName)
self.flakes('''
try:
socket_map
except Exception:
socket_map = {}
''', m.UndefinedName)
def test_definedInClass(self):
"""
Defined name for generator expressions and dict/set comprehension.
"""
self.flakes('''
class A:
T = range(10)
Z = (x for x in T)
L = [x for x in T]
B = dict((i, str(i)) for i in T)
''')
self.flakes('''
class A:
T = range(10)
X = {x for x in T}
Y = {x:x for x in T}
''')
def test_definedInClassNested(self):
"""Defined name for nested generator expressions in a class."""
self.flakes('''
class A:
T = range(10)
Z = (x for x in (a for a in T))
''')
def test_undefinedInLoop(self):
"""
The loop variable is defined after the expression is computed.
"""
self.flakes('''
for i in range(i):
print(i)
''', m.UndefinedName)
self.flakes('''
[42 for i in range(i)]
''', m.UndefinedName)
self.flakes('''
(42 for i in range(i))
''', m.UndefinedName)
def test_definedFromLambdaInDictionaryComprehension(self):
"""
Defined name referenced from a lambda function within a dict/set
comprehension.
"""
self.flakes('''
{lambda: id(x) for x in range(10)}
''')
def test_definedFromLambdaInGenerator(self):
"""
Defined name referenced from a lambda function within a generator
expression.
"""
self.flakes('''
any(lambda: id(x) for x in range(10))
''')
def test_undefinedFromLambdaInDictionaryComprehension(self):
"""
Undefined name referenced from a lambda function within a dict/set
comprehension.
"""
self.flakes('''
{lambda: id(y) for x in range(10)}
''', m.UndefinedName)
def test_undefinedFromLambdaInComprehension(self):
"""
Undefined name referenced from a lambda function within a generator
expression.
"""
self.flakes('''
any(lambda: id(y) for x in range(10))
''', m.UndefinedName)
def test_dunderClass(self):
code = '''
class Test(object):
def __init__(self):
print(__class__.__name__)
self.x = 1
t = Test()
'''
self.flakes(code)
| Test |
python | run-llama__llama_index | llama-index-core/llama_index/core/retrievers/recursive_retriever.py | {
"start": 620,
"end": 8314
} | class ____(BaseRetriever):
"""
Recursive retriever.
This retriever will recursively explore links from nodes to other
retrievers/query engines.
For any retrieved nodes, if any of the nodes are IndexNodes,
then it will explore the linked retriever/query engine, and query that.
Args:
root_id (str): The root id of the query graph.
retriever_dict (Optional[Dict[str, BaseRetriever]]): A dictionary
of id to retrievers.
query_engine_dict (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
id to query engines.
"""
def __init__(
self,
root_id: str,
retriever_dict: Dict[str, BaseRetriever],
query_engine_dict: Optional[Dict[str, BaseQueryEngine]] = None,
node_dict: Optional[Dict[str, BaseNode]] = None,
callback_manager: Optional[CallbackManager] = None,
query_response_tmpl: Optional[str] = None,
verbose: bool = False,
) -> None:
"""Init params."""
self._root_id = root_id
if root_id not in retriever_dict:
raise ValueError(
f"Root id {root_id} not in retriever_dict, it must be a retriever."
)
self._retriever_dict = retriever_dict
self._query_engine_dict = query_engine_dict or {}
self._node_dict = node_dict or {}
# make sure keys don't overlap
if set(self._retriever_dict.keys()) & set(self._query_engine_dict.keys()):
raise ValueError("Retriever and query engine ids must not overlap.")
self._query_response_tmpl = query_response_tmpl or DEFAULT_QUERY_RESPONSE_TMPL
super().__init__(callback_manager, verbose=verbose)
def _deduplicate_nodes(
self, nodes_with_score: List[NodeWithScore]
) -> List[NodeWithScore]:
"""
Deduplicate nodes according to node id.
Keep the node with the highest score/first returned.
"""
node_ids = set()
deduplicate_nodes = []
for node_with_score in nodes_with_score:
node = node_with_score.node
if node.id_ not in node_ids:
node_ids.add(node.id_)
deduplicate_nodes.append(node_with_score)
return deduplicate_nodes
def _query_retrieved_nodes(
self, query_bundle: QueryBundle, nodes_with_score: List[NodeWithScore]
) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
"""
Query for retrieved nodes.
If node is an IndexNode, then recursively query the retriever/query engine.
If node is a TextNode, then simply return the node.
"""
nodes_to_add = []
additional_nodes = []
visited_ids = set()
# dedup index nodes that reference same index id
new_nodes_with_score = []
for node_with_score in nodes_with_score:
node = node_with_score.node
if isinstance(node, IndexNode):
if node.index_id not in visited_ids:
visited_ids.add(node.index_id)
new_nodes_with_score.append(node_with_score)
else:
new_nodes_with_score.append(node_with_score)
nodes_with_score = new_nodes_with_score
# recursively retrieve
for node_with_score in nodes_with_score:
node = node_with_score.node
if isinstance(node, IndexNode):
if self._verbose:
print_text(
f"Retrieved node with id, entering: {node.index_id}\n",
color="pink",
)
cur_retrieved_nodes, cur_additional_nodes = self._retrieve_rec(
query_bundle,
query_id=node.index_id,
cur_similarity=node_with_score.score,
)
else:
assert isinstance(node, TextNode)
if self._verbose:
print_text(
f"Retrieving text node: {node.get_content()}\n",
color="pink",
)
cur_retrieved_nodes = [node_with_score]
cur_additional_nodes = []
nodes_to_add.extend(cur_retrieved_nodes)
additional_nodes.extend(cur_additional_nodes)
# dedup nodes in case some nodes could be retrieved from multiple sources
nodes_to_add = self._deduplicate_nodes(nodes_to_add)
additional_nodes = self._deduplicate_nodes(additional_nodes)
return nodes_to_add, additional_nodes
def _get_object(self, query_id: str) -> RQN_TYPE:
"""Fetch retriever or query engine."""
node = self._node_dict.get(query_id, None)
if node is not None:
return node
retriever = self._retriever_dict.get(query_id, None)
if retriever is not None:
return retriever
query_engine = self._query_engine_dict.get(query_id, None)
if query_engine is not None:
return query_engine
raise ValueError(
f"Query id {query_id} not found in either `retriever_dict` "
"or `query_engine_dict`."
)
def _retrieve_rec(
self,
query_bundle: QueryBundle,
query_id: Optional[str] = None,
cur_similarity: Optional[float] = None,
) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
"""Query recursively."""
if self._verbose:
print_text(
f"Retrieving with query id {query_id}: {query_bundle.query_str}\n",
color="blue",
)
query_id = query_id or self._root_id
cur_similarity = cur_similarity or 1.0
obj = self._get_object(query_id)
if isinstance(obj, BaseNode):
nodes_to_add = [NodeWithScore(node=obj, score=cur_similarity)]
additional_nodes: List[NodeWithScore] = []
elif isinstance(obj, BaseRetriever):
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as event:
nodes = obj.retrieve(query_bundle)
event.on_end(payload={EventPayload.NODES: nodes})
nodes_to_add, additional_nodes = self._query_retrieved_nodes(
query_bundle, nodes
)
elif isinstance(obj, BaseQueryEngine):
sub_resp = obj.query(query_bundle)
if self._verbose:
print_text(
f"Got response: {sub_resp!s}\n",
color="green",
)
# format with both the query and the response
node_text = self._query_response_tmpl.format(
query_str=query_bundle.query_str, response=str(sub_resp)
)
node = TextNode(text=node_text)
nodes_to_add = [NodeWithScore(node=node, score=cur_similarity)]
additional_nodes = sub_resp.source_nodes
else:
raise ValueError("Must be a retriever or query engine.")
return nodes_to_add, additional_nodes
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
retrieved_nodes, _ = self._retrieve_rec(query_bundle, query_id=None)
return retrieved_nodes
def retrieve_all(
self, query_bundle: QueryBundle
) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
"""
Retrieve all nodes.
Unlike default `retrieve` method, this also fetches additional sources.
"""
return self._retrieve_rec(query_bundle, query_id=None)
| RecursiveRetriever |
python | django-haystack__django-haystack | test_haystack/elasticsearch_tests/test_elasticsearch_backend.py | {
"start": 7561,
"end": 7997
} | class ____(UnifiedIndex):
spy_args = None
def get_index(self, model_klass):
if self.spy_args is not None:
self.spy_args.setdefault("get_index", []).append(model_klass)
return super().get_index(model_klass)
@contextmanager
def spy(self):
try:
self.spy_args = {}
yield self.spy_args
finally:
self.spy_args = None
| ElasticSearchMockUnifiedIndex |
python | ray-project__ray | python/ray/tests/spark/test_databricks_hook.py | {
"start": 735,
"end": 2946
} | class ____:
@classmethod
def setup_class(cls):
os.environ["SPARK_WORKER_CORES"] = "2"
cls.spark = (
SparkSession.builder.master("local-cluster[1, 2, 1024]")
.config("spark.task.cpus", "1")
.config("spark.task.maxFailures", "1")
.config("spark.executorEnv.RAY_ON_SPARK_WORKER_CPU_CORES", "2")
.getOrCreate()
)
@classmethod
def teardown_class(cls):
time.sleep(10) # Wait all background spark job canceled.
cls.spark.stop()
os.environ.pop("SPARK_WORKER_CORES")
def test_hook(self, monkeypatch):
monkeypatch.setattr(
"ray.util.spark.databricks_hook._DATABRICKS_DEFAULT_TMP_ROOT_DIR", "/tmp"
)
monkeypatch.setenv("DATABRICKS_RUNTIME_VERSION", "12.2")
monkeypatch.setenv("DATABRICKS_RAY_ON_SPARK_AUTOSHUTDOWN_MINUTES", "0.5")
db_api_entry = MockDbApiEntry()
monkeypatch.setattr(
"ray.util.spark.databricks_hook.get_db_entry_point", lambda: db_api_entry
)
monkeypatch.setattr(
"ray.util.spark.databricks_hook.get_databricks_display_html_function",
lambda: lambda x: print(x),
)
try:
setup_ray_cluster(
max_worker_nodes=2,
num_cpus_worker_node=1,
num_gpus_worker_node=0,
head_node_options={"include_dashboard": False},
)
cluster = ray.util.spark.cluster_init._active_ray_cluster
assert not cluster.is_shutdown
wait_for_condition(
lambda: cluster.is_shutdown,
timeout=45,
retry_interval_ms=10000,
)
assert cluster.is_shutdown
assert ray.util.spark.cluster_init._active_ray_cluster is None
finally:
if ray.util.spark.cluster_init._active_ray_cluster is not None:
# if the test raised error and does not destroy cluster,
# destroy it here.
ray.util.spark.cluster_init._active_ray_cluster.shutdown()
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestDatabricksHook |
python | apache__airflow | shared/logging/src/airflow_shared/logging/structlog.py | {
"start": 5651,
"end": 5941
} | class ____(structlog.BytesLogger):
__slots__ = ("name",)
def __init__(self, name: str | None = None, file: BinaryIO | None = None):
self.name = name
if file is not None:
file = make_file_io_non_caching(file)
super().__init__(file)
| NamedBytesLogger |
python | getsentry__sentry | src/sentry/dynamic_sampling/models/base.py | {
"start": 119,
"end": 276
} | class ____(ABC):
@abstractmethod
def validate(self) -> bool:
# By default, we want each model value to be valid.
return True
| ModelInput |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 20516,
"end": 23254
} | class ____(Widget, Generic[T]):
"""A representation of button_group that is used by ``st.feedback``."""
_value: list[T] | None
proto: ButtonGroupProto = field(repr=False)
options: list[ButtonGroupProto.Option]
form_id: str
def __init__(self, proto: ButtonGroupProto, root: ElementTree) -> None:
super().__init__(proto, root)
self.type = "button_group"
self.options = list(proto.options)
@property
def _widget_state(self) -> WidgetState:
"""Protobuf message representing the state of the widget, including
any interactions that have happened.
Should be the same as the frontend would produce for those interactions.
"""
ws = WidgetState()
ws.id = self.id
ws.int_array_value.data[:] = self.indices
return ws
@property
def value(self) -> list[T]:
"""The currently selected values from the options. (list)""" # noqa: D400
if self._value is not None:
return self._value
state = self.root.session_state
assert state
return cast("list[T]", state[self.id])
@property
def indices(self) -> Sequence[int]:
"""The indices of the currently selected values from the options. (list)""" # noqa: D400
return [self.options.index(self.format_func(v)) for v in self.value]
@property
def format_func(self) -> Callable[[Any], Any]:
"""The widget's formatting function for displaying options. (callable)""" # noqa: D400
ss = self.root.session_state
return cast("Callable[[Any], Any]", ss[TESTING_KEY][self.id])
def set_value(self, v: list[T]) -> ButtonGroup[T]:
"""Set the value of the multiselect widget. (list)""" # noqa: D400
self._value = v
return self
def select(self, v: T) -> ButtonGroup[T]:
"""
Add a selection to the widget. Do nothing if the value is already selected.\
If testing a multiselect widget with repeated options, use ``set_value``\
instead.
"""
current = self.value
if v in current:
return self
new = current.copy()
new.append(v)
self.set_value(new)
return self
def unselect(self, v: T) -> ButtonGroup[T]:
"""
Remove a selection from the widget. Do nothing if the value is not\
already selected. If a value is selected multiple times, the first\
instance is removed.
"""
current = self.value
if v not in current:
return self
new = current.copy()
while v in new:
new.remove(v)
self.set_value(new)
return self
@dataclass(repr=False)
| ButtonGroup |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/common.py | {
"start": 1724,
"end": 1912
} | class ____(StrictBaseModel, Generic[T]):
"""Base class for bulk actions."""
action: BulkAction = Field(..., description="The action to be performed on the entities.")
| BulkBaseAction |
python | joke2k__faker | faker/providers/automotive/nl_BE/__init__.py | {
"start": 48,
"end": 372
} | class ____(AutomotiveProvider):
"""Implement automotive provider for `nl_BE` locale.
https://nl.wikipedia.org/wiki/Belgisch_kenteken
"""
license_formats = (
"???-###", # 1973-2008
"###-???", # 2008-2010
# New formats after 2010
"1-???-###",
"2-???-###",
)
| Provider |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_github.py | {
"start": 12580,
"end": 16924
} | class ____(TestCase):
factory = RequestFactory()
path = reverse("sentry-integration-github-webhook")
@pytest.fixture(autouse=True)
def setup(self):
with override_options({"github.webhook-type-routing.enabled": True}):
yield
def setUp(self) -> None:
super().setUp()
self.organization = self.create_organization(
name="Test Org",
slug="test-org",
region="us",
owner=self.create_user(email="test@example.com"),
)
self.integration = self.create_integration(
provider="github",
external_id="1",
name="Test Integration",
organization=self.organization,
)
@responses.activate
def test_overwatch_forwarder(self) -> None:
with (
override_options({"overwatch.enabled-regions": ["us"]}),
override_settings(
OVERWATCH_REGION_URLS={"us": "https://us.example.com/api"},
OVERWATCH_WEBHOOK_SECRET="test-secret",
),
):
responses.add(
responses.POST,
"https://us.example.com/api/webhooks/sentry",
status=200,
)
request = self.factory.post(
self.path,
data={"installation": {"id": "1"}, "action": "created"},
content_type="application/json",
headers={
"x-github-event": GithubWebhookType.PULL_REQUEST.value,
"x-github-hook-installation-target-id": "123",
},
)
parser = GithubRequestParser(
request=request,
response_handler=lambda _: HttpResponse(status=200, content="passthrough"),
)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_202_ACCEPTED
assert response.content == b""
assert len(responses.calls) == 1
assert responses.calls[0].request.url == "https://us.example.com/api/webhooks/sentry"
assert responses.calls[0].request.method == "POST"
json_body = orjson.loads(responses.calls[0].request.body)
assert json_body["organizations"] == [
{
"name": "Test Org",
"slug": "test-org",
"id": self.organization.id,
"region": "us",
"github_integration_id": self.integration.id,
"organization_integration_id": self.organization_integration.id,
}
]
assert json_body["webhook_body"] == {"installation": {"id": "1"}, "action": "created"}
assert json_body["app_id"] == 123
assert json_body["webhook_headers"]["X-Github-Event"] == "pull_request"
assert json_body["integration_provider"] == "github"
assert json_body["region"] == "us"
assert json_body["event_type"] == "github"
@responses.activate
def test_overwatch_forwarder_missing_region_config(self) -> None:
with (
override_options({"overwatch.enabled-regions": ["us"]}),
override_settings(
OVERWATCH_REGION_URLS={"de": "https://de.example.com/api"},
OVERWATCH_WEBHOOK_SECRET="test-secret",
),
):
request = self.factory.post(
self.path,
data={"installation": {"id": "1"}, "action": "created"},
content_type="application/json",
headers={
"x-github-event": GithubWebhookType.PULL_REQUEST.value,
"x-github-hook-installation-target-id": "1",
},
)
parser = GithubRequestParser(
request=request,
response_handler=lambda _: HttpResponse(status=200, content="passthrough"),
)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == status.HTTP_202_ACCEPTED
assert response.content == b""
assert len(responses.calls) == 0
| GithubRequestParserOverwatchForwarderTest |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-hwp/llama_index/readers/hwp/base.py | {
"start": 197,
"end": 3196
} | class ____(BaseReader):
"""
Hwp Reader. Reads contents from Hwp file.
Args: None.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.FILE_HEADER_SECTION = "FileHeader"
self.HWP_SUMMARY_SECTION = "\x05HwpSummaryInformation"
self.SECTION_NAME_LENGTH = len("Section")
self.BODYTEXT_SECTION = "BodyText"
self.HWP_TEXT_TAGS = [67]
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from Hwp file.
Args:
file (Path): Path for the Hwp file.
Returns:
List[Document].
"""
import olefile
load_file = olefile.OleFileIO(file)
file_dir = load_file.listdir()
if self.is_valid(file_dir) is False:
raise Exception("Not Valid HwpFile")
result_text = self._get_text(load_file, file_dir)
result = self._text_to_document(text=result_text, extra_info=extra_info)
return [result]
def is_valid(self, dirs):
if [self.FILE_HEADER_SECTION] not in dirs:
return False
return [self.HWP_SUMMARY_SECTION] in dirs
def get_body_sections(self, dirs):
m = []
for d in dirs:
if d[0] == self.BODYTEXT_SECTION:
m.append(int(d[1][self.SECTION_NAME_LENGTH :]))
return ["BodyText/Section" + str(x) for x in sorted(m)]
def _text_to_document(
self, text: str, extra_info: Optional[Dict] = None
) -> Document:
return Document(text=text, extra_info=extra_info or {})
def get_text(self):
return self.text
# 전체 text 추출
def _get_text(self, load_file, file_dir):
sections = self.get_body_sections(file_dir)
text = ""
for section in sections:
text += self.get_text_from_section(load_file, section)
text += "\n"
self.text = text
return self.text
def is_compressed(self, load_file):
header = load_file.openstream("FileHeader")
header_data = header.read()
return (header_data[36] & 1) == 1
def get_text_from_section(self, load_file, section):
bodytext = load_file.openstream(section)
data = bodytext.read()
unpacked_data = (
zlib.decompress(data, -15) if self.is_compressed(load_file) else data
)
size = len(unpacked_data)
i = 0
text = ""
while i < size:
header = struct.unpack_from("<I", unpacked_data, i)[0]
rec_type = header & 0x3FF
(header >> 10) & 0x3FF
rec_len = (header >> 20) & 0xFFF
if rec_type in self.HWP_TEXT_TAGS:
rec_data = unpacked_data[i + 4 : i + 4 + rec_len]
text += rec_data.decode("utf-16")
text += "\n"
i += 4 + rec_len
return text
| HWPReader |
python | kubernetes-client__python | kubernetes/client/models/v1_resource_attributes.py | {
"start": 383,
"end": 11116
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'field_selector': 'V1FieldSelectorAttributes',
'group': 'str',
'label_selector': 'V1LabelSelectorAttributes',
'name': 'str',
'namespace': 'str',
'resource': 'str',
'subresource': 'str',
'verb': 'str',
'version': 'str'
}
attribute_map = {
'field_selector': 'fieldSelector',
'group': 'group',
'label_selector': 'labelSelector',
'name': 'name',
'namespace': 'namespace',
'resource': 'resource',
'subresource': 'subresource',
'verb': 'verb',
'version': 'version'
}
def __init__(self, field_selector=None, group=None, label_selector=None, name=None, namespace=None, resource=None, subresource=None, verb=None, version=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceAttributes - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._field_selector = None
self._group = None
self._label_selector = None
self._name = None
self._namespace = None
self._resource = None
self._subresource = None
self._verb = None
self._version = None
self.discriminator = None
if field_selector is not None:
self.field_selector = field_selector
if group is not None:
self.group = group
if label_selector is not None:
self.label_selector = label_selector
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if resource is not None:
self.resource = resource
if subresource is not None:
self.subresource = subresource
if verb is not None:
self.verb = verb
if version is not None:
self.version = version
@property
def field_selector(self):
"""Gets the field_selector of this V1ResourceAttributes. # noqa: E501
:return: The field_selector of this V1ResourceAttributes. # noqa: E501
:rtype: V1FieldSelectorAttributes
"""
return self._field_selector
@field_selector.setter
def field_selector(self, field_selector):
"""Sets the field_selector of this V1ResourceAttributes.
:param field_selector: The field_selector of this V1ResourceAttributes. # noqa: E501
:type: V1FieldSelectorAttributes
"""
self._field_selector = field_selector
@property
def group(self):
"""Gets the group of this V1ResourceAttributes. # noqa: E501
Group is the API Group of the Resource. \"*\" means all. # noqa: E501
:return: The group of this V1ResourceAttributes. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this V1ResourceAttributes.
Group is the API Group of the Resource. \"*\" means all. # noqa: E501
:param group: The group of this V1ResourceAttributes. # noqa: E501
:type: str
"""
self._group = group
@property
def label_selector(self):
"""Gets the label_selector of this V1ResourceAttributes. # noqa: E501
:return: The label_selector of this V1ResourceAttributes. # noqa: E501
:rtype: V1LabelSelectorAttributes
"""
return self._label_selector
@label_selector.setter
def label_selector(self, label_selector):
"""Sets the label_selector of this V1ResourceAttributes.
:param label_selector: The label_selector of this V1ResourceAttributes. # noqa: E501
:type: V1LabelSelectorAttributes
"""
self._label_selector = label_selector
@property
def name(self):
"""Gets the name of this V1ResourceAttributes. # noqa: E501
Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all. # noqa: E501
:return: The name of this V1ResourceAttributes. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ResourceAttributes.
Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all. # noqa: E501
:param name: The name of this V1ResourceAttributes. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ResourceAttributes. # noqa: E501
Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview # noqa: E501
:return: The namespace of this V1ResourceAttributes. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ResourceAttributes.
Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview # noqa: E501
:param namespace: The namespace of this V1ResourceAttributes. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def resource(self):
"""Gets the resource of this V1ResourceAttributes. # noqa: E501
Resource is one of the existing resource types. \"*\" means all. # noqa: E501
:return: The resource of this V1ResourceAttributes. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1ResourceAttributes.
Resource is one of the existing resource types. \"*\" means all. # noqa: E501
:param resource: The resource of this V1ResourceAttributes. # noqa: E501
:type: str
"""
self._resource = resource
@property
def subresource(self):
"""Gets the subresource of this V1ResourceAttributes. # noqa: E501
Subresource is one of the existing resource types. \"\" means none. # noqa: E501
:return: The subresource of this V1ResourceAttributes. # noqa: E501
:rtype: str
"""
return self._subresource
@subresource.setter
def subresource(self, subresource):
"""Sets the subresource of this V1ResourceAttributes.
Subresource is one of the existing resource types. \"\" means none. # noqa: E501
:param subresource: The subresource of this V1ResourceAttributes. # noqa: E501
:type: str
"""
self._subresource = subresource
@property
def verb(self):
"""Gets the verb of this V1ResourceAttributes. # noqa: E501
Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all. # noqa: E501
:return: The verb of this V1ResourceAttributes. # noqa: E501
:rtype: str
"""
return self._verb
@verb.setter
def verb(self, verb):
"""Sets the verb of this V1ResourceAttributes.
Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all. # noqa: E501
:param verb: The verb of this V1ResourceAttributes. # noqa: E501
:type: str
"""
self._verb = verb
@property
def version(self):
"""Gets the version of this V1ResourceAttributes. # noqa: E501
Version is the API Version of the Resource. \"*\" means all. # noqa: E501
:return: The version of this V1ResourceAttributes. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this V1ResourceAttributes.
Version is the API Version of the Resource. \"*\" means all. # noqa: E501
:param version: The version of this V1ResourceAttributes. # noqa: E501
:type: str
"""
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceAttributes):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceAttributes):
return True
return self.to_dict() != other.to_dict()
| V1ResourceAttributes |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/init_ops_test.py | {
"start": 14176,
"end": 16437
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testTruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(
distribution="truncated_normal")
with self.session(), \
test.mock.patch.object(
random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
as mock_truncated_normal:
x = init(shape).eval()
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_deprecated_v1
def testNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(distribution="normal")
with self.session(), \
test.mock.patch.object(
random_ops, "truncated_normal", wraps=random_ops.truncated_normal) \
as mock_truncated_normal:
x = init(shape).eval()
self.assertTrue(mock_truncated_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_deprecated_v1
def testUntruncatedNormalDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(
distribution="untruncated_normal")
with self.session(), \
test.mock.patch.object(
random_ops, "random_normal", wraps=random_ops.random_normal) \
as mock_random_normal:
x = init(shape).eval()
self.assertTrue(mock_random_normal.called)
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
@test_util.run_deprecated_v1
def testUniformDistribution(self):
shape = [100, 100]
expect_mean = 0.
expect_var = 1. / shape[0]
init = init_ops.variance_scaling_initializer(distribution="uniform")
with self.session():
x = init(shape).eval()
self.assertNear(np.mean(x), expect_mean, err=1e-2)
self.assertNear(np.var(x), expect_var, err=1e-2)
# TODO(vrv): move to sequence_ops_test?
| VarianceScalingInitializationTest |
python | ansible__ansible | test/lib/ansible_test/_internal/coverage_util.py | {
"start": 922,
"end": 1489
} | class ____:
"""Details about a coverage version and its supported Python versions."""
coverage_version: str
schema_version: int
min_python: tuple[int, int]
max_python: tuple[int, int]
COVERAGE_VERSIONS = (
# IMPORTANT: Keep this in sync with the ansible-test.txt requirements file.
CoverageVersion('7.10.7', 7, (3, 9), (3, 14)),
)
"""
This tuple specifies the coverage version to use for Python version ranges.
"""
CONTROLLER_COVERAGE_VERSION = COVERAGE_VERSIONS[0]
"""The coverage version supported on the controller."""
| CoverageVersion |
python | ApeWorX__ape | tests/conftest.py | {
"start": 12969,
"end": 14151
} | class ____:
"""
Same CLI commands are better tested using a python subprocess,
such as `ape test` commands because duplicate pytest main methods
do not run well together, or `ape plugins` commands, which may
modify installed plugins.
"""
def __init__(
self, root_cmd: Optional[Sequence[str]] = None, data_folder: Optional[Path] = None
):
self.root_cmd = root_cmd or []
self.data_folder = data_folder
def invoke(
self,
*subcommand: str,
input=None,
timeout: int = 40,
env: Optional[dict] = None,
):
subcommand = subcommand or ()
cmd_ls = [*self.root_cmd, *subcommand]
env = {**dict(os.environ), **(env or {})}
if self.data_folder:
env["APE_DATA_FOLDER"] = str(self.data_folder)
completed_process = subprocess.run(
cmd_ls,
capture_output=True,
env=env,
input=input,
text=True,
timeout=timeout,
check=False,
)
result = SubprocessResult(completed_process)
sys.stdin = sys.__stdin__
return result
| SubprocessRunner |
python | kamyu104__LeetCode-Solutions | Python/find-the-divisibility-array-of-a-string.py | {
"start": 42,
"end": 376
} | class ____(object):
def divisibilityArray(self, word, m):
"""
:type word: str
:type m: int
:rtype: List[int]
"""
result = []
curr = 0
for c in word:
curr = (curr*10+(ord(c)-ord('0')))%m
result.append(int(curr == 0))
return result
| Solution |
python | lepture__authlib | authlib/jose/rfc8037/jws_eddsa.py | {
"start": 119,
"end": 717
} | class ____(JWSAlgorithm):
name = "EdDSA"
description = "Edwards-curve Digital Signature Algorithm for JWS"
def prepare_key(self, raw_data):
return OKPKey.import_key(raw_data)
def sign(self, msg, key):
op_key = key.get_op_key("sign")
return op_key.sign(msg)
def verify(self, msg, sig, key):
op_key = key.get_op_key("verify")
try:
op_key.verify(sig, msg)
return True
except InvalidSignature:
return False
def register_jws_rfc8037(cls):
cls.register_algorithm(EdDSAAlgorithm())
| EdDSAAlgorithm |
python | rq__rq | tests/test_queue.py | {
"start": 37146,
"end": 37652
} | class ____(RQTestCase):
def test_enqueue_at(self):
"""enqueue_at() creates a job in ScheduledJobRegistry"""
queue = Queue(connection=self.connection)
scheduled_time = datetime.now(timezone.utc) + timedelta(seconds=10)
job = queue.enqueue_at(scheduled_time, say_hello)
registry = ScheduledJobRegistry(queue=queue)
self.assertIn(job, registry)
self.assertEqual(registry.get_expiration_time(job), scheduled_time.replace(microsecond=0))
| TestJobScheduling |
python | jina-ai__jina | tests/helper.py | {
"start": 114,
"end": 1162
} | class ____(Executor):
@requests(on='/')
def process(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = doc.text + 'world'
doc.tags['processed'] = True
def _validate_dummy_custom_gateway_response(port, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/').json()
assert resp == expected
def _validate_custom_gateway_process(port, text, expected):
import requests
resp = requests.get(f'http://127.0.0.1:{port}/stream?text={text}').json()
assert resp == expected
# set_pod_parser returns a parser for worker runtime, which expects list of ports (because external executors
# can provide multiple ports and hosts). However this parser is not compatible with ContainerPod, Pod and worker runtime.
# Should we add a seperate parser for Pod?
def _generate_pod_args(cli_split: list = []):
args = set_pod_parser().parse_args(cli_split)
args.host = args.host[0]
args.port_monitoring = args.port_monitoring[0]
return args
| ProcessExecutor |
python | wandb__wandb | wandb/integration/metaflow/metaflow.py | {
"start": 790,
"end": 9411
} | class ____:
def __init__(self, flow):
# do this to avoid recursion problem with __setattr__
self.__dict__.update(
{
"flow": flow,
"inputs": {},
"outputs": {},
"base": set(dir(flow)),
"params": {p: getattr(flow, p) for p in current.parameter_names},
}
)
def __setattr__(self, key, val):
self.outputs[key] = val
return setattr(self.flow, key, val)
def __getattr__(self, key):
if key not in self.base and key not in self.outputs:
self.inputs[key] = getattr(self.flow, key)
return getattr(self.flow, key)
def _track_scalar(
name: str,
data: Union[dict, list, set, str, int, float, bool],
run,
testing: bool = False,
) -> Optional[str]:
if testing:
return "scalar"
run.log({name: data})
return None
def _track_path(
name: str,
data: Path,
run,
testing: bool = False,
) -> Optional[str]:
if testing:
return "Path"
artifact = wandb.Artifact(name, type="dataset")
if data.is_dir():
artifact.add_dir(data)
elif data.is_file():
artifact.add_file(data)
run.log_artifact(artifact)
wandb.termlog(f"Logging artifact: {name} ({type(data)})")
return None
def _track_generic(
name: str,
data,
run,
testing: bool = False,
) -> Optional[str]:
if testing:
return "generic"
artifact = wandb.Artifact(name, type="other")
with artifact.new_file(f"{name}.pkl", "wb") as f:
pickle.dump(data, f)
run.log_artifact(artifact)
wandb.termlog(f"Logging artifact: {name} ({type(data)})")
return None
def wandb_track(
name: str,
data,
datasets: bool = False,
models: bool = False,
others: bool = False,
run: Optional[wandb.Run] = None,
testing: bool = False,
) -> Optional[str]:
"""Track data as wandb artifacts based on type and flags."""
# Check for pandas DataFrame
if data_pandas and data_pandas.is_dataframe(data) and datasets:
return data_pandas.track_dataframe(name, data, run, testing)
# Check for PyTorch Module
if data_pytorch and data_pytorch.is_nn_module(data) and models:
return data_pytorch.track_nn_module(name, data, run, testing)
# Check for scikit-learn BaseEstimator
if data_sklearn and data_sklearn.is_estimator(data) and models:
return data_sklearn.track_estimator(name, data, run, testing)
# Check for Path objects
if isinstance(data, Path) and datasets:
return _track_path(name, data, run, testing)
# Check for scalar types
if isinstance(data, (dict, list, set, str, int, float, bool)):
return _track_scalar(name, data, run, testing)
# Generic fallback
if others:
return _track_generic(name, data, run, testing)
# No action taken
return None
def wandb_use(
name: str,
data,
datasets: bool = False,
models: bool = False,
others: bool = False,
run=None,
testing: bool = False,
) -> Optional[str]:
"""Use wandb artifacts based on data type and flags."""
# Skip scalar types - nothing to use
if isinstance(data, (dict, list, set, str, int, float, bool)):
return None
try:
# Check for pandas DataFrame
if data_pandas and data_pandas.is_dataframe(data) and datasets:
return data_pandas.use_dataframe(name, run, testing)
# Check for PyTorch Module
elif data_pytorch and data_pytorch.is_nn_module(data) and models:
return data_pytorch.use_nn_module(name, run, testing)
# Check for scikit-learn BaseEstimator
elif data_sklearn and data_sklearn.is_estimator(data) and models:
return data_sklearn.use_estimator(name, run, testing)
# Check for Path objects
elif isinstance(data, Path) and datasets:
return _use_path(name, data, run, testing)
# Generic fallback
elif others:
return _use_generic(name, data, run, testing)
else:
return None
except wandb.CommError:
wandb.termwarn(
f"This artifact ({name}, {type(data)}) does not exist in the wandb datastore!"
" If you created an instance inline (e.g. sklearn.ensemble.RandomForestClassifier),"
" then you can safely ignore this. Otherwise you may want to check your internet connection!"
)
return None
def _use_path(
name: str,
data: Path,
run,
testing: bool = False,
) -> Optional[str]:
if testing:
return "datasets"
run.use_artifact(f"{name}:latest")
wandb.termlog(f"Using artifact: {name} ({type(data)})")
return None
def _use_generic(
name: str,
data,
run,
testing: bool = False,
) -> Optional[str]:
if testing:
return "others"
run.use_artifact(f"{name}:latest")
wandb.termlog(f"Using artifact: {name} ({type(data)})")
return None
def coalesce(*arg):
return next((a for a in arg if a is not None), None)
def wandb_log(
func=None,
/,
datasets: bool = False,
models: bool = False,
others: bool = False,
settings: Optional[wandb.Settings] = None,
):
"""Automatically log parameters and artifacts to W&B.
This decorator can be applied to a flow, step, or both:
- Decorating a step enables or disables logging within that step
- Decorating a flow is equivalent to decorating all steps
- Decorating a step after decorating its flow overwrites the flow decoration
Args:
func: The step method or flow class to decorate.
datasets: Whether to log `pd.DataFrame` and `pathlib.Path`
types. Defaults to False.
models: Whether to log `nn.Module` and `sklearn.base.BaseEstimator`
types. Defaults to False.
others: If `True`, log anything pickle-able. Defaults to False.
settings: Custom settings to pass to `wandb.init`.
If `run_group` is `None`, it is set to `{flow_name}/{run_id}`.
If `run_job_type` is `None`, it is set to `{run_job_type}/{step_name}`.
"""
@wraps(func)
def decorator(func):
# If you decorate a class, apply the decoration to all methods in that class
if inspect.isclass(func):
cls = func
for attr in cls.__dict__:
if callable(getattr(cls, attr)):
if not hasattr(attr, "_base_func"):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
# prefer the earliest decoration (i.e. method decoration overrides class decoration)
if hasattr(func, "_base_func"):
return func
@wraps(func)
def wrapper(self, *args, settings=settings, **kwargs):
if not isinstance(settings, wandb.sdk.wandb_settings.Settings):
settings = wandb.Settings()
settings.update_from_dict(
{
"run_group": coalesce(
settings.run_group, f"{current.flow_name}/{current.run_id}"
),
"run_job_type": coalesce(settings.run_job_type, current.step_name),
}
)
with wandb.init(settings=settings) as run:
with wb_telemetry.context(run=run) as tel:
tel.feature.metaflow = True
proxy = ArtifactProxy(self)
run.config.update(proxy.params)
func(proxy, *args, **kwargs)
for name, data in proxy.inputs.items():
wandb_use(
name,
data,
datasets=datasets,
models=models,
others=others,
run=run,
)
for name, data in proxy.outputs.items():
wandb_track(
name,
data,
datasets=datasets,
models=models,
others=others,
run=run,
)
wrapper._base_func = func
# Add for testing visibility
wrapper._kwargs = {
"datasets": datasets,
"models": models,
"others": others,
"settings": settings,
}
return wrapper
if func is None:
return decorator
else:
return decorator(func)
| ArtifactProxy |
python | weaviate__weaviate-python-client | weaviate/auth.py | {
"start": 202,
"end": 971
} | class ____:
"""Authenticate for the Client Credential flow using client secrets.
Acquire the client secret from your identify provider and set the appropriate scope. The client includes hardcoded
scopes for Azure, otherwise it needs to be supplied.
Scopes can be given as:
- List of strings: ["scope1", "scope2"]
- space separated string: "scope1 scope2"
"""
client_secret: str
scope: Optional[SCOPES] = None
def __post_init__(self) -> None:
if self.scope is None:
self.scope_list: List[str] = []
elif isinstance(self.scope, str):
self.scope_list = self.scope.split(" ")
elif isinstance(self.scope, list):
self.scope_list = self.scope
@dataclass
| _ClientCredentials |
python | django__django | django/test/client.py | {
"start": 7211,
"end": 13120
} | class ____(BaseHandler):
"""An async version of ClientHandler."""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
async def __call__(self, scope):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware(is_async=True)
# Extract body file from the scope, if provided.
if "_body_file" in scope:
body_file = scope.pop("_body_file")
else:
body_file = FakePayload("")
request_started.disconnect(close_old_connections)
await request_started.asend(sender=self.__class__, scope=scope)
request_started.connect(close_old_connections)
# Wrap FakePayload body_file to allow large read() in test environment.
request = ASGIRequest(scope, LimitedStream(body_file, len(body_file)))
# Sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably required
# for backwards compatibility with external tests against admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = await self.get_response_async(request)
# Simulate behaviors of most web servers.
conditional_content_removal(request, response)
# Attach the originating ASGI request to the response so that it could
# be later retrieved.
response.asgi_request = request
# Emulate a server by calling the close method on completion.
if response.streaming:
if response.is_async:
response.streaming_content = aclosing_iterator_wrapper(
response.streaming_content, response.close
)
else:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close
)
else:
request_finished.disconnect(close_old_connections)
# Will fire request_finished.
await sync_to_async(response.close, thread_sensitive=False)()
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault("templates", []).append(template)
if "context" not in store:
store["context"] = ContextList()
store["context"].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for key, value in data.items():
if value is None:
raise TypeError(
"Cannot encode None for key '%s' as POST data. Did you mean "
"to pass an empty string or omit the value?" % key
)
elif is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and isinstance(value, Iterable):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(
to_bytes(val)
for val in [
"--%s" % boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
item,
]
)
else:
lines.extend(
to_bytes(val)
for val in [
"--%s" % boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
value,
]
)
lines.extend(
[
to_bytes("--%s--" % boundary),
b"",
]
)
return b"\r\n".join(lines)
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, "name") and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ""
if hasattr(file, "content_type"):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = "application/octet-stream"
filename = filename or key
return [
to_bytes("--%s" % boundary),
to_bytes(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)
),
to_bytes("Content-Type: %s" % content_type),
b"",
to_bytes(file.read()),
]
| AsyncClientHandler |
python | scipy__scipy | scipy/ndimage/tests/test_morphology.py | {
"start": 132710,
"end": 137867
} | class ____:
def _setup(self, xp):
a = np.zeros((5, 5), dtype=bool)
a[1:4, 1:4] = True
a[4, 4] = True
self.array = xp.asarray(a)
self.sq3x3 = xp.ones((3, 3))
self.opened_old = ndimage.binary_opening(self.array, self.sq3x3,
1, None, 0)
self.closed_old = ndimage.binary_closing(self.array, self.sq3x3,
1, None, 0)
def test_opening_new_arguments(self, xp):
self._setup(xp)
opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None,
0, None, 0, False)
xp_assert_equal(opened_new, self.opened_old)
def test_closing_new_arguments(self, xp):
self._setup(xp)
closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None,
0, None, 0, False)
xp_assert_equal(closed_new, self.closed_old)
@make_xp_test_case(ndimage.binary_erosion)
def test_binary_erosion_noninteger_iterations(xp):
# regression test for gh-9905, gh-9909: ValueError for
# non integer iterations
data = xp.ones([1])
assert_raises(TypeError, ndimage.binary_erosion, data, iterations=0.5)
assert_raises(TypeError, ndimage.binary_erosion, data, iterations=1.5)
@make_xp_test_case(ndimage.binary_dilation)
def test_binary_dilation_noninteger_iterations(xp):
# regression test for gh-9905, gh-9909: ValueError for
# non integer iterations
data = xp.ones([1])
assert_raises(TypeError, ndimage.binary_dilation, data, iterations=0.5)
assert_raises(TypeError, ndimage.binary_dilation, data, iterations=1.5)
@make_xp_test_case(ndimage.binary_opening)
def test_binary_opening_noninteger_iterations(xp):
# regression test for gh-9905, gh-9909: ValueError for
# non integer iterations
data = xp.ones([1])
assert_raises(TypeError, ndimage.binary_opening, data, iterations=0.5)
assert_raises(TypeError, ndimage.binary_opening, data, iterations=1.5)
@make_xp_test_case(ndimage.binary_closing)
def test_binary_closing_noninteger_iterations(xp):
# regression test for gh-9905, gh-9909: ValueError for
# non integer iterations
data = xp.ones([1])
assert_raises(TypeError, ndimage.binary_closing, data, iterations=0.5)
assert_raises(TypeError, ndimage.binary_closing, data, iterations=1.5)
@xfail_xp_backends(
"cupy", reason="CuPy: NotImplementedError: only brute_force iteration"
)
@make_xp_test_case(ndimage.binary_erosion)
def test_binary_closing_noninteger_brute_force_passes_when_true(xp):
# regression test for gh-9905, gh-9909: ValueError for non integer iterations
data = xp.ones([1])
xp_assert_equal(ndimage.binary_erosion(data, iterations=2, brute_force=1.5),
ndimage.binary_erosion(data, iterations=2, brute_force=bool(1.5))
)
xp_assert_equal(ndimage.binary_erosion(data, iterations=2, brute_force=0.0),
ndimage.binary_erosion(data, iterations=2, brute_force=bool(0.0))
)
@skip_xp_backends(np_only=True, exceptions=["cupy"],
reason="inplace output= is numpy-specific")
@xfail_xp_backends("cupy", reason="NotImplementedError: only brute_force iteration")
@pytest.mark.parametrize(
'func',
[
make_xp_pytest_param(ndimage.binary_erosion),
make_xp_pytest_param(ndimage.binary_dilation),
make_xp_pytest_param(ndimage.binary_opening),
make_xp_pytest_param(ndimage.binary_closing),
],
)
@pytest.mark.parametrize('iterations', [1, 5])
@pytest.mark.parametrize('brute_force', [False, True])
def test_binary_input_as_output(func, iterations, brute_force, xp):
rstate = np.random.RandomState(123)
data = rstate.randint(low=0, high=2, size=100).astype(bool)
data = xp.asarray(data)
# input data is not modified
data_orig = data.copy()
expected = func(data, brute_force=brute_force, iterations=iterations)
xp_assert_equal(data, data_orig)
# data should now contain the expected result
func(data, brute_force=brute_force, iterations=iterations, output=data)
xp_assert_equal(data, expected)
@skip_xp_backends(np_only=True, exceptions=["cupy"],
reason="inplace output= is numpy-specific")
@make_xp_test_case(ndimage.binary_hit_or_miss)
def test_binary_hit_or_miss_input_as_output(xp):
rstate = np.random.RandomState(123)
data = rstate.randint(low=0, high=2, size=100).astype(bool)
data = xp.asarray(data)
# input data is not modified
data_orig = data.copy()
expected = ndimage.binary_hit_or_miss(data)
xp_assert_equal(data, data_orig)
# data should now contain the expected result
ndimage.binary_hit_or_miss(data, output=data)
xp_assert_equal(data, expected)
@make_xp_test_case(ndimage.distance_transform_cdt)
def test_distance_transform_cdt_invalid_metric(xp):
msg = 'invalid metric provided'
with pytest.raises(ValueError, match=msg):
ndimage.distance_transform_cdt(xp.ones((5, 5)),
metric="garbage")
| TestBinaryOpeningClosing |
python | huggingface__transformers | src/transformers/models/deberta/modeling_deberta.py | {
"start": 25467,
"end": 29427
} | class ____(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = DebertaEmbeddings(config)
self.encoder = DebertaEncoder(config)
self.z_steps = 0
self.config = config
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
mask=attention_mask,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask,
output_hidden_states=True,
output_attentions=output_attentions,
return_dict=return_dict,
)
encoded_layers = encoder_outputs[1]
if self.z_steps > 1:
hidden_states = encoded_layers[-2]
layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
query_states = encoded_layers[-1]
rel_embeddings = self.encoder.get_rel_embedding()
attention_mask = self.encoder.get_attention_mask(attention_mask)
rel_pos = self.encoder.get_rel_pos(embedding_output)
for layer in layers[1:]:
query_states = layer(
hidden_states,
attention_mask,
output_attentions=False,
query_states=query_states,
relative_pos=rel_pos,
rel_embeddings=rel_embeddings,
)
encoded_layers.append(query_states)
sequence_output = encoded_layers[-1]
if not return_dict:
return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
attentions=encoder_outputs.attentions,
)
| DebertaModel |
python | davidhalter__jedi | test/static_analysis/class_simple.py | {
"start": 78,
"end": 152
} | class ____(Base.Nested):
pass
X().foo()
#! 4 attribute-error
X().bar()
| X |
python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {
"start": 7448,
"end": 7708
} | class ____(AttributeHandler):
minimum_path_length = 1
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
return [str(event.get_tag("environment"))]
@attribute_registry.register("type")
| EnvironmentAttributeHandler |
python | gevent__gevent | src/greentest/3.10/test_ftplib.py | {
"start": 39285,
"end": 42464
} | class ____(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = socket_helper.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.daemon = True
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.server_thread = None
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except TimeoutError:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
# bpo-39259
with self.assertRaises(ValueError):
ftplib.FTP(HOST, timeout=0)
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
| TestTimeouts |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_data_validation08.py | {
"start": 315,
"end": 1033
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("data_validation08.xlsx")
def test_create_file(self):
"""Test the creation of a XlsxWriter file with data validation."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.data_validation(
"C2",
{
"validate": "any",
"input_title": "This is the input title",
"input_message": "This is the input message",
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-all-array-elements-equal.py | {
"start": 75,
"end": 612
} | class ____(object):
def minOperations(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[int]
:rtype: List[int]
"""
nums.sort()
prefix = [0]*(len(nums)+1)
for i in xrange(len(nums)):
prefix[i+1] = prefix[i]+nums[i]
result = [0]*len(queries)
for i, q in enumerate(queries):
j = bisect.bisect_left(nums, q)
result[i] = (q*j-prefix[j])+((prefix[-1]-prefix[j])-q*(len(nums)-j))
return result
| Solution |
python | python-visualization__folium | folium/features.py | {
"start": 67559,
"end": 68991
} | class ____(MacroElement):
"""
When one clicks on a Map that contains a ClickForLatLng,
the coordinates of the pointer's position are copied to clipboard.
Parameters
==========
format_str : str, default 'lat + "," + lng'
The javascript string used to format the text copied to clipboard.
eg:
format_str = 'lat + "," + lng' >> 46.558860,3.397397
format_str = '"[" + lat + "," + lng + "]"' >> [46.558860,3.397397]
alert : bool, default True
Whether there should be an alert when something has been copied to clipboard.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
function getLatLng(e){
var lat = e.latlng.lat.toFixed(6),
lng = e.latlng.lng.toFixed(6);
var txt = {{this.format_str}};
navigator.clipboard.writeText(txt);
{% if this.alert %}alert("Copied to clipboard : \\n " + txt);{% endif %}
};
{{this._parent.get_name()}}.on('click', getLatLng);
{% endmacro %}
"""
) # noqa
def __init__(self, format_str: Optional[str] = None, alert: bool = True):
super().__init__()
self._name = "ClickForLatLng"
self.format_str = format_str or 'lat + "," + lng'
self.alert = alert
| ClickForLatLng |
python | getsentry__sentry | src/sentry/sentry_metrics/indexer/postgres/models.py | {
"start": 1032,
"end": 1362
} | class ____(BaseIndexer):
__relocation_scope__ = RelocationScope.Excluded
class Meta:
db_table = "sentry_stringindexer"
app_label = "sentry"
constraints = [
models.UniqueConstraint(fields=["string", "organization_id"], name="unique_org_string"),
]
@region_silo_model
| StringIndexer |
python | apache__airflow | task-sdk/src/airflow/sdk/api/client.py | {
"start": 27298,
"end": 29374
} | class ____:
"""
Operations related to Human in the loop. Require Airflow 3.1+.
:meta: private
"""
__slots__ = ("client",)
def __init__(self, client: Client) -> None:
self.client = client
def add_response(
self,
*,
ti_id: uuid.UUID,
options: list[str],
subject: str,
body: str | None = None,
defaults: list[str] | None = None,
multiple: bool = False,
params: dict[str, dict[str, Any]] | None = None,
assigned_users: list[HITLUser] | None = None,
) -> HITLDetailRequest:
"""Add a Human-in-the-loop response that waits for human response for a specific Task Instance."""
payload = CreateHITLDetailPayload(
ti_id=ti_id,
options=options,
subject=subject,
body=body,
defaults=defaults,
multiple=multiple,
params=params,
assigned_users=assigned_users,
)
resp = self.client.post(
f"/hitlDetails/{ti_id}",
content=payload.model_dump_json(),
)
return HITLDetailRequest.model_validate_json(resp.read())
def update_response(
self,
*,
ti_id: uuid.UUID,
chosen_options: list[str],
params_input: dict[str, Any],
) -> HITLDetailResponse:
"""Update an existing Human-in-the-loop response."""
payload = UpdateHITLDetail(
ti_id=ti_id,
chosen_options=chosen_options,
params_input=params_input,
)
resp = self.client.patch(
f"/hitlDetails/{ti_id}",
content=payload.model_dump_json(),
)
return HITLDetailResponse.model_validate_json(resp.read())
def get_detail_response(self, ti_id: uuid.UUID) -> HITLDetailResponse:
"""Get content part of a Human-in-the-loop response for a specific Task Instance."""
resp = self.client.get(f"/hitlDetails/{ti_id}")
return HITLDetailResponse.model_validate_json(resp.read())
| HITLOperations |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 39174,
"end": 43436
} | class ____(Source):
def name(self) -> str:
return ""
def guard_source(self) -> GuardSource:
return GuardSource.BACKWARD_STATE
def get_local_source_name(
source: Source, *, only_allow_input: bool = False
) -> Optional[str]:
if isinstance(source, ChainedSource):
return get_local_source_name(source.base, only_allow_input=only_allow_input)
if not isinstance(source, LocalSource):
return None
if only_allow_input and not source.is_input:
return None
return source.local_name
def is_from_local_source(source: Source, *, only_allow_input: bool = False) -> bool:
return get_local_source_name(source, only_allow_input=only_allow_input) is not None
def is_from_global_source(source: Source) -> bool:
return get_global_source_name(source) is not None
def get_global_source_name(source: Source) -> Optional[str]:
if isinstance(source, ChainedSource):
return get_global_source_name(source.base)
if not isinstance(source, GlobalSource):
return None
return source.global_name
def is_from_nonlocal_source(source: Source) -> bool:
if isinstance(source, ChainedSource):
return is_from_nonlocal_source(source.base)
return (
isinstance(source, LocalSource)
and source.is_derefed_cell_contents
and not source.is_input
)
def is_from_closure_source(source: Source) -> bool:
if isinstance(source, ClosureSource):
return True
if isinstance(source, ChainedSource):
return is_from_closure_source(source.base)
return False
def is_from_source(source: Source, target: Source) -> bool:
if isinstance(source, ChainedSource):
return is_from_source(source.base, target)
return source == target
@functools.lru_cache
def is_from_unspecialized_nn_module_source(source: Source) -> bool:
if isinstance(source, UnspecializedNNModuleSource):
return True
if isinstance(source, ChainedSource):
return is_from_unspecialized_nn_module_source(source.base)
return False
@functools.lru_cache
def is_from_unspecialized_builtin_nn_module_source(source: Source) -> bool:
if isinstance(source, UnspecializedBuiltinNNModuleSource):
return True
if isinstance(source, ChainedSource):
return is_from_unspecialized_builtin_nn_module_source(source.base)
return False
@functools.lru_cache
def is_from_unspecialized_param_buffer_source(source: Source) -> bool:
if isinstance(source, UnspecializedParamBufferSource):
return True
if isinstance(source, ChainedSource):
return is_from_unspecialized_param_buffer_source(source.base)
return False
@functools.lru_cache
def is_from_flatten_script_object_source(source: Source) -> bool:
if isinstance(source, FlattenScriptObjectSource):
return True
elif isinstance(source, ChainedSource):
return is_from_flatten_script_object_source(source.base)
return False
@functools.lru_cache
def is_from_optimizer_source(source: Source) -> bool:
if isinstance(source, OptimizerSource):
return True
if isinstance(source, ChainedSource):
return is_from_optimizer_source(source.base)
return False
# TODO: can probably write a generic "test this on everything in the chain"
# helper
@functools.lru_cache
def is_from_defaults(source: Source) -> bool:
if isinstance(source, DefaultsSource):
return True
# Accessed with func.__kwdefaults__["foo"]
if (
isinstance(source, DictGetItemSource)
and isinstance(source.base, AttrSource)
and source.base.member == "__kwdefaults__"
):
return True
# Accessed with func.__defaults__[0]
if (
isinstance(source, GetItemSource)
and isinstance(source.base, AttrSource)
and source.base.member == "__defaults__"
):
return True
if isinstance(source, ChainedSource):
return is_from_defaults(source.base)
return False
@functools.lru_cache
def is_from_skip_guard_source(source: Source) -> bool:
if isinstance(source, SkipGuardSource):
return True
if isinstance(source, ChainedSource):
return is_from_skip_guard_source(source.base)
return False
| BackwardStateSource |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/host_representation_tests/test_repo_handle.py | {
"start": 457,
"end": 2007
} | class ____(InProcessCodeLocation):
@property
def repository_code_pointer_dict(self) -> Mapping[str, Optional[CodePointer]]:
return {}
@dg.asset
def my_asset():
pass
defs = dg.Definitions(assets=[my_asset])
def test_repo_handle_without_code_pointers():
origin = InProcessCodeLocationOrigin(
loadable_target_origin=LoadableTargetOrigin(
executable_path=sys.executable,
python_file=__file__,
working_directory=os.path.dirname(__file__),
attribute=None,
),
container_image=None,
entry_point=None,
container_context=None,
location_name=None,
)
with instance_for_test() as instance:
location = InProcesssCodeLocationWithoutCodePointers(origin=origin, instance=instance)
assert location.repository_code_pointer_dict == {}
assert len(location.get_repositories()) == 1
repo = next(iter(location.get_repositories().values()))
assert repo.handle is not None
assert repo.handle.repository_python_origin is None
with pytest.raises(Exception, match="Repository does not have a RepositoryPythonOrigin"):
repo.handle.get_python_origin()
job = next(iter(repo.get_all_jobs()))
assert job.handle is not None
assert job.handle.get_remote_origin() is not None
with pytest.raises(Exception, match="Repository does not have a RepositoryPythonOrigin"):
job.handle.get_python_origin()
| InProcesssCodeLocationWithoutCodePointers |
python | pytorch__pytorch | torch/jit/quantized.py | {
"start": 1169,
"end": 1463
} | class ____(QuantizedRNNCellBase):
def __init__(self, other):
super().__init__(other)
raise RuntimeError(
"torch.jit.QuantizedLSTMCell is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic.LSTMCell instead."
)
| QuantizedLSTMCell |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 13399,
"end": 14344
} | class ____(nn.Module):
def __init__(self, input_size, output_size, num_groups):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
self.weight = nn.Parameter(torch.empty(self.num_groups, self.group_in_dim, self.group_out_dim))
self.bias = nn.Parameter(torch.empty(output_size))
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size = list(hidden_states.size())[0]
x = torch.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim])
x = x.permute(1, 0, 2)
x = torch.matmul(x, self.weight)
x = x.permute(1, 0, 2)
x = torch.reshape(x, [batch_size, -1, self.output_size])
x = x + self.bias
return x
| GroupedLinearLayer |
python | kamyu104__LeetCode-Solutions | Python/check-if-the-rectangle-corner-is-reachable.py | {
"start": 2531,
"end": 3969
} | class ____(object):
def canReachCorner(self, X, Y, circles):
"""
:type X: int
:type Y: int
:type circles: List[List[int]]
:rtype: bool
"""
def check(x1, y1, r1, x2, y2, r2):
return (x1-x2)**2+(y1-y2)**2 <= (r1+r2)**2
def iter_dfs(src, dst):
lookup = [False]*len(adj)
lookup[src] = True
stk = [src]
while stk:
u = stk.pop()
if u == dst:
return True
for v in adj[u]:
if lookup[v]:
continue
lookup[v] = True
stk.append(v)
return False
adj = [[] for _ in xrange(len(circles)+2)]
for u in xrange(len(circles)):
x1, y1, r1 = circles[u]
if x1-r1 <= 0 or y1+r1 >= Y:
adj[u].append(len(circles))
adj[len(circles)].append(u)
if x1+r1 >= X or y1-r1 <= 0:
adj[u].append(len(circles)+1)
adj[len(circles)+1].append(u)
for v in xrange(u):
x2, y2, r2 = circles[v]
if not check(x1, y1, r1, x2, y2, r2):
continue
adj[u].append(v)
adj[v].append(u)
return not iter_dfs(len(circles), len(circles)+1)
# Time: O(n^2)
# Space: O(n^2)
# bfs
| Solution3 |
python | django__django | tests/servers/tests.py | {
"start": 1598,
"end": 1850
} | class ____(LiveServerThread):
server_class = CloseConnectionTestServer
def _create_server(self, connections_override=None):
return super()._create_server(connections_override=self.connections_override)
| CloseConnectionTestLiveServerThread |
python | simplejson__simplejson | simplejson/tests/test_item_sort_key.py | {
"start": 90,
"end": 1376
} | class ____(TestCase):
def test_simple_first(self):
a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
json.dumps(a, item_sort_key=json.simple_first))
def test_case(self):
a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
self.assertEqual(
'{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=itemgetter(0)))
self.assertEqual(
'{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
def test_item_sort_key_value(self):
# https://github.com/simplejson/simplejson/issues/173
a = {'a': 1, 'b': 0}
self.assertEqual(
'{"b": 0, "a": 1}',
json.dumps(a, item_sort_key=lambda kv: kv[1]))
| TestItemSortKey |
python | tiangolo__fastapi | docs_src/extra_models/tutorial003.py | {
"start": 217,
"end": 644
} | class ____(BaseItem):
type: str = "plane"
size: int
items = {
"item1": {"description": "All my friends drive a low rider", "type": "car"},
"item2": {
"description": "Music is my aeroplane, it's my aeroplane",
"type": "plane",
"size": 5,
},
}
@app.get("/items/{item_id}", response_model=Union[PlaneItem, CarItem])
async def read_item(item_id: str):
return items[item_id]
| PlaneItem |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 10256,
"end": 11056
} | class ____:
TEST_CASES = CASES
def check_cases(self, require=None, exclude=None):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
if require is None:
require = set()
if exclude is None:
exclude = set()
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f"In test case: {case!r}\n\n"
msg += traceback.format_exc()
raise AssertionError(msg) from e
| LinalgTestCase |
python | readthedocs__readthedocs.org | readthedocs/filetreediff/dataclasses.py | {
"start": 477,
"end": 618
} | class ____:
"""A file in a file tree manifest."""
path: str
main_content_hash: str
@dataclass(slots=True)
| FileTreeDiffManifestFile |
python | getsentry__sentry | tests/sentry/auth_v2/endpoints/test_auth_merge_user_accounts.py | {
"start": 324,
"end": 1949
} | class ____(APITestCase):
endpoint = "sentry-api-0-auth-merge-accounts"
method = "get"
def test_simple(self) -> None:
user1 = self.create_user(username="mifu1", email="mifu@example.com")
user2 = self.create_user(username="mifu2", email="mifu@example.com")
# unrelated user
self.create_user(username="unrelated-mifu", email="michelle@example.com")
self.login_as(user1)
response = self.get_success_response()
assert len(response.data) == 2
assert response.data[0]["username"] == user1.username
assert response.data[1]["username"] == user2.username
def test_with_orgs(self) -> None:
user1 = self.create_user(username="powerful mifu", email="mifu@example.com")
user2 = self.create_user(username="transcendent mifu", email="mifu@example.com")
self.create_user(username="garden variety mifu", email="mifu@example.com")
org1 = self.create_organization(name="hojicha")
org2 = self.create_organization(name="matcha")
org3 = self.create_organization(name="oolong")
self.create_member(user=user1, organization=org1)
self.create_member(user=user1, organization=org2)
self.create_member(user=user2, organization=org3)
self.login_as(user1)
response = self.get_success_response()
assert response.data[0]["organizations"] == [org1.name, org2.name]
assert response.data[1]["organizations"] == [org3.name]
assert response.data[2]["organizations"] == []
@control_silo_test
@override_settings(IS_DEV=True)
| ListUserAccountsWithSharedEmailTest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 68245,
"end": 68440
} | class ____(BaseModel, extra="forbid"):
"""
Exact match on any of the given values
"""
any: "AnyVariants" = Field(..., description="Exact match on any of the given values")
| MatchAny |
python | psf__requests | src/requests/auth.py | {
"start": 2851,
"end": 3095
} | class ____(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password)
return r
| HTTPProxyAuth |
python | getsentry__sentry | src/sentry/search/eap/trace_metrics/config.py | {
"start": 658,
"end": 789
} | class ____:
metric_name: str
metric_type: MetricType
metric_unit: str | None
@dataclass(frozen=True, kw_only=True)
| Metric |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 34684,
"end": 36105
} | class ____(RoleImpl):
__slots__ = ()
_skip_clauseelement_for_target_match = True
def _literal_coercion(self, element, *, argname=None, **kw):
self._raise_for_expected(element, argname)
def _implicit_coercions(
self,
element: Any,
resolved: Any,
argname: Optional[str] = None,
*,
legacy: bool = False,
**kw: Any,
) -> Any:
if isinstance(element, roles.JoinTargetRole):
# note that this codepath no longer occurs as of
# #6550, unless JoinTargetImpl._skip_clauseelement_for_target_match
# were set to False.
return element
elif legacy and resolved._is_select_base:
util.warn_deprecated(
"Implicit coercion of SELECT and textual SELECT "
"constructs into FROM clauses is deprecated; please call "
".subquery() on any Core select or ORM Query object in "
"order to produce a subquery object.",
version="1.4",
)
# TODO: doing _implicit_subquery here causes tests to fail,
# how was this working before? probably that ORM
# join logic treated it as a select and subquery would happen
# in _ORMJoin->Join
return resolved
else:
self._raise_for_expected(element, argname, resolved)
| JoinTargetImpl |
python | pandas-dev__pandas | asv_bench/benchmarks/tslibs/timestamp.py | {
"start": 1263,
"end": 2451
} | class ____:
params = [_tzs]
param_names = ["tz"]
def setup(self, tz):
self.ts = Timestamp("2017-08-25 08:16:14", tzinfo=tz)
def time_tz(self, tz):
self.ts.tz
def time_dayofweek(self, tz):
self.ts.dayofweek
def time_dayofyear(self, tz):
self.ts.dayofyear
def time_week(self, tz):
self.ts.week
def time_quarter(self, tz):
self.ts.quarter
def time_days_in_month(self, tz):
self.ts.days_in_month
def time_is_month_start(self, tz):
self.ts.is_month_start
def time_is_month_end(self, tz):
self.ts.is_month_end
def time_is_quarter_start(self, tz):
self.ts.is_quarter_start
def time_is_quarter_end(self, tz):
self.ts.is_quarter_end
def time_is_year_start(self, tz):
self.ts.is_year_start
def time_is_year_end(self, tz):
self.ts.is_year_end
def time_is_leap_year(self, tz):
self.ts.is_leap_year
def time_microsecond(self, tz):
self.ts.microsecond
def time_month_name(self, tz):
self.ts.month_name()
def time_weekday_name(self, tz):
self.ts.day_name()
| TimestampProperties |
python | openai__openai-python | src/openai/types/beta/threads/message_list_params.py | {
"start": 207,
"end": 1296
} | class ____(TypedDict, total=False):
after: str
"""A cursor for use in pagination.
`after` is an object ID that defines your place in the list. For instance, if
you make a list request and receive 100 objects, ending with obj_foo, your
subsequent call can include after=obj_foo in order to fetch the next page of the
list.
"""
before: str
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
limit: int
"""A limit on the number of objects to be returned.
Limit can range between 1 and 100, and the default is 20.
"""
order: Literal["asc", "desc"]
"""Sort order by the `created_at` timestamp of the objects.
`asc` for ascending order and `desc` for descending order.
"""
run_id: str
"""Filter messages by the run ID that generated them."""
| MessageListParams |
python | openai__openai-python | src/openai/types/responses/response_output_text_param.py | {
"start": 2002,
"end": 2428
} | class ____(TypedDict, total=False):
file_id: Required[str]
"""The ID of the file."""
index: Required[int]
"""The index of the file in the list of files."""
type: Required[Literal["file_path"]]
"""The type of the file path. Always `file_path`."""
Annotation: TypeAlias = Union[
AnnotationFileCitation, AnnotationURLCitation, AnnotationContainerFileCitation, AnnotationFilePath
]
| AnnotationFilePath |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_core/test_worker.py | {
"start": 48338,
"end": 55739
} | class ____:
"""Tests worker service account."""
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"workers": {
"serviceAccount": {"create": True},
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/workers/worker-serviceaccount.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
@pytest.mark.parametrize(
"executor",
[
"CeleryKubernetesExecutor",
"CeleryExecutor,KubernetesExecutor",
"KubernetesExecutor",
"LocalKubernetesExecutor",
],
)
@pytest.mark.parametrize(
("workers_values", "obj"),
[
({"serviceAccount": {"create": True}}, "worker"),
(
{
"useWorkerDedicatedServiceAccounts": True,
"kubernetes": {"serviceAccount": {"create": True}},
},
"worker-kubernetes",
),
],
)
def test_should_create_worker_service_account_for_specific_kubernetes_executors(
self, executor, workers_values, obj
):
docs = render_chart(
values={
"executor": executor,
"workers": workers_values,
},
show_only=[f"templates/workers/{obj}-serviceaccount.yaml"],
)
assert len(docs) == 1
assert jmespath.search("kind", docs[0]) == "ServiceAccount"
@pytest.mark.parametrize(
"executor",
[
"CeleryExecutor",
"CeleryKubernetesExecutor",
"CeleryExecutor,KubernetesExecutor",
],
)
@pytest.mark.parametrize(
("workers_values", "obj"),
[
({"serviceAccount": {"create": True}}, "worker"),
(
{
"useWorkerDedicatedServiceAccounts": True,
"celery": {"serviceAccount": {"create": True}},
},
"worker-celery",
),
],
)
def test_should_create_worker_service_account_for_specific_celery_executors(
self, executor, workers_values, obj
):
docs = render_chart(
values={
"executor": executor,
"workers": workers_values,
},
show_only=[f"templates/workers/{obj}-serviceaccount.yaml"],
)
assert len(docs) == 1
assert jmespath.search("kind", docs[0]) == "ServiceAccount"
def test_worker_service_account_creation_for_local_executor(self):
docs = render_chart(
values={
"executor": "LocalExecutor",
"workers": {
"serviceAccount": {"create": True},
},
},
show_only=["templates/workers/worker-serviceaccount.yaml"],
)
assert len(docs) == 0
@pytest.mark.parametrize(
"executor",
[
"CeleryExecutor",
"CeleryKubernetesExecutor",
"CeleryExecutor,KubernetesExecutor",
"KubernetesExecutor",
"LocalKubernetesExecutor",
],
)
def test_worker_service_account_labels_per_executor(self, executor):
docs = render_chart(
values={
"executor": executor,
"workers": {
"serviceAccount": {"create": True},
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/workers/worker-serviceaccount.yaml"],
)
assert len(docs) == 1
assert jmespath.search("kind", docs[0]) == "ServiceAccount"
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_default_automount_service_account_token(self):
docs = render_chart(
values={
"workers": {
"serviceAccount": {"create": True},
},
},
show_only=["templates/workers/worker-serviceaccount.yaml"],
)
assert jmespath.search("automountServiceAccountToken", docs[0]) is True
@pytest.mark.parametrize(
("workers_values", "obj"),
[
(
{"useWorkerDedicatedServiceAccounts": True, "celery": {"serviceAccount": {"create": True}}},
"worker-celery",
),
(
{
"useWorkerDedicatedServiceAccounts": True,
"kubernetes": {"serviceAccount": {"create": True}},
},
"worker-kubernetes",
),
],
)
def test_dedicated_service_account_token_automount(self, workers_values, obj):
docs = render_chart(
values={
"executor": "CeleryExecutor,KubernetesExecutor",
"workers": workers_values,
},
show_only=[f"templates/workers/{obj}-serviceaccount.yaml"],
)
assert len(docs) == 1
assert jmespath.search("automountServiceAccountToken", docs[0]) is True
@pytest.mark.parametrize(
("workers_values", "obj"),
[
({"serviceAccount": {"create": True, "automountServiceAccountToken": False}}, "worker"),
(
{
"useWorkerDedicatedServiceAccounts": True,
"celery": {"serviceAccount": {"create": True, "automountServiceAccountToken": False}},
},
"worker-celery",
),
(
{
"serviceAccount": {"create": True, "automountServiceAccountToken": True},
"useWorkerDedicatedServiceAccounts": True,
"celery": {"serviceAccount": {"create": True, "automountServiceAccountToken": False}},
},
"worker-celery",
),
(
{
"useWorkerDedicatedServiceAccounts": True,
"kubernetes": {"serviceAccount": {"create": True, "automountServiceAccountToken": False}},
},
"worker-kubernetes",
),
(
{
"serviceAccount": {"create": True, "automountServiceAccountToken": True},
"useWorkerDedicatedServiceAccounts": True,
"kubernetes": {"serviceAccount": {"create": True, "automountServiceAccountToken": False}},
},
"worker-kubernetes",
),
],
)
def test_overridden_automount_service_account_token(self, workers_values, obj):
docs = render_chart(
values={
"executor": "CeleryExecutor,KubernetesExecutor",
"workers": workers_values,
},
show_only=[f"templates/workers/{obj}-serviceaccount.yaml"],
)
assert jmespath.search("automountServiceAccountToken", docs[0]) is False
| TestWorkerServiceAccount |
python | getsentry__sentry | src/sentry/api/serializers/models/orgauthtoken.py | {
"start": 134,
"end": 750
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
token = kwargs["token"]
data = {
"id": str(obj.id),
"name": obj.name,
"scopes": obj.get_scopes(),
"tokenLastCharacters": obj.token_last_characters,
"dateCreated": obj.date_added,
"dateLastUsed": obj.date_last_used,
"projectLastUsedId": (
str(obj.project_last_used_id) if obj.project_last_used_id else None
),
}
if token:
data["token"] = token
return data
| OrgAuthTokenSerializer |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 36564,
"end": 38216
} | class ____(nn.Module):
"""
Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples
the predictions to the input resolution after the first convolutional layer (details can be found in the paper's
supplementary material).
"""
def __init__(self, config: DPTConfig):
super().__init__()
self.config = config
self.projection = None
if config.add_projection:
self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
features = config.fusion_hidden_size
self.head = nn.Sequential(
nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),
nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
nn.ReLU(),
)
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
# use last features
hidden_states = hidden_states[self.config.head_in_index]
if self.projection is not None:
hidden_states = self.projection(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
predicted_depth = self.head(hidden_states)
predicted_depth = predicted_depth.squeeze(dim=1)
return predicted_depth
@auto_docstring(
custom_intro="""
DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
"""
)
| DPTDepthEstimationHead |
python | huggingface__transformers | src/transformers/models/deepseek_v2/modular_deepseek_v2.py | {
"start": 20731,
"end": 21285
} | class ____(LlamaDecoderLayer):
def __init__(self, config: DeepseekV2Config, layer_idx: int):
super().__init__(config, layer_idx)
self.self_attn = DeepseekV2Attention(config=config, layer_idx=layer_idx)
self.mlp = DeepseekV2Moe(config) if layer_idx >= config.first_k_dense_replace else DeepseekV2MLP(config)
self.input_layernorm = DeepseekV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = DeepseekV2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
| DeepseekV2DecoderLayer |
python | tiangolo__fastapi | tests/test_skip_defaults.py | {
"start": 279,
"end": 363
} | class ____(Model):
y: int
z: int = 0
w: Optional[int] = None
| ModelSubclass |
python | django__django | tests/one_to_one/models.py | {
"start": 1485,
"end": 1630
} | class ____(models.Model):
link = models.OneToOneField(ManualPrimaryKey, models.CASCADE)
name = models.CharField(max_length=50)
| RelatedModel |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 111851,
"end": 115598
} | class ____(GoogleCloudBaseOperator):
"""
Updates a job trigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPUpdateJobTriggerOperator`
:param job_trigger_id: The ID of the DLP job trigger to be updated.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param job_trigger: New JobTrigger value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"job_trigger_id",
"project_id",
"job_trigger",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggerDetailsLink(),)
def __init__(
self,
*,
job_trigger_id,
project_id: str = PROVIDE_PROJECT_ID,
job_trigger: dict | JobTrigger | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.job_trigger = job_trigger
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
trigger = hook.update_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
job_trigger=self.job_trigger,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobTriggerDetailsLink.persist(
context=context,
project_id=project_id,
trigger_name=self.job_trigger_id,
)
return JobTrigger.to_dict(trigger)
| CloudDLPUpdateJobTriggerOperator |
python | django-import-export__django-import-export | tests/core/tests/test_fields.py | {
"start": 259,
"end": 363
} | class ____:
def __init__(self, name, date=None):
self.name = name
self.date = date
| Obj |
python | scrapy__scrapy | tests/test_http2_client_protocol.py | {
"start": 4634,
"end": 4747
} | class ____(LeafResource):
def render_GET(self, request: TxRequest):
return NOT_DONE_YET
| TimeoutResponse |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 47571,
"end": 47720
} | class ____(Elemwise):
_parameters = ["frame", "left", "right", "inclusive"]
_defaults = {"inclusive": "both"}
operation = M.between
| Between |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 21235,
"end": 21876
} | class ____(WrapperLine):
wrapper: PythonWrapperCodegen
kernel_name: str
kernel_body: str
metadata: Optional[str] = None
gpu: bool = True
cpp_definition: Optional[str] = None
def codegen(self, code: IndentedBuffer) -> None:
self.wrapper._define_kernel_helper(
self.kernel_name,
self.kernel_body,
metadata=self.metadata,
gpu=self.gpu,
cpp_definition=self.cpp_definition,
)
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
return converter._generate_kernel_definition
@dataclasses.dataclass
| KernelDefinitionLine |
python | scikit-image__scikit-image | src/skimage/measure/fit.py | {
"start": 22667,
"end": 51300
} | class ____(_BaseModel):
"""Total least squares estimator for 2D ellipses.
The functional model of the ellipse is::
xt = xc + a*cos(theta)*cos(t) - b*sin(theta)*sin(t)
yt = yc + a*sin(theta)*cos(t) + b*cos(theta)*sin(t)
d = sqrt((x - xt)**2 + (y - yt)**2)
where ``(xt, yt)`` is the closest point on the ellipse to ``(x, y)``. Thus
d is the shortest distance from the point to the ellipse.
The estimator is based on a least squares minimization. The optimal
solution is computed directly, no iterations are required. This leads
to a simple, stable and robust fitting method.
Parameters
----------
center : array-like, shape (2,)
Coordinates of ellipse center.
axis_lengths : array-like, shape (2,)
Length of first axis and length of second axis. Call these ``a`` and
``b``.
theta : float
Angle of first axis.
Raises
------
ValueError
If `center` does not have length 2.
Examples
--------
>>> em = EllipseModel((10, 15), (8, 4), np.deg2rad(30))
>>> xy = em.predict_xy(np.linspace(0, 2 * np.pi, 25))
>>> ellipse = EllipseModel.from_estimate(xy)
>>> ellipse.center
array([10., 15.])
>>> ellipse.axis_lengths
array([8., 4.])
>>> round(ellipse.theta, 2)
0.52
>>> np.round(abs(ellipse.residuals(xy)), 5)
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
The estimation can fail when — for example — all the input or output
points are the same. If this happens, you will get an ellipse model for
which ``bool(model)`` is ``False``:
>>> # A successfully estimated model is truthy:
>>> if ellipse:
... print("Estimation succeeded.")
Estimation succeeded.
>>> # Not so for a degenerate model with identical points.
>>> bad_data = np.ones((4, 2))
>>> bad_ellipse = EllipseModel.from_estimate(bad_data)
>>> if not bad_ellipse:
... print("Estimation failed.")
Estimation failed.
Trying to use this failed estimation transform result will give a suitable
error:
>>> bad_ellipse.residuals(xy) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
FailedEstimationAccessError: No attribute "residuals" for failed estimation ...
"""
def _args_init(self, center, axis_lengths, theta):
"""Initialize ``EllipseModel`` instance.
Parameters
----------
center : array-like, shape (2,)
Coordinates of ellipse center.
axis_lengths : array-like, shape (2,)
Length of first axis and length of second axis. Call these ``a``
and ``b``.
theta : float
Angle of first axis.
"""
self.center, self.axis_lengths, self.theta = self._check_init_values(
center, axis_lengths, theta
)
def _check_init_values(self, center, axis_lengths, theta):
center, axis_lengths = [np.array(v) for v in (center, axis_lengths)]
if not len(center) == 2:
raise ValueError('Center coordinates should be length 2')
if not len(axis_lengths) == 2:
raise ValueError('Axis lengths should be length 2')
return center, axis_lengths, theta
def _params2init_values(self, params):
params = np.array(params)
if len(params) != 5:
raise ValueError('Input `params` should be length 5')
return self._check_init_values(params[:2], params[2:4], params[4])
@property
@deprecate_func(
deprecated_version=_PARAMS_DEP_START,
removed_version=_PARAMS_DEP_STOP,
hint='`params` attribute deprecated; use `center, axis_lengths, theta` attributes instead',
)
def params(self):
"""Return model attributes ``center, axis_lengths, theta`` as 1D array."""
return np.r_[self.center, self.axis_lengths, self.theta]
@classmethod
def from_estimate(cls, data):
"""Estimate ellipse model from data using total least squares.
Parameters
----------
data : (N, 2) array
N points with ``(x, y)`` coordinates, respectively.
Returns
-------
model : Self or `~.FailedEstimation`
An instance of the ellipse model if the estimation succeeded.
Otherwise, we return a special ``FailedEstimation`` object to
signal a failed estimation. Testing the truth value of the failed
estimation object will return ``False``. E.g.
.. code-block:: python
model = EllipseModel.from_estimate(...)
if not model:
raise RuntimeError(f"Failed estimation: {model}")
References
----------
.. [1] Halir, R.; Flusser, J. "Numerically stable direct least squares
fitting of ellipses". In Proc. 6th International Conference in
Central Europe on Computer Graphics and Visualization.
WSCG (Vol. 98, pp. 125-132).
"""
return super().from_estimate(data)
def _estimate(self, data, warn_only=True):
# Original Implementation: Ben Hammel, Nick Sullivan-Molina
# another REFERENCE: [2] http://mathworld.wolfram.com/Ellipse.html
_check_data_dim(data, dim=2)
if len(data) < 5:
return _warn_or_msg(
"Need at least 5 data points to estimate an ellipse.",
warn_only=warn_only,
)
# to prevent integer overflow, cast data to float, if it isn't already
float_type = np.promote_types(data.dtype, np.float32)
data = data.astype(float_type, copy=False)
# normalize value range to avoid misfitting due to numeric errors if
# the relative distances are small compared to absolute distances
origin = data.mean(axis=0)
data = data - origin
scale = data.std()
if scale < np.finfo(float_type).tiny:
return _warn_or_msg(
"Standard deviation of data is too small to estimate "
"ellipse with meaningful precision.",
warn_only=warn_only,
)
data /= scale
x = data[:, 0]
y = data[:, 1]
# Quadratic part of design matrix [eqn. 15] from [1]
D1 = np.vstack([x**2, x * y, y**2]).T
# Linear part of design matrix [eqn. 16] from [1]
D2 = np.vstack([x, y, np.ones_like(x)]).T
# forming scatter matrix [eqn. 17] from [1]
S1 = D1.T @ D1
S2 = D1.T @ D2
S3 = D2.T @ D2
# Constraint matrix [eqn. 18]
C1 = np.array([[0.0, 0.0, 2.0], [0.0, -1.0, 0.0], [2.0, 0.0, 0.0]])
try:
# Reduced scatter matrix [eqn. 29]
M = inv(C1) @ (S1 - S2 @ inv(S3) @ S2.T)
except np.linalg.LinAlgError: # LinAlgError: Singular matrix
return 'Singular matrix from estimation'
# M*|a b c >=l|a b c >. Find eigenvalues and eigenvectors
# from this equation [eqn. 28]
eig_vals, eig_vecs = np.linalg.eig(M)
# eigenvector must meet constraint 4ac - b^2 to be valid.
cond = 4 * np.multiply(eig_vecs[0, :], eig_vecs[2, :]) - np.power(
eig_vecs[1, :], 2
)
a1 = eig_vecs[:, (cond > 0)]
# seeks for empty matrix
if 0 in a1.shape or len(a1.ravel()) != 3:
return 'Eigenvector constraints not met'
a, b, c = a1.ravel()
# |d f g> = -S3^(-1)*S2^(T)*|a b c> [eqn. 24]
a2 = -inv(S3) @ S2.T @ a1
d, f, g = a2.ravel()
# eigenvectors are the coefficients of an ellipse in general form
# a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 (eqn. 15) from [2]
b /= 2.0
d /= 2.0
f /= 2.0
# finding center of ellipse [eqn.19 and 20] from [2]
x0 = (c * d - b * f) / (b**2.0 - a * c)
y0 = (a * f - b * d) / (b**2.0 - a * c)
# Find the semi-axes lengths [eqn. 21 and 22] from [2]
numerator = a * f**2 + c * d**2 + g * b**2 - 2 * b * d * f - a * c * g
term = np.sqrt((a - c) ** 2 + 4 * b**2)
denominator1 = (b**2 - a * c) * (term - (a + c))
denominator2 = (b**2 - a * c) * (-term - (a + c))
width = np.sqrt(2 * numerator / denominator1)
height = np.sqrt(2 * numerator / denominator2)
# angle of counterclockwise rotation of major-axis of ellipse
# to x-axis [eqn. 23] from [2].
phi = 0.5 * np.arctan((2.0 * b) / (a - c))
if a > c:
phi += 0.5 * np.pi
# stabilize parameters:
# sometimes small fluctuations in data can cause
# height and width to swap
if width < height:
width, height = height, width
phi += np.pi / 2
phi %= np.pi
# Revert normalization and set parameters.
params = np.nan_to_num([x0, y0, width, height, phi]).real
params[:4] *= scale
params[:2] += origin
self.center, self.axis_lengths, self.theta = (
params[:2],
params[2:4],
params[-1],
)
return None
def residuals(self, data):
"""Determine residuals of data to model.
For each point the shortest distance to the ellipse is returned.
Parameters
----------
data : (N, 2) array
N points with ``(x, y)`` coordinates, respectively.
Returns
-------
residuals : (N,) array
Residual for each data point.
"""
_check_data_dim(data, dim=2)
xc, yc = self.center
a, b = self.axis_lengths
theta = self.theta
ctheta = math.cos(theta)
stheta = math.sin(theta)
x = data[:, 0]
y = data[:, 1]
N = data.shape[0]
def fun(t, xi, yi):
ct = math.cos(np.squeeze(t))
st = math.sin(np.squeeze(t))
xt = xc + a * ctheta * ct - b * stheta * st
yt = yc + a * stheta * ct + b * ctheta * st
return (xi - xt) ** 2 + (yi - yt) ** 2
# def Dfun(t, xi, yi):
# ct = math.cos(t)
# st = math.sin(t)
# xt = xc + a * ctheta * ct - b * stheta * st
# yt = yc + a * stheta * ct + b * ctheta * st
# dfx_t = - 2 * (xi - xt) * (- a * ctheta * st
# - b * stheta * ct)
# dfy_t = - 2 * (yi - yt) * (- a * stheta * st
# + b * ctheta * ct)
# return [dfx_t + dfy_t]
residuals = np.empty((N,), dtype=np.float64)
# initial guess for parameter t of closest point on ellipse
t0 = np.arctan2(y - yc, x - xc) - theta
# determine shortest distance to ellipse for each point
for i in range(N):
xi = x[i]
yi = y[i]
# faster without Dfun, because of the python overhead
t, _ = optimize.leastsq(fun, t0[i], args=(xi, yi))
residuals[i] = np.sqrt(fun(t, xi, yi))
return residuals
@_deprecate_model_params
def predict_xy(self, t, params=DEPRECATED):
"""Predict x- and y-coordinates using the estimated model.
Parameters
----------
t : array
Angles in circle in radians. Angles start to count from positive
x-axis to positive y-axis in a right-handed system.
Returns
-------
xy : (..., 2) array
Predicted x- and y-coordinates.
Other parameters
----------------
params : `~.DEPRECATED`, optional
Optional ellipse model parameters in the following order ``xc``,
``yc``, `a`, `b`, `theta`.
.. deprecated:: {{ start_version }}
"""
t = np.asanyarray(t)
(xc, yc), (a, b), theta = self._get_init_values(params)
ct = np.cos(t)
st = np.sin(t)
ctheta = math.cos(theta)
stheta = math.sin(theta)
x = xc + a * ctheta * ct - b * stheta * st
y = yc + a * stheta * ct + b * ctheta * st
return np.concatenate((x[..., None], y[..., None]), axis=t.ndim)
@_deprecate_estimate
def estimate(self, data):
"""Estimate ellipse model from data using total least squares.
Parameters
----------
data : (N, 2) array
N points with ``(x, y)`` coordinates, respectively.
Returns
-------
success : bool
True, if model estimation succeeds.
References
----------
.. [1] Halir, R.; Flusser, J. "Numerically stable direct least squares
fitting of ellipses". In Proc. 6th International Conference in
Central Europe on Computer Graphics and Visualization.
WSCG (Vol. 98, pp. 125-132).
"""
return self._estimate(data) is None
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
if probability == 0:
return 0
if n_inliers == 0:
return np.inf
inlier_ratio = n_inliers / n_samples
nom = 1 - probability
denom = 1 - inlier_ratio**min_samples
# Keep (de-)nominator in the range of [_EPSILON, 1 - _EPSILON] so that
# it is always guaranteed that the logarithm is negative and we return
# a positive number of trials.
nom = np.clip(nom, a_min=_EPSILON, a_max=1 - _EPSILON)
denom = np.clip(denom, a_min=_EPSILON, a_max=1 - _EPSILON)
return np.ceil(np.log(nom) / np.log(denom))
def add_from_estimate(cls):
"""Add ``from_estimate`` method class using ``estimate`` method"""
if hasattr(cls, 'from_estimate'):
if not inspect.ismethod(cls.from_estimate):
raise TypeError(f'Class {cls} `from_estimate` must be a ' 'class method.')
return cls
if not hasattr(cls, 'estimate'):
raise TypeError(
f'Class {cls} must have `from_estimate` class method '
'or `estimate` method.'
)
warn(
"Passing custom classes without `from_estimate` has been deprecated "
"since version 0.26 and will be removed in version 2.2. "
"Add `from_estimate` class method to custom class to avoid this "
"warning.",
category=FutureWarning,
stacklevel=3,
)
class FromEstimated(cls):
@classmethod
def from_estimate(klass, *args, **kwargs):
# Assume we can make default instance without input arguments.
instance = klass()
success = instance.estimate(*args, **kwargs)
return (
instance
if success
else FailedEstimation(f'`{cls.__name__}` estimation failed')
)
return FromEstimated
def ransac(
data,
model_class,
min_samples,
residual_threshold,
is_data_valid=None,
is_model_valid=None,
max_trials=100,
stop_sample_num=np.inf,
stop_residuals_sum=0,
stop_probability=1,
rng=None,
initial_inliers=None,
):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. Each iteration
performs the following tasks:
1. Select `min_samples` random samples from the original data and check
whether the set of data is valid (see `is_data_valid`).
2. Estimate a model to the random subset
(`model_cls.from_estimate(*data[random_subset]`) and check whether the
estimated model is valid (see `is_model_valid`).
3. Classify all data as inliers or outliers by calculating the residuals
to the estimated model (`model_cls.residuals(*data)`) - all data samples
with residuals smaller than the `residual_threshold` are considered as
inliers.
4. Save estimated model as best model if number of inlier samples is
maximal. In case the current estimated model has the same number of
inliers, it is only considered as the best model if it has less sum of
residuals.
These steps are performed either a maximum number of times or until one of
the special stop criteria are met. The final model is estimated using all
inlier samples of the previously determined best model.
Parameters
----------
data : list or tuple or array of shape (N,)
Data set to which the model is fitted, where N is the number of data
points and the remaining dimension are depending on model requirements.
If the model class requires multiple input data arrays (e.g. source and
destination coordinates of ``skimage.transform.AffineTransform``),
they can be optionally passed as tuple or list. Note, that in this case
the functions ``estimate(*data)``, ``residuals(*data)``,
``is_model_valid(model, *random_data)`` and
``is_data_valid(*random_data)`` must all take each data array as
separate arguments.
model_class : type
Class with the following methods:
* Either:
* ``from_estimate`` class method returning transform instance, as in
``tform = model_class.from_estimate(*data)``; the resulting
``tform`` should be truthy (``bool(tform) == True``) where
estimation succeeded, or falsey (``bool(tform) == False``) where it
failed; OR
* (deprecated) ``estimate`` instance method, returning flag to
indicate successful estimation, as in ``tform = model_class();
success = tform.estimate(*data)``. ``success == True`` when
estimation succeeded, ``success == False`` when it failed.
* ``residuals(*data)``
Your model should conform to the ``RansacModelProtocol`` — meaning
implement all of the methods / attributes specified by the
:class:``RansacModelProctocol``. An easy check to see whether that is
the case is to use ``isinstance(MyModel, RansacModelProtocol)``. See
https://docs.python.org/3/library/typing.html#typing.Protocol for more
details.
min_samples : int, in range (0, N)
The minimum number of data points to fit a model to.
residual_threshold : float, >0
Maximum distance for a data point to be classified as an inlier.
is_data_valid : Callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(*random_data)`.
is_model_valid : Callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, *random_data)`, .
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_sample_num : int, optional
Stop iteration if at least this number of inliers are found.
stop_residuals_sum : float, optional
Stop iteration if sum of residuals is less than or equal to this
threshold.
stop_probability : float, optional, in range [0, 1]
RANSAC iteration stops if at least one outlier-free set of the
training data is sampled with ``probability >= stop_probability``,
depending on the current best model's inlier ratio and the number
of trials. This requires to generate at least N samples (trials):
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to a high value
such as 0.99, e is the current fraction of inliers w.r.t. the
total number of samples, and m is the min_samples value.
rng : {`numpy.random.Generator`, int}, optional
Pseudo-random number generator.
By default, a PCG64 generator is used (see :func:`numpy.random.default_rng`).
If `rng` is an int, it is used to seed the generator.
initial_inliers : array-like of bool, shape (N,), optional
Initial samples selection for model estimation
Returns
-------
model : object
Best model with largest consensus set.
inliers : (N,) array
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] "RANSAC", Wikipedia, https://en.wikipedia.org/wiki/RANSAC
Examples
--------
Generate ellipse data without tilt and add noise:
>>> t = np.linspace(0, 2 * np.pi, 50)
>>> xc, yc = 20, 30
>>> a, b = 5, 10
>>> x = xc + a * np.cos(t)
>>> y = yc + b * np.sin(t)
>>> data = np.column_stack([x, y])
>>> rng = np.random.default_rng(203560) # do not copy this value
>>> data += rng.normal(size=data.shape)
Add some faulty data:
>>> data[0] = (100, 100)
>>> data[1] = (110, 120)
>>> data[2] = (120, 130)
>>> data[3] = (140, 130)
Estimate ellipse model using all available data:
>>> model = EllipseModel.from_estimate(data)
>>> np.round(model.center)
array([71., 75.])
>>> np.round(model.axis_lengths)
array([77., 13.])
>>> np.round(model.theta)
1.0
Next we estimate an ellipse model using RANSAC.
Note that the results are not deterministic, because the RANSAC algorithm
uses some randomness. If you need the results to be deterministic, pass a
seeded number generator with the ``rng`` argument to ``ransac``.
>>> ransac_model, inliers = ransac(data, EllipseModel, 20, 3, max_trials=50)
>>> np.abs(np.round(ransac_model.center)) # doctest: +SKIP
array([20., 30.])
>>> np.abs(np.round(ransac_model.axis_lengths)) # doctest: +SKIP
array([10., 6.])
>>> np.abs(np.round(ransac_model.theta)) # doctest: +SKIP
2.0
>>> inliers # doctest: +SKIP
array([False, False, False, False, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True], dtype=bool)
>>> sum(inliers) > 40
True
RANSAC can be used to robustly estimate a geometric
transformation. In this section, we also show how to use a
proportion of the total samples, rather than an absolute number.
>>> from skimage.transform import SimilarityTransform
>>> rng = np.random.default_rng()
>>> src = 100 * rng.random((50, 2))
>>> model0 = SimilarityTransform(scale=0.5, rotation=1,
... translation=(10, 20))
>>> dst = model0(src)
>>> dst[0] = (10000, 10000)
>>> dst[1] = (-100, 100)
>>> dst[2] = (50, 50)
>>> ratio = 0.5 # use half of the samples
>>> min_samples = int(ratio * len(src))
>>> model, inliers = ransac(
... (src, dst),
... SimilarityTransform,
... min_samples,
... 10,
... initial_inliers=np.ones(len(src), dtype=bool),
... ) # doctest: +SKIP
>>> inliers # doctest: +SKIP
array([False, False, False, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True])
"""
best_inlier_num = 0
best_inlier_residuals_sum = np.inf
best_inliers = []
validate_model = is_model_valid is not None
validate_data = is_data_valid is not None
rng = np.random.default_rng(rng)
# in case data is not pair of input and output, male it like it
if not isinstance(data, (tuple, list)):
data = (data,)
num_samples = len(data[0])
if not (0 < min_samples <= num_samples):
raise ValueError(f"`min_samples` must be in range (0, {num_samples}]")
if residual_threshold < 0:
raise ValueError("`residual_threshold` must be greater than zero")
if max_trials < 0:
raise ValueError("`max_trials` must be greater than zero")
if not (0 <= stop_probability <= 1):
raise ValueError("`stop_probability` must be in range [0, 1]")
if initial_inliers is not None and len(initial_inliers) != num_samples:
raise ValueError(
f"RANSAC received a vector of initial inliers (length "
f"{len(initial_inliers)}) that didn't match the number of "
f"samples ({num_samples}). The vector of initial inliers should "
f"have the same length as the number of samples and contain only "
f"True (this sample is an initial inlier) and False (this one "
f"isn't) values."
)
# for the first run use initial guess of inliers
spl_idxs = (
initial_inliers
if initial_inliers is not None
else rng.choice(num_samples, min_samples, replace=False)
)
# Ensure model_class has from_estimate class method.
model_class = add_from_estimate(model_class)
# Check protocol.
if not isinstance(model_class, RansacModelProtocol):
raise TypeError(
f"`model_class` {model_class} should be of (protocol) type "
"RansacModelProtocol"
)
num_trials = 0
# max_trials can be updated inside the loop, so this cannot be a for-loop
while num_trials < max_trials:
num_trials += 1
# do sample selection according data pairs
samples = [d[spl_idxs] for d in data]
# for next iteration choose random sample set and be sure that
# no samples repeat
spl_idxs = rng.choice(num_samples, min_samples, replace=False)
# optional check if random sample set is valid
if validate_data and not is_data_valid(*samples):
continue
model = model_class.from_estimate(*samples)
# backwards compatibility
if not model:
continue
# optional check if estimated model is valid
if validate_model and not is_model_valid(model, *samples):
continue
residuals = np.abs(model.residuals(*data))
# consensus set / inliers
inliers = residuals < residual_threshold
residuals_sum = residuals.dot(residuals)
# choose as new best model if number of inliers is maximal
inliers_count = np.count_nonzero(inliers)
if (
# more inliers
inliers_count > best_inlier_num
# same number of inliers but less "error" in terms of residuals
or (
inliers_count == best_inlier_num
and residuals_sum < best_inlier_residuals_sum
)
):
best_inlier_num = inliers_count
best_inlier_residuals_sum = residuals_sum
best_inliers = inliers
max_trials = min(
max_trials,
_dynamic_max_trials(
best_inlier_num, num_samples, min_samples, stop_probability
),
)
if (
best_inlier_num >= stop_sample_num
or best_inlier_residuals_sum <= stop_residuals_sum
):
break
# estimate final model using all inliers
if any(best_inliers):
# select inliers for each data array
data_inliers = [d[best_inliers] for d in data]
model = model_class.from_estimate(*data_inliers)
if validate_model and not is_model_valid(model, *data_inliers):
warn("Estimated model is not valid. Try increasing max_trials.")
else:
model = None
best_inliers = None
warn("No inliers found. Model not fitted")
# Return model from wrapper, otherwise model itself.
return getattr(model, 'model', model), best_inliers
| EllipseModel |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/depends_on_manyvariants/package.py | {
"start": 217,
"end": 634
} | class ____(Package):
"""
A package with a dependency on `manyvariants`, so that `manyvariants` can
be spliced in tests.
"""
homepage = "https://www.test.com"
has_code = False
version("1.0")
version("2.0")
depends_on("manyvariants@1.0", when="@1.0")
depends_on("manyvariants@2.0", when="@2.0")
def install(self, spec, prefix):
touch(prefix.bar)
| DependsOnManyvariants |
python | pytorch__pytorch | torch/__init__.py | {
"start": 21775,
"end": 25881
} | class ____:
"""
Like a float (including magic methods), but redirects all operations on the
wrapped node. This is used in particular to symbolically record operations
in the symbolic shape workflow.
"""
def __init__(self, node):
# This field MUST be named node; C++ binding code assumes that this
# class has a field named node that stores SymNode
self.node = node
def __truediv__(self, other):
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
return NotImplemented
return self.__float_truediv__(sym_float(other))
def __rtruediv__(self, other):
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
return NotImplemented
return self.__rfloat_truediv__(sym_float(other))
def __floordiv__(self, other):
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
return NotImplemented
return sym_float(math.floor(self / sym_float(other)))
def __rfloordiv__(self, other):
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
return NotImplemented
return sym_float(math.floor(sym_float(other) / self))
def __bool__(self):
return self.node.bool_()
def __float__(self):
return self.node.guard_float("", 0)
def __int__(self):
return self.__trunc__().__int__()
# Symbolic power does NOT work with negative base, this is to avoid
# potential complex outputs
def __pow__(self, other):
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
return NotImplemented
torch._check(self >= 0)
return self.__float_pow__(other)
def __rpow__(self, other):
if not isinstance(other, (builtins.int, builtins.float, SymInt, SymFloat)):
return NotImplemented
torch._check(other >= 0)
return self.__rfloat_pow__(other)
# Magic methods installed by torch.fx.experimental.sym_node
def __eq__(self, other: object) -> builtins.bool:
raise TypeError("type stub not overridden")
def __lt__(self, other) -> builtins.bool:
raise TypeError("type stub not overridden")
def __gt__(self, other) -> builtins.bool:
raise TypeError("type stub not overridden")
def __le__(self, other) -> builtins.bool:
raise TypeError("type stub not overridden")
def __ge__(self, other) -> builtins.bool:
raise TypeError("type stub not overridden")
def __float_pow__(self, other) -> "SymFloat":
raise TypeError("type stub not overridden")
def __rfloat_pow__(self, other) -> "SymFloat":
raise TypeError("type stub not overridden")
def __float_truediv__(self, other) -> "SymFloat":
raise TypeError("type stub not overridden")
def __rfloat_truediv__(self, other) -> "SymFloat":
raise TypeError("type stub not overridden")
def __trunc__(self):
raise TypeError("type stub not overridden")
def __sym_max__(self, other):
raise TypeError("type stub not overridden")
def __sym_min__(self, other):
raise TypeError("type stub not overridden")
def __sym_int__(self):
raise TypeError("type stub not overridden")
def is_integer(self):
"""Return True if the float is an integer."""
raise TypeError("type stub not overridden")
def as_integer_ratio(self) -> tuple[builtins.int, builtins.int]:
"""Represent this float as an exact integer ratio"""
return builtins.float(self).as_integer_ratio()
def __repr__(self):
return self.node._graph_repr()
def _sympy_(self):
return self.node.expr
def __hash__(self):
return hash(builtins.float(self))
def conjugate(self) -> "SymFloat":
"""Returns the complex conjugate of the float."""
return self
def hex(self) -> str:
"""Returns the hexadecimal representation of the float."""
return self.node.guard_float("", 0).hex()
| SymFloat |
python | readthedocs__readthedocs.org | readthedocs/integrations/models.py | {
"start": 9677,
"end": 11511
} | class ____(TimeStampedModel):
"""Inbound webhook integration for projects."""
GITHUBAPP = "githubapp"
GITHUB_WEBHOOK = "github_webhook"
BITBUCKET_WEBHOOK = "bitbucket_webhook"
GITLAB_WEBHOOK = "gitlab_webhook"
API_WEBHOOK = "api_webhook"
WEBHOOK_INTEGRATIONS = (
(GITHUB_WEBHOOK, _("GitHub incoming webhook")),
(BITBUCKET_WEBHOOK, _("Bitbucket incoming webhook")),
(GITLAB_WEBHOOK, _("GitLab incoming webhook")),
(API_WEBHOOK, _("Generic API incoming webhook")),
)
REMOTE_ONLY_INTEGRATIONS = ((GITHUBAPP, _("GitHub App")),)
INTEGRATIONS = WEBHOOK_INTEGRATIONS + REMOTE_ONLY_INTEGRATIONS
project = models.ForeignKey(
Project,
related_name="integrations",
on_delete=models.CASCADE,
)
integration_type = models.CharField(
_("Integration type"),
max_length=32,
choices=INTEGRATIONS,
)
provider_data = models.JSONField(
_("Provider data"),
null=True,
blank=True,
)
exchanges = GenericRelation(
"HttpExchange",
related_query_name="integrations",
)
secret = models.CharField(
help_text=_("Secret used to validate the payload of the webhook"),
max_length=255,
blank=True,
null=True,
)
objects = IntegrationQuerySet.as_manager()
# Integration attributes
has_sync = False
is_remote_only = False
is_active = True
def __str__(self):
return self.get_integration_type_display()
def save(self, *args, **kwargs):
if not self.secret:
self.secret = get_random_string(length=32)
super().save(*args, **kwargs)
def get_absolute_url(self) -> str:
return reverse("projects_integrations_detail", args=(self.project.slug, self.pk))
| Integration |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax.py | {
"start": 1602,
"end": 1662
} | class ____:
my_var: int | str
@dataclass()
| CustomDataClass2 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/dependency.py | {
"start": 10081,
"end": 12040
} | class ____(Node):
definition: "OpDefinition" # pyright: ignore[reportIncompatibleVariableOverride]
def __init__(
self,
name: str,
definition: "OpDefinition",
graph_definition: "GraphDefinition",
tags: Optional[Mapping[str, str]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
retry_policy: Optional[RetryPolicy] = None,
):
from dagster._core.definitions.op_definition import OpDefinition
check.inst_param(definition, "definition", OpDefinition)
super().__init__(name, definition, graph_definition, tags, hook_defs, retry_policy)
def get_resource_requirements(
self,
outer_container: "GraphDefinition",
parent_handle: Optional["NodeHandle"] = None,
asset_layer: Optional["AssetLayer"] = None,
) -> Iterator["ResourceRequirement"]:
from dagster._core.definitions.resource_requirement import InputManagerRequirement
cur_node_handle = NodeHandle(self.name, parent_handle)
for requirement in self.definition.get_resource_requirements(
handle=cur_node_handle,
asset_layer=asset_layer,
):
# If requirement is a root input manager requirement, but the corresponding node has an upstream output, then ignore the requirement.
if (
isinstance(requirement, InputManagerRequirement)
and outer_container.dependency_structure.has_deps(
NodeInput(self, self.definition.input_def_named(requirement.input_name))
)
and requirement.root_input
):
continue
yield requirement
for hook_def in self.hook_defs:
yield from hook_def.get_resource_requirements(self.describe_node())
def describe_node(self) -> str:
return f"op '{self.name}'"
@whitelist_for_serdes(storage_name="SolidHandle")
| OpNode |
python | jazzband__django-oauth-toolkit | tests/test_hybrid.py | {
"start": 4927,
"end": 31591
} | class ____(BaseTest):
def test_skip_authorization_completely(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="hy_test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_id_token_skip_authorization_completely(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="hy_test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_pre_auth_invalid_client(self):
"""
Test error for an invalid client_id with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": "fakeclientid",
"response_type": "code",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.context_data["url"],
"?error=invalid_request&error_description=Invalid+client_id+parameter+value.",
)
def test_pre_auth_valid_client(self):
"""
Test response for a valid client_id with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code id_token",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://example.org")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "read write")
self.assertEqual(form["client_id"].value(), self.application.client_id)
def test_id_token_pre_auth_valid_client(self):
"""
Test response for a valid client_id with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code id_token",
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"nonce": "nonce",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://example.org")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "openid")
self.assertEqual(form["client_id"].value(), self.application.client_id)
def test_pre_auth_valid_client_custom_redirect_uri_scheme(self):
"""
Test response for a valid client_id with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code id_token",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "custom-scheme://example.com",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "custom-scheme://example.com")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "read write")
self.assertEqual(form["client_id"].value(), self.application.client_id)
def test_pre_auth_approval_prompt(self):
tok = AccessToken.objects.create(
user=self.hy_test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code id_token",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"approval_prompt": "auto",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
# user already authorized the application, but with different scopes: prompt them.
tok.scope = "read"
tok.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_pre_auth_approval_prompt_default(self):
self.oauth2_settings.REQUEST_APPROVAL_PROMPT = "force"
self.assertEqual(self.oauth2_settings.REQUEST_APPROVAL_PROMPT, "force")
AccessToken.objects.create(
user=self.hy_test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code id_token",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_pre_auth_approval_prompt_default_override(self):
self.oauth2_settings.REQUEST_APPROVAL_PROMPT = "auto"
AccessToken.objects.create(
user=self.hy_test_user,
token="1234567890",
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope="read write",
)
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_pre_auth_default_redirect(self):
"""
Test for default redirect uri if omitted from query string with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
self.application.redirect_uris = "http://localhost"
self.application.save()
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code id_token",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://localhost")
def test_pre_auth_forbibben_redirect(self):
"""
Test error when passing a forbidden redirect_uri in query string with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "code",
"redirect_uri": "http://forbidden.it",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_pre_auth_wrong_response_type(self):
"""
Test error when passing a wrong response_type in query string
"""
self.client.login(username="hy_test_user", password="123456")
query_string = urlencode(
{
"client_id": self.application.client_id,
"response_type": "WRONG",
"redirect_uri": "http://example.org",
}
)
url = "{url}?{qs}".format(url=reverse("oauth2_provider:authorize"), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertIn("error=unsupported_response_type", response["Location"])
def test_code_post_auth_allow_code_token(self):
"""
Test authorization code is given for an allowed request with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "http://example.org",
"response_type": "code token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("access_token=", response["Location"])
def test_code_post_auth_allow_code_id_token(self):
"""
Test authorization code is given for an allowed request with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "http://example.org",
"response_type": "code id_token",
"allow": True,
"nonce": "nonce",
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("id_token=", response["Location"])
def test_code_post_auth_allow_code_id_token_token(self):
"""
Test authorization code is given for an allowed request with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "http://example.org",
"response_type": "code id_token token",
"allow": True,
"nonce": "nonce",
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("id_token=", response["Location"])
self.assertIn("access_token=", response["Location"])
def test_id_token_code_post_auth_allow(self):
"""
Test authorization code is given for an allowed request with response_type: code
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"response_type": "code id_token",
"allow": True,
"nonce": "nonce",
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("id_token=", response["Location"])
def test_code_post_auth_deny(self):
"""
Test error when resource owner deny access
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=access_denied", response["Location"])
def test_code_post_auth_bad_responsetype(self):
"""
Test authorization code is given for an allowed request with a response_type not supported
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "UNKNOWN",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org?error", response["Location"])
def test_code_post_auth_forbidden_redirect_uri(self):
"""
Test authorization code is given for an allowed request with a forbidden redirect_uri
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://forbidden.it",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 400)
def test_code_post_auth_malicious_redirect_uri(self):
"""
Test validation of a malicious redirect_uri
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "/../",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 400)
def test_code_post_auth_allow_custom_redirect_uri_scheme_code_token(self):
"""
Test authorization code is given for an allowed request with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "custom-scheme://example.com",
"response_type": "code token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("custom-scheme://example.com", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("access_token=", response["Location"])
def test_code_post_auth_allow_custom_redirect_uri_scheme_code_id_token(self):
"""
Test authorization code is given for an allowed request with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "custom-scheme://example.com",
"response_type": "code id_token",
"allow": True,
"nonce": "nonce",
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("custom-scheme://example.com", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("id_token=", response["Location"])
def test_code_post_auth_allow_custom_redirect_uri_scheme_code_id_token_token(self):
"""
Test authorization code is given for an allowed request with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "custom-scheme://example.com",
"response_type": "code id_token token",
"allow": True,
"nonce": "nonce",
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("custom-scheme://example.com", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("id_token=", response["Location"])
self.assertIn("access_token=", response["Location"])
def test_code_post_auth_deny_custom_redirect_uri_scheme(self):
"""
Test error when resource owner deny access
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "custom-scheme://example.com",
"response_type": "code",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("custom-scheme://example.com?", response["Location"])
self.assertIn("error=access_denied", response["Location"])
def test_code_post_auth_redirection_uri_with_querystring_code_token(self):
"""
Tests that a redirection uri with query string is allowed
and query string is retained on redirection.
See https://rfc-editor.org/rfc/rfc6749.html#section-3.1.2
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "http://example.com?foo=bar",
"response_type": "code token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.com?foo=bar", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("access_token=", response["Location"])
def test_code_post_auth_redirection_uri_with_querystring_code_id_token(self):
"""
Tests that a redirection uri with query string is allowed
and query string is retained on redirection.
See https://rfc-editor.org/rfc/rfc6749.html#section-3.1.2
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "http://example.com?foo=bar",
"response_type": "code id_token",
"allow": True,
"nonce": "nonce",
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.com?foo=bar", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("id_token=", response["Location"])
def test_code_post_auth_redirection_uri_with_querystring_code_id_token_token(self):
"""
Tests that a redirection uri with query string is allowed
and query string is retained on redirection.
See https://rfc-editor.org/rfc/rfc6749.html#section-3.1.2
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid read write",
"redirect_uri": "http://example.com?foo=bar",
"response_type": "code id_token token",
"allow": True,
"nonce": "nonce",
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.com?foo=bar", response["Location"])
self.assertIn("code=", response["Location"])
self.assertIn("id_token=", response["Location"])
self.assertIn("access_token=", response["Location"])
def test_code_post_auth_failing_redirection_uri_with_querystring(self):
"""
Test that in case of error the querystring of the redirection uri is preserved
See https://github.com/evonove/django-oauth-toolkit/issues/238
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com?foo=bar",
"response_type": "code",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(
"http://example.com?foo=bar&error=access_denied&state=random_state_string", response["Location"]
)
def test_code_post_auth_fails_when_redirect_uri_path_is_invalid(self):
"""
Tests that a redirection uri is matched using scheme + netloc + path
"""
self.client.login(username="hy_test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com/a?foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 400)
@pytest.mark.oauth2_settings(presets.OIDC_SETTINGS_RW)
| TestHybridView |
python | ray-project__ray | doc/source/serve/doc_code/varying_deps.py | {
"start": 144,
"end": 1106
} | class ____:
def __init__(
self, ver_25_handle: DeploymentHandle, ver_26_handle: DeploymentHandle
):
self.ver_25_handle = ver_25_handle
self.ver_26_handle = ver_26_handle
async def __call__(self, request: Request):
if request.query_params["version"] == "25":
return await self.ver_25_handle.remote()
else:
return await self.ver_26_handle.remote()
@serve.deployment
def requests_version():
return requests.__version__
ver_25 = requests_version.options(
name="25",
ray_actor_options={"runtime_env": {"pip": ["requests==2.25.1"]}},
).bind()
ver_26 = requests_version.options(
name="26",
ray_actor_options={"runtime_env": {"pip": ["requests==2.26.0"]}},
).bind()
app = Ingress.bind(ver_25, ver_26)
serve.run(app)
assert requests.get("http://127.0.0.1:8000/?version=25").text == "2.25.1"
assert requests.get("http://127.0.0.1:8000/?version=26").text == "2.26.0"
| Ingress |
python | ansible__ansible | test/lib/ansible_test/_internal/ci/azp.py | {
"start": 5240,
"end": 11006
} | class ____:
"""Change information for an Azure Pipelines build."""
def __init__(self, args: CommonConfig) -> None:
self.args = args
self.git = Git()
try:
self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
self.project = os.environ['SYSTEM_TEAMPROJECT']
self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
self.source_branch = os.environ['BUILD_SOURCEBRANCH']
self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0]) from None
if self.source_branch.startswith('refs/tags/'):
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
self.org = self.org_uri.strip('/').split('/')[-1]
self.is_pr = self.pr_branch_name is not None
if self.is_pr:
# HEAD is a merge commit of the PR branch into the target branch
# HEAD^1 is HEAD of the target branch (first parent of merge commit)
# HEAD^2 is HEAD of the PR branch (second parent of merge commit)
# see: https://git-scm.com/docs/gitrevisions
self.branch = self.pr_branch_name
self.base_commit = 'HEAD^1'
self.commit = 'HEAD^2'
else:
commits = self.get_successful_merge_run_commits()
self.branch = self.source_branch_name
self.base_commit = self.get_last_successful_commit(commits)
self.commit = 'HEAD'
self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
if self.base_commit:
self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
# <commit>...<commit>
# This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
# see: https://git-scm.com/docs/git-diff
dot_range = '%s...%s' % (self.base_commit, self.commit)
self.paths = sorted(self.git.get_diff_names([dot_range]))
self.diff = self.git.get_diff([dot_range])
else:
self.paths = None # act as though change detection not enabled, do not filter targets
self.diff = []
def get_successful_merge_run_commits(self) -> set[str]:
"""
Return a set of recent successful merge commits from Azure Pipelines.
A warning will be displayed and no commits returned if an error occurs.
"""
try:
commits = self._get_successful_merge_run_commits()
except ApplicationError as ex:
commits = set()
display.warning(f'Cannot determine changes. All tests will be executed. Reason: {ex}')
return commits
def _get_successful_merge_run_commits(self) -> set[str]:
    """Return a set of recent successful merge commits from Azure Pipelines."""
    parameters = dict(
        maxBuildsPerDefinition=100,  # max 5000
        queryOrder='queueTimeDescending',  # assumes under normal circumstances that later queued jobs are for later commits
        resultFilter='succeeded',
        reasonFilter='batchedCI',  # may miss some non-PR reasons, the alternative is to filter the list after receiving it
        repositoryType=self.repo_type,
        repositoryId='%s/%s' % (self.org, self.project),
    )

    url = '%s%s/_apis/build/builds?api-version=7.1&%s' % (self.org_uri, self.project, urllib.parse.urlencode(parameters))

    http = HttpClient(self.args, always=True)
    response = http.get(url)

    # `...` is a sentinel distinguishing "body was not JSON" from any real decoded value.
    parsed: object = ...

    try:
        parsed = json.loads(response.response)
    except json.JSONDecodeError:
        kind = 'Non-JSON'
    else:
        kind = 'JSON'

    result_description = f'HTTP {response.status_code} {kind} result'

    if parsed is ... or response.status_code != 200:
        raise ApplicationError(f'Unable to find project due to {result_description}.')

    try:
        return {build['sourceVersion'] for build in parsed['value']}
    except KeyError as ex:
        raise ApplicationError(f'Missing {ex.args[0]!r} key in response from {result_description}.') from ex
    except (ValueError, TypeError) as ex:
        raise ApplicationError(f'Unexpected response format from {result_description}: {ex}') from ex
def get_last_successful_commit(self, commits: set[str]) -> t.Optional[str]:
    """Return the last successful commit from git history that is found in the given commit set, or None if there is no match."""
    commit_history = self.git.get_rev_list(max_count=100)
    # History is ordered newest-first, so the first match is the most recent successful commit.
    return next((commit for commit in commit_history if commit in commits), None)
def vso_add_attachment(file_type: str, file_name: str, path: str) -> None:
    """Upload and attach a file to the current timeline record."""
    properties = dict(type=file_type, name=file_name)
    vso('task.addattachment', properties, path)
def vso(name: str, data: dict[str, str], message: str) -> None:
    """
    Write a logging command for the Azure Pipelines agent to process.
    See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
    """
    # Properties are rendered as semicolon-separated key=value pairs inside the brackets.
    properties = ';'.join('='.join((key, value)) for key, value in data.items())
    display.info('##vso[%s %s]%s' % (name, properties, message))
| AzurePipelinesChanges |
python | encode__django-rest-framework | tests/models.py | {
"start": 1865,
"end": 2191
} | class ____(RESTFrameworkModel):
target = models.ForeignKey(ForeignKeyTarget, help_text='Target',
verbose_name='Target',
limit_choices_to={"name__startswith": "limited-"},
on_delete=models.CASCADE)
| ForeignKeySourceWithLimitedChoices |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/field_utils.py | {
"start": 7492,
"end": 9028
} | class ____(_ConfigHasFields):
"""Defines a config dict with a partially specified schema.
A permissive dict allows partial specification of the config schema. Any fields with a
specified schema will be type checked. Other fields will be allowed, but will be ignored by
the type checker.
Args:
fields (Dict[str, Field]): The partial specification of the config dict.
**Examples:**
.. code-block:: python
@op(config_schema=Field(Permissive({'required': Field(String)})))
def map_config_op(context) -> List:
return sorted(list(context.op_config.items()))
"""
def __new__(cls, fields=None, description=None):
return _memoize_inst_in_field_cache(
cls,
Permissive,
_define_permissive_dict_key(
expand_fields_dict(fields) if fields else None, description
),
)
def __init__(self, fields=None, description=None):
# if we hit in field cache avoid double init
if self._initialized:
return
fields = expand_fields_dict(fields) if fields else None
super().__init__(
key=_define_permissive_dict_key(fields, description),
kind=ConfigTypeKind.PERMISSIVE_SHAPE,
fields=fields or dict(),
description=description,
)
self._initialized = True
def _define_selector_key(fields, description):
return "Selector." + compute_fields_hash(fields, description=description)
@public
| Permissive |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 33108,
"end": 33940
} | class ____:
subpath = ('__init__.py',)
traversed = None
environ = {'REQUEST_METHOD': 'GET', 'wsgi.version': (1, 0)}
def get_response(self, application):
return application(None, None)
def httpdate(ts):
return ts.strftime("%a, %d %b %Y %H:%M:%S GMT")
def read_(filename):
with open(filename, 'rb') as fp:
val = fp.read()
return val
def _assertBody(body, filename):
if defaultlocale is None: # pragma: no cover
# If system locale does not have an encoding then default to utf-8
filename = filename.encode('utf-8')
# strip both \n and \r for windows
body = body.replace(b'\r', b'')
body = body.replace(b'\n', b'')
data = read_(filename)
data = data.replace(b'\r', b'')
data = data.replace(b'\n', b'')
assert body == data
| DummyRequest |
python | PyCQA__pylint | tests/functional/b/broad_exception/broad_exception_caught_trystar.py | {
"start": 104,
"end": 607
} | class ____(CustomBroadException):
pass
try:
__revision__ += 1
except* Exception: # [broad-exception-caught]
print('error')
try:
__revision__ += 1
except* BaseException: # [broad-exception-caught]
print('error')
try:
__revision__ += 1
except* ValueError:
print('error')
try:
__revision__ += 1
except CustomBroadException: # [broad-exception-caught]
print('error')
try:
__revision__ += 1
except* CustomNarrowException:
print('error')
| CustomNarrowException |
python | pezy__LeetCode | 103. Maximum Depth of Binary Tree/solution.py | {
"start": 128,
"end": 739
} | class ____:
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
if __name__ == "__main__":
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.left = TreeNode(4)
root.left.right = TreeNode(5)
root.right.left = TreeNode(6)
root.left.left.left = TreeNode(7)
root.left.left.right = TreeNode(8)
root.left.left.right.left = TreeNode(9)
print(Solution().maxDepth(root))
| Solution |
python | pikepdf__pikepdf | src/pikepdf/canvas.py | {
"start": 27621,
"end": 27791
} | class ____:
"""Loaded image.
This class is used to track images that have been loaded into a
canvas.
"""
name: Name
image: Image.Image
| LoadedImage |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_with_poly.py | {
"start": 5382,
"end": 5482
} | class ____(
_WithPolymorphicBase, _PolymorphicAliasedJoins
):
pass
| PolymorphicAliasedJoinsTest |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 63235,
"end": 64417
} | class ____(Response):
"""
Response of queues.remove_task endpoint.
:param removed: Number of tasks removed (0 or 1)
:type removed: int
"""
_service = "queues"
_action = "remove_task"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"removed": {
"description": "Number of tasks removed (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, removed: Optional[int] = None, **kwargs: Any) -> None:
super(RemoveTaskResponse, self).__init__(**kwargs)
self.removed = removed
@schema_property("removed")
def removed(self) -> Optional[int]:
return self._property_removed
@removed.setter
def removed(self, value: Optional[int]) -> None:
if value is None:
self._property_removed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "removed", six.integer_types)
self._property_removed = value
| RemoveTaskResponse |
python | ansible__ansible | test/integration/targets/ansible-test-sanity-pylint/ansible_collections/ns/col/plugins/lookup/deprecated.py | {
"start": 1336,
"end": 1417
} | class ____(LookupBase):
def run(self, **kwargs):
return []
| LookupModule |
python | getsentry__sentry | tests/sentry/issues/test_ingest.py | {
"start": 35980,
"end": 36904
} | class ____(OccurrenceTestMixin, TestCase):
def test(self) -> None:
culprit = "abcde" * 100
occurrence = self.build_occurrence(culprit=culprit)
event = self.store_event(data={}, project_id=self.project.id)
assert _create_issue_kwargs(occurrence, event, None) == {
"platform": event.platform,
"message": event.search_message,
"level": LOG_LEVELS_MAP.get(occurrence.level),
# Should truncate the culprit to max allowable length
"culprit": f"{culprit[:MAX_CULPRIT_LENGTH-3]}...",
"last_seen": event.datetime,
"first_seen": event.datetime,
"active_at": event.datetime,
"type": occurrence.type.type_id,
"first_release": None,
"data": materialize_metadata(occurrence, event),
"priority": occurrence.type.default_priority,
}
| CreateIssueKwargsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.