language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/exceptions.py | {
"start": 19592,
"end": 20300
} | class ____(RayError):
"""Indicates that the local disk is full.
This is raised if the attempt to store the object fails
because both the object store and disk are full.
"""
def __str__(self):
# TODO(scv119): expose more disk usage information and link to a doc.
return super(OutOfDiskError, self).__str__() + (
"\n"
"The object cannot be created because the local object store"
" is full and the local disk's utilization is over capacity"
" (95% by default)."
"Tip: Use `df` on this node to check disk usage and "
"`ray memory` to check object store memory usage."
)
@PublicAPI
| OutOfDiskError |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 5488,
"end": 5934
} | class ____(TypedDict):
"""Supported file extension and contexts schema."""
# File extension or identifier of the input context.
input_extension: str
# The supported contexts for the given input extension, e.g. file,
# selection, cell or others.
# The context can be compared against the values of `RunContext`. e.g.,
# `info['context'] == RunContext.File`
contexts: List[ExtendedContext]
| SupportedExtensionContexts |
python | pytorch__pytorch | torch/_inductor/runtime/caching/interfaces.py | {
"start": 861,
"end": 956
} | class ____(Enum):
RECORD = "record"
GET = "get"
INSERT = "insert"
| _IntfCallbackOrigin |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/default_types_test.py | {
"start": 2227,
"end": 2549
} | class ____:
"""Helps test attrs collections."""
__attrs_attrs__ = (TestAttr('a'), TestAttr('b'))
def __init__(self, a, b):
self.a = a
self.b = b
def __eq__(self, other):
return (
isinstance(other, TestAttrsClass)
and self.a == other.a
and self.b == other.b
)
| TestAttrsClass |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor11.py | {
"start": 533,
"end": 800
} | class ____(Generic[K, V]):
def __init__(self, g: MyFuncType[K, V]) -> None:
self.g = g
MyFuncMapping = Mapping[K, Optional[MyFunc[K, V]]]
my_func_defaultdict: MyFuncMapping[str, int] = defaultdict(
lambda: None, {"x": MyFunc(lambda f: f("a"))}
)
| MyFunc |
python | ansible__ansible | lib/ansible/plugins/__init__.py | {
"start": 1723,
"end": 1944
} | class ____(t.Protocol):
"""Protocol to provide type-safe access to config for plugin-related mixins."""
def get_option(self, option: str, hostvars: dict[str, object] | None = None) -> t.Any: ...
| _ConfigurablePlugin |
python | google__jax | tests/tree_util_test.py | {
"start": 50669,
"end": 56459
} | class ____(jtu.JaxTestCase):
"""Simple smoke-tests for tree_util aliases under jax.tree"""
def test_tree_all(self):
obj = [True, True, (True, False)]
self.assertEqual(
jax.tree.all(obj),
tree_util.tree_all(obj),
)
def test_tree_all_is_leaf(self):
obj = [True, True, (True, False)]
is_leaf = lambda x: isinstance(x, tuple)
self.assertEqual(
jax.tree.all(obj, is_leaf=is_leaf),
tree_util.tree_all(obj, is_leaf=is_leaf),
)
def test_tree_flatten(self):
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.flatten(obj),
tree_util.tree_flatten(obj),
)
def test_tree_flatten_is_leaf(self):
obj = [1, 2, (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
self.assertEqual(
jax.tree.flatten(obj, is_leaf=is_leaf),
tree_util.tree_flatten(obj, is_leaf=is_leaf),
)
def test_tree_leaves(self):
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.leaves(obj),
tree_util.tree_leaves(obj),
)
def test_tree_leaves_is_leaf(self):
obj = [1, 2, (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
self.assertEqual(
jax.tree.leaves(obj, is_leaf=is_leaf),
tree_util.tree_leaves(obj, is_leaf=is_leaf),
)
def test_tree_map(self):
func = lambda x: x * 2
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.map(func, obj),
tree_util.tree_map(func, obj),
)
def test_tree_map_is_leaf(self):
func = lambda x: x * 2
obj = [1, 2, (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
self.assertEqual(
jax.tree.map(func, obj, is_leaf=is_leaf),
tree_util.tree_map(func, obj, is_leaf=is_leaf),
)
def test_tree_reduce(self):
func = lambda a, b: a + b
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.reduce(func, obj),
tree_util.tree_reduce(func, obj),
)
def test_tree_reduce_is_leaf(self):
func = lambda a, b: a + b
obj = [(1, 2), (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
self.assertEqual(
jax.tree.reduce(func, obj, is_leaf=is_leaf),
tree_util.tree_reduce(func, obj, is_leaf=is_leaf),
)
def test_tree_reduce_associative(self):
func = lambda a, b: a + b
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.reduce_associative(func, obj),
tree_util.tree_reduce_associative(func, obj),
)
def test_tree_reduce_associative_is_leaf(self):
func = lambda a, b: a + b
obj = [(1, 2), (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
self.assertEqual(
jax.tree.reduce_associative(func, obj, is_leaf=is_leaf),
tree_util.tree_reduce_associative(func, obj, is_leaf=is_leaf),
)
def test_tree_structure(self):
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.structure(obj),
tree_util.tree_structure(obj),
)
def test_tree_structure_is_leaf(self):
obj = [1, 2, (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
self.assertEqual(
jax.tree.structure(obj, is_leaf=is_leaf),
tree_util.tree_structure(obj, is_leaf=is_leaf),
)
def test_tree_transpose(self):
obj = [(1, 2), (3, 4), (5, 6)]
outer_treedef = tree_util.tree_structure(['*', '*', '*'])
inner_treedef = tree_util.tree_structure(('*', '*'))
self.assertEqual(
jax.tree.transpose(outer_treedef, inner_treedef, obj),
tree_util.tree_transpose(outer_treedef, inner_treedef, obj)
)
def test_tree_broadcast(self):
prefix = (1, 2, 3)
full = (0, {'a': 0, 'b': 0}, (0, 0))
actual = jax.tree.broadcast(prefix, full)
expected = (1, {'a': 2, 'b': 2}, (3, 3))
self.assertEqual(actual, expected)
def test_tree_unflatten(self):
leaves, treedef = jax.tree.flatten([1, 2, (3, 4)])
self.assertEqual(
jax.tree.unflatten(treedef, leaves),
tree_util.tree_unflatten(treedef, leaves)
)
def test_tree_flatten_with_path(self):
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.flatten_with_path(obj),
tree_util.tree_flatten_with_path(obj),
)
def test_tree_flatten_with_path_is_leaf(self):
obj = [1, 2, (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
is_leaf = lambda kp, x: isinstance(x, tuple)
self.assertEqual(
jax.tree.flatten_with_path(obj, is_leaf, is_leaf_takes_path=True),
tree_util.tree_flatten_with_path(obj, is_leaf, is_leaf_takes_path=True),
)
def test_tree_leaves_with_path(self):
obj = [1, 2, (3, 4)]
self.assertEqual(
jax.tree.leaves_with_path(obj),
tree_util.tree_leaves_with_path(obj),
)
def test_tree_leaves_with_path_is_leaf(self):
obj = [1, 2, (3, 4)]
is_leaf = lambda x: isinstance(x, tuple)
is_leaf = lambda kp, x: isinstance(x, tuple)
self.assertEqual(
jax.tree.leaves_with_path(
obj, is_leaf=is_leaf, is_leaf_takes_path=True
),
tree_util.tree_leaves_with_path(
obj, is_leaf=is_leaf, is_leaf_takes_path=True
),
)
def test_tree_map_with_path(self):
func = lambda kp, x, y: (sum(k.idx for k in kp), x + y)
obj = [1, 2, (3, 4)]
obj2 = [5, 6, (7, 8)]
self.assertEqual(
jax.tree.map_with_path(func, obj, obj2),
tree_util.tree_map_with_path(func, obj, obj2),
)
def test_tree_map_with_path_is_leaf(self):
func = lambda kp, x, y: (sum(k.idx for k in kp), x + y)
obj = [1, 2, (3, 4)]
obj2 = [5, 6, (7, 8)]
is_leaf = lambda x: isinstance(x, tuple)
is_leaf = lambda kp, x: isinstance(x, tuple)
self.assertEqual(
jax.tree.map_with_path(
func, obj, obj2, is_leaf=is_leaf, is_leaf_takes_path=True
),
tree_util.tree_map_with_path(
func, obj, obj2, is_leaf=is_leaf, is_leaf_takes_path=True
),
)
| TreeAliasTest |
python | tensorflow__tensorflow | tensorflow/python/tools/api/generator2/extractor/extractor.py | {
"start": 1391,
"end": 1461
} | class ____(Exception):
"""Exception for bad exports."""
| BadExportError |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_offsets.py | {
"start": 26683,
"end": 28057
} | class ____:
def test_get_offset_name(self):
assert BDay().freqstr == "B"
assert BDay(2).freqstr == "2B"
assert BMonthEnd().freqstr == "BME"
assert Week(weekday=0).freqstr == "W-MON"
assert Week(weekday=1).freqstr == "W-TUE"
assert Week(weekday=2).freqstr == "W-WED"
assert Week(weekday=3).freqstr == "W-THU"
assert Week(weekday=4).freqstr == "W-FRI"
assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"
def test_get_offset():
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
_get_offset("gibberish")
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
_get_offset("QS-JAN-B")
pairs = [
("B", BDay()),
("BME", BMonthEnd()),
("W-MON", Week(weekday=0)),
("W-TUE", Week(weekday=1)),
("W-WED", Week(weekday=2)),
("W-THU", Week(weekday=3)),
("W-FRI", Week(weekday=4)),
]
for name, expected in pairs:
offset = _get_offset(name)
assert offset == expected, (
f"Expected {name!r} to yield {expected!r} (actual: {offset!r})"
)
def test_get_offset_legacy():
pairs = [("w@Sat", Week(weekday=5))]
for name, expected in pairs:
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
_get_offset(name)
| TestOffsetNames |
python | sqlalchemy__sqlalchemy | test/sql/test_syntax_extensions.py | {
"start": 2215,
"end": 2476
} | class ____(SyntaxExtension, ClauseElement):
_traverse_internals = []
def apply_to_select(self, select_stmt):
select_stmt.apply_syntax_extension_point(
lambda existing: [self],
"post_criteria",
)
| PostCriteriaClause3 |
python | plotly__plotly.py | plotly/graph_objs/layout/polar/_domain.py | {
"start": 235,
"end": 5045
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.polar"
_path_str = "layout.polar.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this polar subplot .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this polar subplot .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this polar subplot (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this polar subplot (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this polar subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this polar subplot .
x
Sets the horizontal domain of this polar subplot (in
plot fraction).
y
Sets the vertical domain of this polar subplot (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.polar.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this polar subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this polar subplot .
x
Sets the horizontal domain of this polar subplot (in
plot fraction).
y
Sets the vertical domain of this polar subplot (in plot
fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.polar.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | optuna__optuna | optuna/artifacts/_protocol.py | {
"start": 151,
"end": 1803
} | class ____(Protocol):
"""A protocol defining the interface for an artifact backend.
The methods defined in this protocol are not supposed to be directly called by library users.
An artifact backend is responsible for managing the storage and retrieval
of artifact data. The backend should provide methods for opening, writing
and removing artifacts.
"""
def open_reader(self, artifact_id: str) -> BinaryIO:
"""Open the artifact identified by the artifact_id.
This method should return a binary file-like object in read mode, similar to
``open(..., mode="rb")``. If the artifact does not exist, an
:exc:`~optuna.artifacts.exceptions.ArtifactNotFound` exception
should be raised.
Args:
artifact_id: The identifier of the artifact to open.
Returns:
BinaryIO: A binary file-like object that can be read from.
"""
...
def write(self, artifact_id: str, content_body: BinaryIO) -> None:
"""Save the content to the backend.
Args:
artifact_id: The identifier of the artifact to write to.
content_body: The content to write to the artifact.
"""
...
def remove(self, artifact_id: str) -> None:
"""Remove the artifact identified by the artifact_id.
This method should delete the artifact from the backend. If the artifact does not
exist, an :exc:`~optuna.artifacts.exceptions.ArtifactNotFound` exception
may be raised.
Args:
artifact_id: The identifier of the artifact to remove.
"""
...
| ArtifactStore |
python | walkccc__LeetCode | solutions/1927. Sum Game/1927.py | {
"start": 0,
"end": 325
} | class ____:
def sumGame(self, num: str) -> bool:
n = len(num)
ans = 0.0
def getExpectation(c: str) -> float:
return 4.5 if c == '?' else int(c)
for i in range(n // 2):
ans += getExpectation(num[i])
for i in range(n // 2, n):
ans -= getExpectation(num[i])
return ans != 0.0
| Solution |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 104541,
"end": 105038
} | class ____(Structure):
_fields_ = [("multiprocessorCount", c_uint),
("sharedCopyEngineCount", c_uint),
("sharedDecoderCount", c_uint),
("sharedEncoderCount", c_uint),
("sharedJpegCount", c_uint),
("sharedOfaCount", c_uint),
("gpuInstanceSliceCount", c_uint),
("computeInstanceSliceCount", c_uint),
("memorySizeMB", c_ulonglong),
]
| c_nvmlDeviceAttributes |
python | django__django | tests/staticfiles_tests/storage.py | {
"start": 594,
"end": 1441
} | class ____(storage.Storage):
def _save(self, name, content):
return "dummy"
def _path(self, name):
return os.path.join(settings.STATIC_ROOT, name)
def exists(self, name):
return os.path.exists(self._path(name))
def listdir(self, path):
path = self._path(path)
directories, files = [], []
with os.scandir(path) as entries:
for entry in entries:
if entry.is_dir():
directories.append(entry.name)
else:
files.append(entry.name)
return directories, files
def delete(self, name):
name = self._path(name)
try:
os.remove(name)
except FileNotFoundError:
pass
def path(self, name):
raise NotImplementedError
| PathNotImplementedStorage |
python | doocs__leetcode | solution/1600-1699/1676.Lowest Common Ancestor of a Binary Tree IV/Solution.py | {
"start": 164,
"end": 601
} | class ____:
def lowestCommonAncestor(
self, root: 'TreeNode', nodes: 'List[TreeNode]'
) -> 'TreeNode':
def dfs(root):
if root is None or root.val in s:
return root
left, right = dfs(root.left), dfs(root.right)
if left and right:
return root
return left or right
s = {node.val for node in nodes}
return dfs(root)
| Solution |
python | scrapy__scrapy | scrapy/commands/__init__.py | {
"start": 534,
"end": 4465
} | class ____(ABC):
requires_project: bool = False
requires_crawler_process: bool = True
crawler_process: CrawlerProcessBase | None = None # set in scrapy.cmdline
# default settings to be used for this command instead of global defaults
default_settings: dict[str, Any] = {}
exitcode: int = 0
def __init__(self) -> None:
self.settings: Settings | None = None # set in scrapy.cmdline
def set_crawler(self, crawler: Crawler) -> None:
if hasattr(self, "_crawler"):
raise RuntimeError("crawler already set")
self._crawler: Crawler = crawler
def syntax(self) -> str:
"""
Command syntax (preferably one-line). Do not include command name.
"""
return ""
@abstractmethod
def short_desc(self) -> str:
"""
A short description of the command
"""
return ""
def long_desc(self) -> str:
"""A long description of the command. Return short description when not
available. It cannot contain newlines since contents will be formatted
by optparser which removes newlines and wraps text.
"""
return self.short_desc()
def help(self) -> str:
"""An extensive help for the command. It will be shown when using the
"help" command. It can contain newlines since no post-formatting will
be applied to its contents.
"""
return self.long_desc()
def add_options(self, parser: argparse.ArgumentParser) -> None:
"""
Populate option parse with options available for this command
"""
assert self.settings is not None
group = parser.add_argument_group(title="Global Options")
group.add_argument(
"--logfile", metavar="FILE", help="log file. if omitted stderr will be used"
)
group.add_argument(
"-L",
"--loglevel",
metavar="LEVEL",
default=None,
help=f"log level (default: {self.settings['LOG_LEVEL']})",
)
group.add_argument(
"--nolog", action="store_true", help="disable logging completely"
)
group.add_argument(
"--profile",
metavar="FILE",
default=None,
help="write python cProfile stats to FILE",
)
group.add_argument("--pidfile", metavar="FILE", help="write process ID to FILE")
group.add_argument(
"-s",
"--set",
action="append",
default=[],
metavar="NAME=VALUE",
help="set/override setting (may be repeated)",
)
group.add_argument("--pdb", action="store_true", help="enable pdb on failure")
def process_options(self, args: list[str], opts: argparse.Namespace) -> None:
assert self.settings is not None
try:
self.settings.setdict(arglist_to_dict(opts.set), priority="cmdline")
except ValueError:
raise UsageError("Invalid -s value, use -s NAME=VALUE", print_help=False)
if opts.logfile:
self.settings.set("LOG_ENABLED", True, priority="cmdline")
self.settings.set("LOG_FILE", opts.logfile, priority="cmdline")
if opts.loglevel:
self.settings.set("LOG_ENABLED", True, priority="cmdline")
self.settings.set("LOG_LEVEL", opts.loglevel, priority="cmdline")
if opts.nolog:
self.settings.set("LOG_ENABLED", False, priority="cmdline")
if opts.pidfile:
Path(opts.pidfile).write_text(
str(os.getpid()) + os.linesep, encoding="utf-8"
)
if opts.pdb:
failure.startDebugMode()
@abstractmethod
def run(self, args: list[str], opts: argparse.Namespace) -> None:
"""
Entry point for running commands
"""
raise NotImplementedError
| ScrapyCommand |
python | plotly__plotly.py | tests/test_optional/test_figure_factory/test_figure_factory.py | {
"start": 51608,
"end": 64259
} | class ____(NumpyTestUtilsMixin, TestCaseNoTemplate):
def test_dataframe_input(self):
# check: dataframe is imported
df = "foo"
pattern = (
"Dataframe not inputed. Please use a pandas dataframe to produce "
"a scatterplot matrix."
)
self.assertRaisesRegex(PlotlyError, pattern, ff.create_scatterplotmatrix, df)
def test_one_column_dataframe(self):
# check: dataframe has 1 column or less
df = pd.DataFrame([1, 2, 3])
pattern = (
"Dataframe has only one column. To use the scatterplot matrix, "
"use at least 2 columns."
)
self.assertRaisesRegex(PlotlyError, pattern, ff.create_scatterplotmatrix, df)
def test_valid_diag_choice(self):
# make sure that the diagonal param is valid
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
self.assertRaises(PlotlyError, ff.create_scatterplotmatrix, df, diag="foo")
def test_forbidden_params(self):
# check: the forbidden params of 'marker' in **kwargs
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
kwargs = {"marker": {"size": 15}}
pattern = (
"Your kwargs dictionary cannot include the 'size', 'color' or "
"'colorscale' key words inside the marker dict since 'size' is "
"already an argument of the scatterplot matrix function and both "
"'color' and 'colorscale are set internally."
)
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_scatterplotmatrix, df, **kwargs
)
def test_valid_index_choice(self):
# check: index is a column name
df = pd.DataFrame([[1, 2], [3, 4]], columns=["apple", "pear"])
pattern = (
"Make sure you set the index input variable to one of the column "
"names of your dataframe."
)
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_scatterplotmatrix, df, index="grape"
)
def test_same_data_in_dataframe_columns(self):
# check: either all numbers or strings in each dataframe column
df = pd.DataFrame([["a", 2], [3, 4]])
pattern = (
"Error in dataframe. Make sure all entries of each column are "
"either numbers or strings."
)
self.assertRaisesRegex(PlotlyError, pattern, ff.create_scatterplotmatrix, df)
df = pd.DataFrame([[1, 2], ["a", 4]])
self.assertRaisesRegex(PlotlyError, pattern, ff.create_scatterplotmatrix, df)
def test_same_data_in_index(self):
# check: either all numbers or strings in index column
df = pd.DataFrame([["a", 2], [3, 4]], columns=["apple", "pear"])
pattern = (
"Error in indexing column. Make sure all entries of each column "
"are all numbers or all strings."
)
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_scatterplotmatrix, df, index="apple"
)
df = pd.DataFrame([[1, 2], ["a", 4]], columns=["apple", "pear"])
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_scatterplotmatrix, df, index="apple"
)
def test_valid_colormap(self):
# check: the colormap argument is in a valid form
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
# check: valid plotly scalename is entered
self.assertRaises(
PlotlyError,
ff.create_scatterplotmatrix,
df,
index="a",
colormap="fake_scale",
)
pattern_rgb = (
"Whoops! The elements in your rgb colors tuples cannot exceed 255.0."
)
# check: proper 'rgb' color
self.assertRaisesRegex(
PlotlyError,
pattern_rgb,
ff.create_scatterplotmatrix,
df,
colormap="rgb(500, 1, 1)",
index="c",
)
self.assertRaisesRegex(
PlotlyError,
pattern_rgb,
ff.create_scatterplotmatrix,
df,
colormap=["rgb(500, 1, 1)"],
index="c",
)
pattern_tuple = "Whoops! The elements in your colors tuples cannot exceed 1.0."
# check: proper color tuple
self.assertRaisesRegex(
PlotlyError,
pattern_tuple,
ff.create_scatterplotmatrix,
df,
colormap=(2, 1, 1),
index="c",
)
self.assertRaisesRegex(
PlotlyError,
pattern_tuple,
ff.create_scatterplotmatrix,
df,
colormap=[(2, 1, 1)],
index="c",
)
def test_valid_endpts(self):
# check: the endpts is a list or a tuple
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
pattern = (
"The intervals_endpts argument must be a list or tuple of a "
"sequence of increasing numbers."
)
self.assertRaisesRegex(
PlotlyError,
pattern,
ff.create_scatterplotmatrix,
df,
index="a",
colormap="Hot",
endpts="foo",
)
# check: the endpts are a list of numbers
self.assertRaisesRegex(
PlotlyError,
pattern,
ff.create_scatterplotmatrix,
df,
index="a",
colormap="Hot",
endpts=["a"],
)
# check: endpts is a list of INCREASING numbers
self.assertRaisesRegex(
PlotlyError,
pattern,
ff.create_scatterplotmatrix,
df,
index="a",
colormap="Hot",
endpts=[2, 1],
)
def test_dictionary_colormap(self):
# if colormap is a dictionary, make sure it all the values in the
# index column are keys in colormap
df = pd.DataFrame(
[["apple", "happy"], ["pear", "sad"]], columns=["Fruit", "Emotion"]
)
colormap = {"happy": "rgb(5, 5, 5)"}
pattern = (
"If colormap is a dictionary, all the names in the index must be keys."
)
self.assertRaisesRegex(
PlotlyError,
pattern,
ff.create_scatterplotmatrix,
df,
index="Emotion",
colormap=colormap,
)
def test_scatter_plot_matrix(self):
# check if test scatter plot matrix without index or theme matches
# with the expected output
df = pd.DataFrame(
[
[2, "Apple"],
[6, "Pear"],
[-15, "Apple"],
[5, "Pear"],
[-2, "Apple"],
[0, "Apple"],
],
columns=["Numbers", "Fruit"],
)
test_scatter_plot_matrix = ff.create_scatterplotmatrix(
df=df,
diag="box",
height=1000,
width=1000,
size=13,
title="Scatterplot Matrix",
)
exp_scatter_plot_matrix = {
"data": [
{
"showlegend": False,
"type": "box",
"xaxis": "x",
"y": [2, 6, -15, 5, -2, 0],
"yaxis": "y",
},
{
"marker": {"size": 13},
"mode": "markers",
"showlegend": False,
"type": "scatter",
"x": ["Apple", "Pear", "Apple", "Pear", "Apple", "Apple"],
"xaxis": "x2",
"y": [2, 6, -15, 5, -2, 0],
"yaxis": "y2",
},
{
"marker": {"size": 13},
"mode": "markers",
"showlegend": False,
"type": "scatter",
"x": [2, 6, -15, 5, -2, 0],
"xaxis": "x3",
"y": ["Apple", "Pear", "Apple", "Pear", "Apple", "Apple"],
"yaxis": "y3",
},
{
"name": None,
"showlegend": False,
"type": "box",
"xaxis": "x4",
"y": ["Apple", "Pear", "Apple", "Pear", "Apple", "Apple"],
"yaxis": "y4",
},
],
"layout": {
"height": 1000,
"showlegend": True,
"title": {"text": "Scatterplot Matrix"},
"width": 1000,
"xaxis": {
"anchor": "y",
"domain": [0.0, 0.45],
"showticklabels": False,
},
"xaxis2": {"anchor": "y2", "domain": [0.55, 1.0]},
"xaxis3": {
"anchor": "y3",
"domain": [0.0, 0.45],
"title": {"text": "Numbers"},
},
"xaxis4": {
"anchor": "y4",
"domain": [0.55, 1.0],
"showticklabels": False,
"title": {"text": "Fruit"},
},
"yaxis": {
"anchor": "x",
"domain": [0.575, 1.0],
"title": {"text": "Numbers"},
},
"yaxis2": {"anchor": "x2", "domain": [0.575, 1.0]},
"yaxis3": {
"anchor": "x3",
"domain": [0.0, 0.425],
"title": {"text": "Fruit"},
},
"yaxis4": {"anchor": "x4", "domain": [0.0, 0.425]},
},
}
self.assert_fig_equal(
test_scatter_plot_matrix["data"][0], exp_scatter_plot_matrix["data"][0]
)
self.assert_fig_equal(
test_scatter_plot_matrix["data"][1], exp_scatter_plot_matrix["data"][1]
)
self.assert_fig_equal(
test_scatter_plot_matrix["layout"], exp_scatter_plot_matrix["layout"]
)
def test_scatter_plot_matrix_kwargs(self):
# check if test scatter plot matrix matches with
# the expected output
df = pd.DataFrame(
[
[2, "Apple"],
[6, "Pear"],
[-15, "Apple"],
[5, "Pear"],
[-2, "Apple"],
[0, "Apple"],
],
columns=["Numbers", "Fruit"],
)
test_scatter_plot_matrix = ff.create_scatterplotmatrix(
df,
index="Fruit",
endpts=[-10, -1],
diag="histogram",
height=1000,
width=1000,
size=13,
title="Scatterplot Matrix",
colormap="YlOrRd",
marker=dict(symbol=136),
)
exp_scatter_plot_matrix = {
"data": [
{
"marker": {"color": "rgb(128, 0, 38)"},
"showlegend": False,
"type": "histogram",
"x": [2, -15, -2, 0],
"xaxis": "x",
"yaxis": "y",
},
{
"marker": {"color": "rgb(255, 255, 204)"},
"showlegend": False,
"type": "histogram",
"x": [6, 5],
"xaxis": "x",
"yaxis": "y",
},
],
"layout": {
"barmode": "stack",
"height": 1000,
"showlegend": True,
"title": {"text": "Scatterplot Matrix"},
"width": 1000,
"xaxis": {
"anchor": "y",
"domain": [0.0, 1.0],
"title": {"text": "Numbers"},
},
"yaxis": {
"anchor": "x",
"domain": [0.0, 1.0],
"title": {"text": "Numbers"},
},
},
}
self.assert_fig_equal(
test_scatter_plot_matrix["data"][0], exp_scatter_plot_matrix["data"][0]
)
self.assert_fig_equal(
test_scatter_plot_matrix["data"][1], exp_scatter_plot_matrix["data"][1]
)
self.assert_fig_equal(
test_scatter_plot_matrix["layout"], exp_scatter_plot_matrix["layout"]
)
| TestScatterPlotMatrix |
python | pytorch__pytorch | test/test_overrides.py | {
"start": 50790,
"end": 51138
} | class ____(TestCase):
def test_resolve_name(self):
for cs in get_overridable_functions().values():
for c in cs:
self.assertEqual(
eval(torch.overrides.resolve_name(c)),
c,
msg=f"{c}, {torch.overrides.resolve_name(c)}"
)
| TestResolveName |
python | joke2k__faker | faker/providers/internet/hr_HR/__init__.py | {
"start": 46,
"end": 654
} | class ____(InternetProvider):
free_email_domains = (
"gmail.com",
"hotmail.com",
"yahoo.com",
"net.hr",
"zg.t-com.hr",
"inet.hr",
"t.ht.hr",
"vip.hr",
"globalnet.hr",
"xnet.hr",
"yahoo.hr",
"zagreb.hr",
)
tlds = ("hr", "com", "com.hr", "info", "org", "net", "biz")
replacements = (
("č", "c"),
("Č", "C"),
("ć", "c"),
("Ć", "C"),
("đ", "dj"),
("Đ", "Dj"),
("š", "s"),
("Š", "S"),
("ž", "z"),
("Ž", "Z"),
)
| Provider |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 8780,
"end": 9042
} | class ____(GestureTool):
''' A base class for tools that respond to scroll events.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@abstract
| Scroll |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dev_build_test_install/package.py | {
"start": 225,
"end": 898
} | class ____(MakefilePackage):
homepage = "example.com"
url = "fake.com"
version("0.0.0", sha256="0123456789abcdef0123456789abcdef")
filename = "dev-build-test-file.txt"
original_string = "This file should be edited"
replacement_string = "This file has been edited"
def edit(self, spec, prefix):
with open(self.filename, "r+", encoding="utf-8") as f:
assert f.read() == self.original_string
f.seek(0)
f.truncate()
f.write(self.replacement_string)
def build(self, spec, prefix):
pass
def install(self, spec, prefix):
install(self.filename, prefix)
| DevBuildTestInstall |
python | vyperlang__vyper | vyper/semantics/analysis/base.py | {
"start": 5198,
"end": 5355
} | class ____(AnalysisResult):
used_modules: list[ModuleInfo]
node: Optional[vy_ast.VyperNode] = None
# analysis result of ExportsDecl
@dataclass
| UsesInfo |
python | doocs__leetcode | solution/2000-2099/2040.Kth Smallest Product of Two Sorted Arrays/Solution.py | {
"start": 0,
"end": 615
} | class ____:
def kthSmallestProduct(self, nums1: List[int], nums2: List[int], k: int) -> int:
def count(p: int) -> int:
cnt = 0
n = len(nums2)
for x in nums1:
if x > 0:
cnt += bisect_right(nums2, p / x)
elif x < 0:
cnt += n - bisect_left(nums2, p / x)
else:
cnt += n * int(p >= 0)
return cnt
mx = max(abs(nums1[0]), abs(nums1[-1])) * max(abs(nums2[0]), abs(nums2[-1]))
return bisect_left(range(-mx, mx + 1), k, key=count) - mx
| Solution |
python | plotly__plotly.py | plotly/graph_objs/scattergl/_error_y.py | {
"start": 233,
"end": 14397
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergl"
_path_str = "scattergl.error_y"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
@property
def array(self):
"""
Sets the data corresponding the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
@property
def arrayminus(self):
"""
Sets the data corresponding the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `array`.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both direction (top/bottom for vertical bars, left/right for
horizontal bars.
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def traceref(self):
"""
The 'traceref' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs,
):
"""
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.ErrorY`
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both direction (top/bottom for vertical bars,
left/right for horizontal bars.
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
"""
super().__init__("error_y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergl.ErrorY
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.ErrorY`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("array", arg, array)
self._set_property("arrayminus", arg, arrayminus)
self._set_property("arrayminussrc", arg, arrayminussrc)
self._set_property("arraysrc", arg, arraysrc)
self._set_property("color", arg, color)
self._set_property("symmetric", arg, symmetric)
self._set_property("thickness", arg, thickness)
self._set_property("traceref", arg, traceref)
self._set_property("tracerefminus", arg, tracerefminus)
self._set_property("type", arg, type)
self._set_property("value", arg, value)
self._set_property("valueminus", arg, valueminus)
self._set_property("visible", arg, visible)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ErrorY |
python | google__flatbuffers | tests/MyGame/Example/StructOfStructs.py | {
"start": 176,
"end": 1284
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 20
# StructOfStructs
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# StructOfStructs
def A(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 0)
return obj
# StructOfStructs
def B(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 8)
return obj
# StructOfStructs
def C(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 12)
return obj
def CreateStructOfStructs(builder, a_id, a_distance, b_a, b_b, c_id, c_distance):
builder.Prep(4, 20)
builder.Prep(4, 8)
builder.PrependUint32(c_distance)
builder.PrependUint32(c_id)
builder.Prep(2, 4)
builder.Pad(1)
builder.PrependInt8(b_b)
builder.PrependInt16(b_a)
builder.Prep(4, 8)
builder.PrependUint32(a_distance)
builder.PrependUint32(a_id)
return builder.Offset()
import MyGame.Example.Ability
import MyGame.Example.Test
try:
from typing import Optional
except:
pass
| StructOfStructs |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 11677,
"end": 11993
} | class ____(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ("call", "args", "defaults", "body")
call: "Call"
args: t.List["Name"]
defaults: t.List["Expr"]
body: t.List[Node]
| CallBlock |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_bigtable.py | {
"start": 6920,
"end": 26705
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.bigtable_hook_default_project_id = BigtableHook(gcp_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook.get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.Client")
def test_bigtable_client_creation(self, mock_client, mock_get_creds):
result = self.bigtable_hook_default_project_id._get_client(GCP_PROJECT_ID_HOOK_UNIT_TEST)
mock_client.assert_called_once_with(
project=GCP_PROJECT_ID_HOOK_UNIT_TEST,
credentials=mock_get_creds.return_value,
client_info=CLIENT_INFO,
admin=True,
)
assert mock_client.return_value == result
assert self.bigtable_hook_default_project_id._client == result
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_get_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.bigtable_hook_default_project_id.get_instance(
instance_id=CBT_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id="example-project")
assert res is not None
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_get_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
res = self.bigtable_hook_default_project_id.get_instance(
project_id="new-project", instance_id=CBT_INSTANCE
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id="new-project")
assert res is not None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_get_instance_no_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = False
res = self.bigtable_hook_default_project_id.get_instance(
instance_id=CBT_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
get_client.assert_called_once_with(project_id="example-project")
assert res is None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_delete_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
res = self.bigtable_hook_default_project_id.delete_instance(
instance_id=CBT_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
delete_method.assert_called_once_with()
get_client.assert_called_once_with(project_id="example-project")
assert res is None
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_delete_instance_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
delete_method = instance_method.return_value.delete
res = self.bigtable_hook_default_project_id.delete_instance(
project_id="new-project", instance_id=CBT_INSTANCE
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
delete_method.assert_called_once_with()
get_client.assert_called_once_with(project_id="new-project")
assert res is None
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_delete_instance_no_instance(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = False
delete_method = instance_method.return_value.delete
self.bigtable_hook_default_project_id.delete_instance(
instance_id=CBT_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
instance_method.assert_called_once_with("instance")
instance_exists_method.assert_called_once_with()
delete_method.assert_not_called()
get_client.assert_called_once_with(project_id="example-project")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("google.cloud.bigtable.instance.Instance.create")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_create_instance(self, get_client, instance_create, mock_project_id):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_default_project_id.create_instance(
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_create.assert_called_once_with(clusters=mock.ANY)
assert res.instance_id == "instance"
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("google.cloud.bigtable.instance.Instance.cluster")
@mock.patch("google.cloud.bigtable.instance.Instance.create")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_create_instance_with_one_replica_cluster_production(
self, get_client, instance_create, cluster, mock_project_id
):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_default_project_id.create_instance(
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE,
replica_clusters=[{"id": CBT_REPLICA_CLUSTER_ID, "zone": CBT_REPLICA_CLUSTER_ZONE}],
cluster_nodes=1,
cluster_storage_type=enums.StorageType.SSD,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_type=enums.Instance.Type.PRODUCTION,
)
cluster.assert_has_calls(
[
mock.call(
cluster_id=CBT_CLUSTER,
location_id=CBT_ZONE,
serve_nodes=1,
default_storage_type=enums.StorageType.SSD,
),
mock.call(CBT_REPLICA_CLUSTER_ID, CBT_REPLICA_CLUSTER_ZONE, 1, enums.StorageType.SSD),
],
any_order=True,
)
get_client.assert_called_once_with(project_id="example-project")
instance_create.assert_called_once_with(clusters=mock.ANY)
assert res.instance_id == "instance"
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("google.cloud.bigtable.instance.Instance.cluster")
@mock.patch("google.cloud.bigtable.instance.Instance.create")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_create_instance_with_one_replica_cluster_development(
self, get_client, instance_create, cluster, mock_project_id
):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_default_project_id.create_instance(
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE,
replica_clusters=[{"id": CBT_REPLICA_CLUSTER_ID, "zone": CBT_REPLICA_CLUSTER_ZONE}],
cluster_nodes=1,
cluster_storage_type=enums.StorageType.SSD,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
instance_type=enums.Instance.Type.DEVELOPMENT,
)
cluster.assert_has_calls(
[
mock.call(
cluster_id=CBT_CLUSTER, location_id=CBT_ZONE, default_storage_type=enums.StorageType.SSD
),
mock.call(CBT_REPLICA_CLUSTER_ID, CBT_REPLICA_CLUSTER_ZONE, 1, enums.StorageType.SSD),
],
any_order=True,
)
get_client.assert_called_once_with(project_id="example-project")
instance_create.assert_called_once_with(clusters=mock.ANY)
assert res.instance_id == "instance"
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("google.cloud.bigtable.instance.Instance.cluster")
@mock.patch("google.cloud.bigtable.instance.Instance.create")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_create_instance_with_multiple_replica_clusters(
self, get_client, instance_create, cluster, mock_project_id
):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_default_project_id.create_instance(
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE,
replica_clusters=CBT_REPLICATE_CLUSTERS,
cluster_nodes=1,
cluster_storage_type=enums.StorageType.SSD,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
cluster.assert_has_calls(
[
mock.call(
cluster_id=CBT_CLUSTER,
location_id=CBT_ZONE,
serve_nodes=1,
default_storage_type=enums.StorageType.SSD,
),
mock.call("replica-1", "us-west1-a", 1, enums.StorageType.SSD),
mock.call("replica-2", "us-central1-f", 1, enums.StorageType.SSD),
mock.call("replica-3", "us-east1-d", 1, enums.StorageType.SSD),
],
any_order=True,
)
get_client.assert_called_once_with(project_id="example-project")
instance_create.assert_called_once_with(clusters=mock.ANY)
assert res.instance_id == "instance"
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("google.cloud.bigtable.instance.Instance.update")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_update_instance(self, get_client, instance_update, mock_project_id):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_update.return_value = operation
res = self.bigtable_hook_default_project_id.update_instance(
instance_id=CBT_INSTANCE,
instance_display_name=CBT_INSTANCE_DISPLAY_NAME,
instance_type=CBT_INSTANCE_TYPE,
instance_labels=CBT_INSTANCE_LABELS,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_update.assert_called_once_with()
assert res.instance_id == "instance"
@mock.patch("google.cloud.bigtable.instance.Instance.create")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_create_instance_overridden_project_id(self, get_client, instance_create):
operation = mock.Mock()
operation.result_return_value = Instance(instance_id=CBT_INSTANCE, client=get_client)
instance_create.return_value = operation
res = self.bigtable_hook_default_project_id.create_instance(
project_id="new-project",
instance_id=CBT_INSTANCE,
main_cluster_id=CBT_CLUSTER,
main_cluster_zone=CBT_ZONE,
)
get_client.assert_called_once_with(project_id="new-project")
instance_create.assert_called_once_with(clusters=mock.ANY)
assert res.instance_id == "instance"
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_delete_table(self, get_client, mock_project_id):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
table_delete_method = instance_method.return_value.table.return_value.delete
instance_exists_method.return_value = True
self.bigtable_hook_default_project_id.delete_table(
instance_id=CBT_INSTANCE,
table_id=CBT_TABLE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
get_client.assert_called_once_with(project_id="example-project")
instance_exists_method.assert_called_once_with()
table_delete_method.assert_called_once_with()
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_delete_table_overridden_project_id(self, get_client):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
table_delete_method = instance_method.return_value.table.return_value.delete
instance_exists_method.return_value = True
self.bigtable_hook_default_project_id.delete_table(
project_id="new-project", instance_id=CBT_INSTANCE, table_id=CBT_TABLE
)
get_client.assert_called_once_with(project_id="new-project")
instance_exists_method.assert_called_once_with()
table_delete_method.assert_called_once_with()
@mock.patch("google.cloud.bigtable.table.Table.create")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_create_table(self, get_client, create):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
instance = google.cloud.bigtable.instance.Instance(instance_id=CBT_INSTANCE, client=client)
self.bigtable_hook_default_project_id.create_table(instance=instance, table_id=CBT_TABLE)
get_client.assert_not_called()
create.assert_called_once_with([], {})
@mock.patch("google.cloud.bigtable.cluster.Cluster.update")
@mock.patch("google.cloud.bigtable.cluster.Cluster.reload")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_update_cluster(self, get_client, reload, update):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
instance = google.cloud.bigtable.instance.Instance(instance_id=CBT_INSTANCE, client=client)
self.bigtable_hook_default_project_id.update_cluster(
instance=instance, cluster_id=CBT_CLUSTER, nodes=4
)
get_client.assert_not_called()
reload.assert_called_once_with()
update.assert_called_once_with()
@mock.patch("google.cloud.bigtable.table.Table.list_column_families")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_list_column_families(self, get_client, list_column_families):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
get_client.return_value = client
instance = google.cloud.bigtable.instance.Instance(instance_id=CBT_INSTANCE, client=client)
self.bigtable_hook_default_project_id.get_column_families_for_table(
instance=instance, table_id=CBT_TABLE
)
get_client.assert_not_called()
list_column_families.assert_called_once_with()
@mock.patch("google.cloud.bigtable.table.Table.get_cluster_states")
@mock.patch("airflow.providers.google.cloud.hooks.bigtable.BigtableHook._get_client")
def test_get_cluster_states(self, get_client, get_cluster_states):
instance_method = get_client.return_value.instance
instance_exists_method = instance_method.return_value.exists
instance_exists_method.return_value = True
client = mock.Mock(Client)
instance = google.cloud.bigtable.instance.Instance(instance_id=CBT_INSTANCE, client=client)
self.bigtable_hook_default_project_id.get_cluster_states_for_table(
instance=instance, table_id=CBT_TABLE
)
get_client.assert_not_called()
get_cluster_states.assert_called_once_with()
| TestBigtableHookDefaultProjectId |
python | pytorch__pytorch | benchmarks/gpt_fast/quantize.py | {
"start": 2775,
"end": 3565
} | class ____(torch.nn.Module):
__constants__ = ["in_features", "out_features"]
in_features: int
out_features: int
weight: torch.Tensor
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
) -> None:
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.register_buffer(
"weight", torch.empty((out_features, in_features), dtype=torch.int8)
)
self.register_buffer("scales", torch.ones(out_features, dtype=torch.bfloat16))
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(input, self.weight.to(dtype=input.dtype)) * self.scales
| WeightOnlyInt8Linear |
python | plotly__plotly.py | _plotly_utils/basevalidators.py | {
"start": 71883,
"end": 73769
} | class ____(BaseValidator):
_PIL = None
try:
_PIL = import_module("PIL")
except ImportError:
pass
def __init__(self, plotly_name, parent_name, **kwargs):
super(ImageUriValidator, self).__init__(
plotly_name=plotly_name, parent_name=parent_name, **kwargs
)
def description(self):
desc = """\
The '{plotly_name}' property is an image URI that may be specified as:
- A remote image URI string
(e.g. 'http://www.somewhere.com/image.png')
- A data URI image string
(e.g. 'data:image/png;base64,iVBORw0KGgoAAAANSU')
- A PIL.Image.Image object which will be immediately converted
to a data URI image string
See http://pillow.readthedocs.io/en/latest/reference/Image.html
""".format(plotly_name=self.plotly_name)
return desc
def validate_coerce(self, v):
if v is None:
pass
elif isinstance(v, str):
# Future possibilities:
# - Detect filesystem system paths and convert to URI
# - Validate either url or data uri
pass
elif self._PIL and isinstance(v, self._PIL.Image.Image):
# Convert PIL image to png data uri string
v = self.pil_image_to_uri(v)
else:
self.raise_invalid_val(v)
return v
@staticmethod
def pil_image_to_uri(v):
in_mem_file = io.BytesIO()
v.save(in_mem_file, format="PNG")
in_mem_file.seek(0)
img_bytes = in_mem_file.read()
base64_encoded_result_bytes = base64.b64encode(img_bytes)
base64_encoded_result_str = base64_encoded_result_bytes.decode("ascii")
v = "data:image/png;base64,{base64_encoded_result_str}".format(
base64_encoded_result_str=base64_encoded_result_str
)
return v
| ImageUriValidator |
python | pytorch__pytorch | torch/serialization.py | {
"start": 2938,
"end": 3226
} | class ____(threading.local):
def __init__(self):
super().__init__()
self.map_location: Optional[MAP_LOCATION] = None
self.skip_data: bool = False
self.materialize_fake_tensors: bool = False
_serialization_tls = _SerializationLocal()
| _SerializationLocal |
python | anthropics__anthropic-sdk-python | src/anthropic/_base_client.py | {
"start": 47993,
"end": 51396
} | class ____(httpx.AsyncClient):
def __init__(self, **kwargs: Any) -> None:
kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
kwargs.setdefault("follow_redirects", True)
if "transport" not in kwargs:
socket_options: List[Tuple[int, int, Union[int, bool]]] = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True)]
TCP_KEEPINTVL = getattr(socket, "TCP_KEEPINTVL", None)
if TCP_KEEPINTVL is not None:
socket_options.append((socket.IPPROTO_TCP, TCP_KEEPINTVL, 60))
elif sys.platform == "darwin":
TCP_KEEPALIVE = getattr(socket, "TCP_KEEPALIVE", 0x10)
socket_options.append((socket.IPPROTO_TCP, TCP_KEEPALIVE, 60))
TCP_KEEPCNT = getattr(socket, "TCP_KEEPCNT", None)
if TCP_KEEPCNT is not None:
socket_options.append((socket.IPPROTO_TCP, TCP_KEEPCNT, 5))
TCP_KEEPIDLE = getattr(socket, "TCP_KEEPIDLE", None)
if TCP_KEEPIDLE is not None:
socket_options.append((socket.IPPROTO_TCP, TCP_KEEPIDLE, 60))
proxy_map = {key: None if url is None else Proxy(url=url) for key, url in get_environment_proxies().items()}
transport_kwargs = {
arg: kwargs[arg] for arg in ("verify", "cert", "trust_env", "http1", "http2", "limits") if arg in kwargs
}
transport_kwargs["socket_options"] = socket_options
proxy_mounts = {
key: None if proxy is None else AsyncHTTPTransport(proxy=proxy, **transport_kwargs)
for key, proxy in proxy_map.items()
}
default_transport = AsyncHTTPTransport(**transport_kwargs)
# Prioritize the mounts set by the user over the environment variables.
proxy_mounts.update(kwargs.get("mounts", {}))
kwargs["mounts"] = proxy_mounts
# Sets the default transport so that HTTPX won't automatically configure proxies.
kwargs["transport"] = default_transport
super().__init__(**kwargs)
try:
import httpx_aiohttp
except ImportError:
class _DefaultAioHttpClient(httpx.AsyncClient):
def __init__(self, **_kwargs: Any) -> None:
raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra")
else:
class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore
def __init__(self, **kwargs: Any) -> None:
kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
kwargs.setdefault("follow_redirects", True)
super().__init__(**kwargs)
if TYPE_CHECKING:
DefaultAsyncHttpxClient = httpx.AsyncClient
"""An alias to `httpx.AsyncClient` that provides the same defaults that this SDK
uses internally.
This is useful because overriding the `http_client` with your own instance of
`httpx.AsyncClient` will result in httpx's defaults being used, not ours.
"""
DefaultAioHttpClient = httpx.AsyncClient
"""An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`."""
else:
DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient
DefaultAioHttpClient = _DefaultAioHttpClient
| _DefaultAsyncHttpxClient |
python | google__jax | jax/_src/pallas/pipelining/internal.py | {
"start": 1739,
"end": 2501
} | class ____:
"""An internal representation of a pipeline stage."""
jaxpr: jax_core.ClosedJaxpr
effects: set[RefEffect]
properties: SchedulingProperties
name: str
def get_read_idxs(self) -> set[BufferIndex]:
"""Returns the buffer indices that this stage reads from."""
return {
effect.input_index
for effect in filter_read_effects(self.effects)
}
def get_write_idxs(self) -> set[BufferIndex]:
"""Returns the buffer indices that this stage writes to."""
return {
effect.input_index
for effect in filter_write_effects(self.effects)
}
def __str__(self):
return self.name
def __repr__(self):
return f"{self.name}[effs={self.effects}]"
@dataclasses.dataclass(frozen=True)
| PipelineStage |
python | scipy__scipy | benchmarks/benchmarks/sparse_linalg_spsolve_triangular.py | {
"start": 766,
"end": 1298
} | class ____(Benchmark):
params = [
[100,1000],
["spsolve", "spsolve_triangular"],
]
param_names = ['(n,n)',"method"]
def setup(self, n, method):
self.b = np.ones(n*n)
self.P_sparse = _create_sparse_poisson2d_half(n)
def time_solve(self, n, method):
if method == "spsolve":
spsolve(self.P_sparse, self.b)
elif method == "spsolve_triangular":
spsolve_triangular(self.P_sparse, self.b)
else:
raise NotImplementedError()
| Bench |
python | fluentpython__example-code | 13-op-overloading/vector_v6.py | {
"start": 5678,
"end": 8834
} | class ____:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
def __eq__(self, other):
return (len(self) == len(other) and
all(a == b for a, b in zip(self, other)))
def __hash__(self):
hashes = (hash(x) for x in self)
return functools.reduce(operator.xor, hashes, 0)
# BEGIN VECTOR_V6_UNARY
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __neg__(self):
return Vector(-x for x in self) # <1>
def __pos__(self):
return Vector(self) # <2>
# END VECTOR_V6_UNARY
def __bool__(self):
return bool(abs(self))
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral):
return self._components[index]
else:
msg = '{.__name__} indices must be integers'
raise TypeError(msg.format(cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
def angle(self, n):
r = math.sqrt(sum(x * x for x in self[n:]))
a = math.atan2(r, self[n-1])
if (n == len(self) - 1) and (self[-1] < 0):
return math.pi * 2 - a
else:
return a
def angles(self):
return (self.angle(n) for n in range(1, len(self)))
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('h'): # hyperspherical coordinates
fmt_spec = fmt_spec[:-1]
coords = itertools.chain([abs(self)],
self.angles())
outer_fmt = '<{}>'
else:
coords = self
outer_fmt = '({})'
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(', '.join(components))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
# BEGIN VECTOR_V6_ADD
def __add__(self, other):
try:
pairs = itertools.zip_longest(self, other, fillvalue=0.0)
return Vector(a + b for a, b in pairs)
except TypeError:
return NotImplemented
def __radd__(self, other):
return self + other
# END VECTOR_V6_ADD
| Vector |
python | tensorflow__tensorflow | tensorflow/python/keras/legacy_tf_layers/pooling.py | {
"start": 939,
"end": 3663
} | class ____(keras_layers.AveragePooling1D, base.Layer):
"""Average Pooling layer for 1D inputs.
Args:
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
if strides is None:
raise ValueError('Argument `strides` must not be None.')
super(AveragePooling1D, self).__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs)
def average_pooling1d(inputs, pool_size, strides,
padding='valid', data_format='channels_last',
name=None):
"""Average Pooling layer for 1D inputs.
Args:
inputs: The tensor over which to pool. Must have rank 3.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
Returns:
The output tensor, of rank 3.
Raises:
ValueError: if eager execution is enabled.
"""
warnings.warn('`tf.layers.average_pooling1d` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.AveragePooling1D` instead.')
layer = AveragePooling1D(pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return layer.apply(inputs)
| AveragePooling1D |
python | realpython__materials | python-built-in-exceptions/square.py | {
"start": 0,
"end": 343
} | class ____:
def __init__(self, values):
self.values = values
self.index = 0
def __iter__(self):
return self
def __next__(self):
if self.index >= len(self.values):
raise StopIteration
square = self.values[self.index] ** 2
self.index += 1
return square
| SquareIterator |
python | python-markdown__markdown | tests/test_syntax/extensions/test_admonition.py | {
"start": 781,
"end": 6873
} | class ____(TestCase):
def test_with_lists(self):
self.assertMarkdownRenders(
self.dedent(
'''
- List
!!! note "Admontion"
- Paragraph
Paragraph
'''
),
self.dedent(
'''
<ul>
<li>
<p>List</p>
<div class="admonition note">
<p class="admonition-title">Admontion</p>
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ul>
</div>
</li>
</ul>
'''
),
extensions=['admonition']
)
def test_with_big_lists(self):
self.assertMarkdownRenders(
self.dedent(
'''
- List
!!! note "Admontion"
- Paragraph
Paragraph
- Paragraph
paragraph
'''
),
self.dedent(
'''
<ul>
<li>
<p>List</p>
<div class="admonition note">
<p class="admonition-title">Admontion</p>
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
<li>
<p>Paragraph</p>
<p>paragraph</p>
</li>
</ul>
</div>
</li>
</ul>
'''
),
extensions=['admonition']
)
def test_with_complex_lists(self):
self.assertMarkdownRenders(
self.dedent(
'''
- List
!!! note "Admontion"
- Paragraph
!!! note "Admontion"
1. Paragraph
Paragraph
'''
),
self.dedent(
'''
<ul>
<li>
<p>List</p>
<div class="admonition note">
<p class="admonition-title">Admontion</p>
<ul>
<li>
<p>Paragraph</p>
<div class="admonition note">
<p class="admonition-title">Admontion</p>
<ol>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ol>
</div>
</li>
</ul>
</div>
</li>
</ul>
'''
),
extensions=['admonition']
)
def test_definition_list(self):
self.assertMarkdownRenders(
self.dedent(
'''
- List
!!! note "Admontion"
Term
: Definition
More text
: Another
definition
Even more text
'''
),
self.dedent(
'''
<ul>
<li>
<p>List</p>
<div class="admonition note">
<p class="admonition-title">Admontion</p>
<dl>
<dt>Term</dt>
<dd>
<p>Definition</p>
<p>More text</p>
</dd>
<dd>
<p>Another
definition</p>
<p>Even more text</p>
</dd>
</dl>
</div>
</li>
</ul>
'''
),
extensions=['admonition', 'def_list']
)
def test_with_preceding_text(self):
self.assertMarkdownRenders(
self.dedent(
'''
foo
**foo**
!!! note "Admonition"
'''
),
self.dedent(
'''
<p>foo
<strong>foo</strong></p>
<div class="admonition note">
<p class="admonition-title">Admonition</p>
</div>
'''
),
extensions=['admonition']
)
def test_admontion_detabbing(self):
self.assertMarkdownRenders(
self.dedent(
'''
!!! note "Admonition"
- Parent 1
- Child 1
- Child 2
'''
),
self.dedent(
'''
<div class="admonition note">
<p class="admonition-title">Admonition</p>
<ul>
<li>
<p>Parent 1</p>
<ul>
<li>Child 1</li>
<li>Child 2</li>
</ul>
</li>
</ul>
</div>
'''
),
extensions=['admonition']
)
def test_admonition_first_indented(self):
self.assertMarkdownRenders(
self.dedent(
'''
!!! danger "This is not"
one long admonition title
'''
),
self.dedent(
'''
<div class="admonition danger">
<p class="admonition-title">This is not</p>
<pre><code>one long admonition title
</code></pre>
</div>
'''
),
extensions=['admonition']
)
| TestAdmonition |
python | pytorch__pytorch | benchmarks/fastrnns/custom_lstms.py | {
"start": 6928,
"end": 7486
} | class ____(jit.ScriptModule):
def __init__(self, cell, *cell_args):
super().__init__()
self.cell = cell(*cell_args)
@jit.script_method
def forward(
self, input: Tensor, state: tuple[Tensor, Tensor]
) -> tuple[Tensor, tuple[Tensor, Tensor]]:
inputs = reverse(input.unbind(0))
outputs = jit.annotate(list[Tensor], [])
for i in range(len(inputs)):
out, state = self.cell(inputs[i], state)
outputs += [out]
return torch.stack(reverse(outputs)), state
| ReverseLSTMLayer |
python | MongoEngine__mongoengine | mongoengine/queryset/field_list.py | {
"start": 32,
"end": 2964
} | class ____:
"""Object that handles combinations of .only() and .exclude() calls"""
ONLY = 1
EXCLUDE = 0
def __init__(
self, fields=None, value=ONLY, always_include=None, _only_called=False
):
"""The QueryFieldList builder
:param fields: A list of fields used in `.only()` or `.exclude()`
:param value: How to handle the fields; either `ONLY` or `EXCLUDE`
:param always_include: Any fields to always_include eg `_cls`
:param _only_called: Has `.only()` been called? If so its a set of fields
otherwise it performs a union.
"""
self.value = value
self.fields = set(fields or [])
self.always_include = set(always_include or [])
self._id = None
self._only_called = _only_called
self.slice = {}
def __add__(self, f):
if isinstance(f.value, dict):
for field in f.fields:
self.slice[field] = f.value
if not self.fields:
self.fields = f.fields
elif not self.fields:
self.fields = f.fields
self.value = f.value
self.slice = {}
elif self.value is self.ONLY and f.value is self.ONLY:
self._clean_slice()
if self._only_called:
self.fields = self.fields.union(f.fields)
else:
self.fields = f.fields
elif self.value is self.EXCLUDE and f.value is self.EXCLUDE:
self.fields = self.fields.union(f.fields)
self._clean_slice()
elif self.value is self.ONLY and f.value is self.EXCLUDE:
self.fields -= f.fields
self._clean_slice()
elif self.value is self.EXCLUDE and f.value is self.ONLY:
self.value = self.ONLY
self.fields = f.fields - self.fields
self._clean_slice()
if "_id" in f.fields:
self._id = f.value
if self.always_include:
if self.value is self.ONLY and self.fields:
if sorted(self.slice.keys()) != sorted(self.fields):
self.fields = self.fields.union(self.always_include)
else:
self.fields -= self.always_include
if getattr(f, "_only_called", False):
self._only_called = True
return self
def __bool__(self):
return bool(self.fields)
def as_dict(self):
field_list = {field: self.value for field in self.fields}
if self.slice:
field_list.update(self.slice)
if self._id is not None:
field_list["_id"] = self._id
return field_list
def reset(self):
self.fields = set()
self.slice = {}
self.value = self.ONLY
def _clean_slice(self):
if self.slice:
for field in set(self.slice.keys()) - self.fields:
del self.slice[field]
| QueryFieldList |
python | doocs__leetcode | solution/0200-0299/0247.Strobogrammatic Number II/Solution.py | {
"start": 0,
"end": 474
} | class ____:
def findStrobogrammatic(self, n: int) -> List[str]:
def dfs(u):
if u == 0:
return ['']
if u == 1:
return ['0', '1', '8']
ans = []
for v in dfs(u - 2):
for l, r in ('11', '88', '69', '96'):
ans.append(l + v + r)
if u != n:
ans.append('0' + v + '0')
return ans
return dfs(n)
| Solution |
python | django__django | tests/generic_relations_regress/models.py | {
"start": 3735,
"end": 3982
} | class ____(models.Model):
flag = models.BooleanField(null=True)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
| A |
python | aimacode__aima-python | probability.py | {
"start": 5226,
"end": 6556
} | class ____:
"""Bayesian network containing only boolean-variable nodes."""
def __init__(self, node_specs=None):
"""Nodes must be ordered with parents before children."""
self.nodes = []
self.variables = []
node_specs = node_specs or []
for node_spec in node_specs:
self.add(node_spec)
def add(self, node_spec):
"""Add a node to the net. Its parents must already be in the
net, and its variable must not."""
node = BayesNode(*node_spec)
assert node.variable not in self.variables
assert all((parent in self.variables) for parent in node.parents)
self.nodes.append(node)
self.variables.append(node.variable)
for parent in node.parents:
self.variable_node(parent).children.append(node)
def variable_node(self, var):
"""Return the node for the variable named var.
>>> burglary.variable_node('Burglary').variable
'Burglary'"""
for n in self.nodes:
if n.variable == var:
return n
raise Exception("No such variable: {}".format(var))
def variable_values(self, var):
"""Return the domain of var."""
return [True, False]
def __repr__(self):
return 'BayesNet({0!r})'.format(self.nodes)
| BayesNet |
python | streamlit__streamlit | lib/streamlit/elements/lib/column_types.py | {
"start": 5135,
"end": 5388
} | class ____(TypedDict):
type: Literal["time"]
format: NotRequired[str | Literal["localized", "iso8601"] | None]
min_value: NotRequired[str | None]
max_value: NotRequired[str | None]
step: NotRequired[int | float | None]
| TimeColumnConfig |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataflow.py | {
"start": 2180,
"end": 8066
} | class ____:
"""
Dataflow configuration for BeamRunJavaPipelineOperator and BeamRunPythonPipelineOperator.
.. seealso::
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`
and :class:`~airflow.providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator`.
:param job_name: The 'jobName' to use when executing the Dataflow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` or ``'job_name'``in ``options`` will be overwritten.
:param append_job_name: True if unique suffix has to be appended to job name.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
.. warning::
This option requires Apache Beam 2.39.0 or newer.
:param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
instead of canceling during killing task instance. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed. (optional) default to 300s
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stderr/stderr log for the job id.
* loop waiting for the end of the job ID from the previous step by checking its status.
Step two is started just after step one has finished, so if you have wait_until_finished in your
pipeline code, step two will not start until the process stops. When this process stops,
steps two will run, but it will only execute one iteration as the job will be in a terminal state.
If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finish=True
to the operator, the second loop will wait for the job's terminal state.
If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finish=False
to the operator, the second loop will check once is job not in terminal state and exit the loop.
:param multiple_jobs: If pipeline creates multiple jobs then monitor all jobs. Supported only by
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`.
:param check_if_running: Before running job, validate that a previous run is not in process.
Supported only by:
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`.
:param service_account: Run the job as a specific service account, instead of the default GCE robot.
"""
template_fields: Sequence[str] = ("job_name", "location")
def __init__(
self,
*,
job_name: str | None = None,
append_job_name: bool = True,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
drain_pipeline: bool = False,
cancel_timeout: int | None = 5 * 60,
wait_until_finished: bool | None = None,
multiple_jobs: bool | None = None,
check_if_running: CheckJobRunning = CheckJobRunning.WaitForRun,
service_account: str | None = None,
) -> None:
self.job_name = job_name
self.append_job_name = append_job_name
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.poll_sleep = poll_sleep
self.impersonation_chain = impersonation_chain
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.multiple_jobs = multiple_jobs
self.check_if_running = check_if_running
self.service_account = service_account
| DataflowConfiguration |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/handle.py | {
"start": 2406,
"end": 3532
} | class ____(
NamedTuple(
"_ResolvedFromDynamicStepHandle",
[("node_handle", NodeHandle), ("mapping_key", str), ("key", str)],
)
):
"""A reference to an ExecutionStep that came from resolving an UnresolvedMappedExecutionStep
(and associated UnresolvedStepHandle) downstream of a dynamic output after it has
completed successfully.
"""
def __new__(cls, node_handle: NodeHandle, mapping_key: str, key: Optional[str] = None):
return super().__new__(
cls,
node_handle=check.inst_param(node_handle, "node_handle", NodeHandle),
mapping_key=check.str_param(mapping_key, "mapping_key"),
# mypy can't tell that if default is set, this is guaranteed to be a str
key=cast(
"str",
check.opt_str_param(key, "key", default=f"{node_handle}[{mapping_key}]"),
),
)
def to_key(self) -> str:
return self.key
@property
def unresolved_form(self) -> UnresolvedStepHandle:
return UnresolvedStepHandle(node_handle=self.node_handle)
| ResolvedFromDynamicStepHandle |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/database.py | {
"start": 11313,
"end": 16293
} | class ____(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
reqts = getattr(md, req_attr)
logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr, reqts)
return set(md.get_requirements(reqts, extras=self.extras, env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only', req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
Return a textual representation of this instance,
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
another. distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and self.version == other.version and self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
| Distribution |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 61690,
"end": 64639
} | class ____(rv_continuous):
r"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is:
.. math::
f(x) = \exp(-x)
for :math:`x \ge 0`.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
The exponential distribution is a special case of the gamma
distributions, with gamma shape parameter ``a = 1``.
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.standard_exponential(size)
def _pdf(self, x):
# expon.pdf(x) = exp(-x)
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
@_call_super_mom
@replace_notes_in_docstring(rv_continuous, notes="""\
When `method='MLE'`,
this function uses explicit formulas for the maximum likelihood
estimation of the exponential distribution parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are
ignored.\n\n""")
def fit(self, data, *args, **kwds):
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise ValueError("The data contains non-finite values.")
data_min = data.min()
if floc is None:
# ML estimate of the location is the minimum of the data.
loc = data_min
else:
loc = floc
if data_min < loc:
# There are values that are less than the specified loc.
raise FitDataError("expon", lower=floc, upper=np.inf)
if fscale is None:
# ML estimate of the scale is the shifted mean.
scale = data.mean() - loc
else:
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
expon = expon_gen(a=0.0, name='expon')
| expon_gen |
python | ray-project__ray | python/ray/dashboard/modules/job/tests/test_job_manager_standalone.py | {
"start": 321,
"end": 2600
} | class ____:
"""NOTE: PLEASE READ CAREFULLY BEFORE MODIFYING
This test is extracted into a standalone module such that it can bootstrap its own
(standalone) Ray cluster while avoiding affecting the shared one used by other
JobManager tests
"""
@pytest.mark.parametrize(
"tracing_enabled",
[
False,
# TODO(issues/38633): local code loading is broken when tracing is enabled
# True,
],
)
async def test_user_provided_job_config_honored_by_worker(
self, tracing_enabled, tmp_path
):
"""Ensures that the JobConfig instance injected into ray.init in the driver
script is honored even in case when job is submitted via JobManager.submit_job
API (involving RAY_JOB_CONFIG_JSON_ENV_VAR being set in child process env)
"""
if tracing_enabled:
tracing_startup_hook = (
"ray.util.tracing.setup_local_tmp_tracing:setup_tracing"
)
else:
tracing_startup_hook = None
with create_ray_cluster(_tracing_startup_hook=tracing_startup_hook) as cluster:
job_manager = create_job_manager(cluster, tmp_path)
driver_script_path = _driver_script_path(
"check_code_search_path_is_propagated.py"
)
job_id = await job_manager.submit_job(
entrypoint=f"python {driver_script_path}",
# NOTE: We inject runtime_env in here, but also specify the JobConfig in
# the driver script: settings to JobConfig (other than the
# runtime_env) passed in via ray.init(...) have to be respected
# along with the runtime_env passed from submit_job API
runtime_env={"env_vars": {"TEST_SUBPROCESS_RANDOM_VAR": "0xDEEDDEED"}},
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
logs = job_manager.get_job_logs(job_id)
assert "Code search path is propagated" in logs, logs
assert "0xDEEDDEED" in logs, logs
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestRuntimeEnvStandalone |
python | cython__cython | Cython/Compiler/Tests/TestTreeFragment.py | {
"start": 160,
"end": 2155
} | class ____(CythonTest):
def test_basic(self):
F = self.fragment("x = 4")
T = F.copy()
self.assertCode("x = 4", T)
def test_copy_is_taken(self):
F = self.fragment("if True: x = 4")
T1 = F.root
T2 = F.copy()
self.assertEqual("x", T2.stats[0].if_clauses[0].body.lhs.name)
T2.stats[0].if_clauses[0].body.lhs.name = "other"
self.assertEqual("x", T1.stats[0].if_clauses[0].body.lhs.name)
def test_substitutions_are_copied(self):
T = self.fragment("y + y").substitute({"y": NameNode(pos=None, name="x")})
self.assertEqual("x", T.stats[0].expr.operand1.name)
self.assertEqual("x", T.stats[0].expr.operand2.name)
self.assertTrue(T.stats[0].expr.operand1 is not T.stats[0].expr.operand2)
def test_substitution(self):
F = self.fragment("x = 4")
y = NameNode(pos=None, name="y")
T = F.substitute({"x" : y})
self.assertCode("y = 4", T)
def test_exprstat(self):
F = self.fragment("PASS")
pass_stat = PassStatNode(pos=None)
T = F.substitute({"PASS" : pass_stat})
self.assertTrue(isinstance(T.stats[0], PassStatNode), T)
def test_pos_is_transferred(self):
F = self.fragment("""
x = y
x = u * v ** w
""")
T = F.substitute({"v" : NameNode(pos=None, name="a")})
v = F.root.stats[1].rhs.operand2.operand1
a = T.stats[1].rhs.operand2.operand1
self.assertEqual(v.pos, a.pos)
def test_temps(self):
TemplateTransform.temp_name_counter = 0
F = self.fragment("""
TMP
x = TMP
""")
T = F.substitute(temps=["TMP"])
s = T.body.stats
self.assertTrue(isinstance(s[0].expr, TempRefNode))
self.assertTrue(isinstance(s[1].rhs, TempRefNode))
self.assertTrue(s[0].expr.handle is s[1].rhs.handle)
if __name__ == "__main__":
import unittest
unittest.main()
| TestTreeFragments |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-oci-data-science/tests/test_oci_data_science_utils.py | {
"start": 5322,
"end": 6916
} | class ____:
"""Unit tests for _from_token_logprob_dicts function."""
def test_conversion(self):
"""Ensures multiple token logprobs are converted correctly."""
token_logprob_dicts = [
{
"token": "Hello",
"logprob": -0.1,
"top_logprobs": [
{"token": "Hello", "logprob": -0.1, "bytes": [1, 2, 3]},
{"token": "Hi", "logprob": -1.0, "bytes": [1, 2, 3]},
],
},
{
"token": "world",
"logprob": -0.2,
"top_logprobs": [
{"token": "world", "logprob": -0.2, "bytes": [2, 3, 4]},
{"token": "earth", "logprob": -1.2, "bytes": [2, 3, 4]},
],
},
]
expected_result = [
[
LogProb(token="Hello", logprob=-0.1, bytes=[1, 2, 3]),
LogProb(token="Hi", logprob=-1.0, bytes=[1, 2, 3]),
],
[
LogProb(token="world", logprob=-0.2, bytes=[2, 3, 4]),
LogProb(token="earth", logprob=-1.2, bytes=[2, 3, 4]),
],
]
result = _from_token_logprob_dicts(token_logprob_dicts)
assert result == expected_result
def test_empty_input(self):
"""Ensures function returns empty list when input is empty."""
token_logprob_dicts = []
expected_result = []
result = _from_token_logprob_dicts(token_logprob_dicts)
assert result == expected_result
| TestFromTokenLogprobs |
python | PrefectHQ__prefect | src/integrations/prefect-gitlab/prefect_gitlab/credentials.py | {
"start": 248,
"end": 3167
} | class ____(Block):
"""
Store a GitLab personal access token to interact with private GitLab
repositories.
Attributes:
token: The personal access token to authenticate with GitLab.
url: URL to self-hosted GitLab instances.
Examples:
Load stored GitLab credentials:
```python
from prefect_gitlab import GitLabCredentials
gitlab_credentials_block = GitLabCredentials.load("BLOCK_NAME")
```
"""
_block_type_name = "GitLab Credentials"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/55edIimT4g9gbjhkh5a3Sp/dfdb9391d8f45c2e93e72e3a4d350771/gitlab-logo-500.png?h=250"
token: Optional[SecretStr] = Field(
title="Personal Access Token",
default=None,
description="A GitLab Personal Access Token with read_repository scope.",
)
url: Optional[str] = Field(
default=None, title="URL", description="URL to self-hosted GitLab instances."
)
def format_git_credentials(self, url: str) -> str:
"""
Format and return the full git URL with GitLab credentials embedded.
Handles both personal access tokens and deploy tokens correctly:
- Personal access tokens: prefixed with "oauth2:"
- Deploy tokens (username:token format): used as-is
- Already prefixed tokens: not double-prefixed
Args:
url: Repository URL (e.g., "https://gitlab.com/org/repo.git")
Returns:
Complete URL with credentials embedded
Raises:
ValueError: If token is not configured
"""
if not self.token:
raise ValueError("Token is required for GitLab authentication")
token_value = self.token.get_secret_value()
# Deploy token detection: contains ":" but not "oauth2:" prefix
# Deploy tokens should not have oauth2: prefix (GitLab 16.3.4+ rejects them)
# See: https://github.com/PrefectHQ/prefect/issues/10832
if ":" in token_value and not token_value.startswith("oauth2:"):
credentials = token_value
# Personal access token: add oauth2: prefix
# See: https://github.com/PrefectHQ/prefect/issues/16836
elif not token_value.startswith("oauth2:"):
credentials = f"oauth2:{token_value}"
else:
# Already prefixed
credentials = token_value
# Insert credentials into URL
parsed = urlparse(url)
return urlunparse(parsed._replace(netloc=f"{credentials}@{parsed.netloc}"))
def get_client(self) -> Gitlab:
"""
Gets an authenticated GitLab client.
Returns:
An authenticated GitLab client.
"""
# ref: https://python-gitlab.readthedocs.io/en/stable/
gitlab = Gitlab(url=self.url, oauth_token=self.token.get_secret_value())
gitlab.auth()
return gitlab
| GitLabCredentials |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/comments.py | {
"start": 22457,
"end": 22722
} | class ____(Sized):
__slots__ = ('_mapping',)
def __init__(self, mapping):
# type: (Any) -> None
self._mapping = mapping
def __len__(self):
# type: () -> int
count = len(self._mapping)
return count
| CommentedMapView |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0023_add_status_code.py | {
"start": 149,
"end": 565
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0022_migrate_protected_versions"),
]
operations = [
migrations.AddField(
model_name="build",
name="status_code",
field=models.BooleanField(
blank=True, default=None, null=True, verbose_name="Status code"
),
),
]
| Migration |
python | tqdm__tqdm | tqdm/dask.py | {
"start": 178,
"end": 1319
} | class ____(Callback):
"""Dask callback for task progress."""
def __init__(self, start=None, pretask=None, tqdm_class=tqdm_auto,
**tqdm_kwargs):
"""
Parameters
----------
tqdm_class : optional
`tqdm` class to use for bars [default: `tqdm.auto.tqdm`].
tqdm_kwargs : optional
Any other arguments used for all bars.
"""
super().__init__(start=start, pretask=pretask)
if tqdm_kwargs:
tqdm_class = partial(tqdm_class, **tqdm_kwargs)
self.tqdm_class = tqdm_class
def _start_state(self, _, state):
self.pbar = self.tqdm_class(total=sum(
len(state[k]) for k in ['ready', 'waiting', 'running', 'finished']))
def _posttask(self, *_, **__):
self.pbar.update()
def _finish(self, *_, **__):
self.pbar.close()
def display(self):
"""Displays in the current cell in Notebooks."""
container = getattr(self.bar, 'container', None)
if container is None:
return
from .notebook import display
display(container)
| TqdmCallback |
python | ansible__ansible | lib/ansible/galaxy/dependency_resolution/providers.py | {
"start": 1515,
"end": 19310
} | class ____(AbstractProvider):
"""Delegate providing a requirement interface for the resolver."""
def __init__(
self,
apis: MultiGalaxyAPIProxy,
concrete_artifacts_manager: ConcreteArtifactsManager,
preferred_candidates: _c.Iterable[Candidate] | None = None,
with_deps: bool = True,
with_pre_releases: bool = False,
upgrade: bool = False,
include_signatures: bool = True,
) -> None:
r"""Initialize helper attributes.
:param api: An instance of the multiple Galaxy APIs wrapper.
:param concrete_artifacts_manager: An instance of the caching \
concrete artifacts manager.
:param with_deps: A flag specifying whether the resolver \
should attempt to pull-in the deps of the \
requested requirements. On by default.
:param with_pre_releases: A flag specifying whether the \
resolver should skip pre-releases. \
Off by default.
:param upgrade: A flag specifying whether the resolver should \
skip matching versions that are not upgrades. \
Off by default.
:param include_signatures: A flag to determine whether to retrieve \
signatures from the Galaxy APIs and \
include signatures in matching Candidates. \
On by default.
"""
self._api_proxy = apis
self._make_req_from_dict = functools.partial(
Requirement.from_requirement_dict,
art_mgr=concrete_artifacts_manager,
)
self._preferred_candidates = set(preferred_candidates or ())
self._with_deps = with_deps
self._with_pre_releases = with_pre_releases
self._upgrade = upgrade
self._include_signatures = include_signatures
def identify(
self,
requirement_or_candidate: Candidate | Requirement,
) -> str:
"""Given requirement or candidate, return an identifier for it.
This is used to identify a requirement or candidate, e.g.
whether two requirements should have their specifier parts
(version ranges or pins) merged, whether two candidates would
conflict with each other (because they have same name but
different versions).
"""
return requirement_or_candidate.canonical_package_id
def get_preference(
self,
identifier: str,
resolutions: _c.Mapping[str, Candidate],
candidates: _c.Mapping[str, _c.Iterator[Candidate]],
information: _c.Mapping[
str,
_c.Iterator[RequirementInformation[Requirement, Candidate]],
],
backtrack_causes: _c.Sequence[
RequirementInformation[Requirement, Candidate],
],
) -> float | int:
"""Return sort key function return value for given requirement.
This result should be based on preference that is defined as
"I think this requirement should be resolved first".
The lower the return value is, the more preferred this
group of arguments is.
:param identifier: The value returned by ``identify()``.
:param resolutions: Mapping of identifier, candidate pairs.
:param candidates: Possible candidates for the identifier.
Mapping of identifier, list of candidate pairs.
:param information: Requirement information of each package.
Mapping of identifier, list of named tuple pairs.
The named tuples have the entries ``requirement`` and ``parent``.
:param backtrack_causes: Sequence of requirement information that were
the requirements that caused the resolver to most recently backtrack.
The preference could depend on various of issues, including
(not necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should
probably be worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this
requirement? Those with few left should likely be worked on
first, I guess?
* Are there any known conflicts for this requirement?
We should probably work on those with the most
known conflicts.
A sortable value should be returned (this will be used as the
`key` parameter of the built-in sorting function). The smaller
the value is, the more preferred this requirement is (i.e. the
sorting function is called with ``reverse=False``).
"""
if any(
candidate in self._preferred_candidates
for candidate in candidates
):
# NOTE: Prefer pre-installed candidates over newer versions
# NOTE: available from Galaxy or other sources.
return float('-inf')
return len(candidates)
def find_matches(
self,
identifier: str,
requirements: _c.Mapping[str, _c.Iterator[Requirement]],
incompatibilities: _c.Mapping[str, _c.Iterator[Candidate]],
) -> list[Candidate]:
r"""Find all possible candidates satisfying given requirements.
This tries to get candidates based on the requirements' types.
For concrete requirements (SCM, dir, namespace dir, local or
remote archives), the one-and-only match is returned
For a "named" requirement, Galaxy-compatible APIs are consulted
to find concrete candidates for this requirement. If there's a
pre-installed candidate, it's prepended in front of others.
"""
return [
match for match in self._find_matches(list(requirements[identifier]))
if not any(match.ver == incompat.ver for incompat in incompatibilities[identifier])
]
def _find_matches(self, requirements: list[Requirement]) -> list[Candidate]:
# FIXME: The first requirement may be a Git repo followed by
# FIXME: its cloned tmp dir. Using only the first one creates
# FIXME: loops that prevent any further dependency exploration.
# FIXME: We need to figure out how to prevent this.
first_req = requirements[0]
fqcn = first_req.fqcn
# The fqcn is guaranteed to be the same
version_req = "A SemVer-compliant version or '*' is required. See https://semver.org to learn how to compose it correctly. "
version_req += "This is an issue with the collection."
# If we're upgrading collections, we can't calculate preinstalled_candidates until the latest matches are found.
# Otherwise, we can potentially avoid a Galaxy API call by doing this first.
preinstalled_candidates = set()
if not self._upgrade and first_req.type == 'galaxy':
preinstalled_candidates = {
candidate for candidate in self._preferred_candidates
if candidate.fqcn == fqcn and
all(self.is_satisfied_by(requirement, candidate) for requirement in requirements)
}
try:
coll_versions: _c.Iterable[tuple[str, GalaxyAPI]] = (
[] if preinstalled_candidates
else self._api_proxy.get_collection_versions(first_req)
)
except TypeError as exc:
if first_req.is_concrete_artifact:
# Non hashable versions will cause a TypeError
raise ValueError(
f"Invalid version found for the collection '{first_req}'. {version_req}"
) from exc
# Unexpected error from a Galaxy server
raise
if first_req.is_concrete_artifact:
# FIXME: do we assume that all the following artifacts are also concrete?
# FIXME: does using fqcn==None cause us problems here?
# Ensure the version found in the concrete artifact is SemVer-compliant
for version, req_src in coll_versions:
version_err = f"Invalid version found for the collection '{first_req}': {version} ({type(version)}). {version_req}"
# NOTE: The known cases causing the version to be a non-string object come from
# NOTE: the differences in how the YAML parser normalizes ambiguous values and
# NOTE: how the end-users sometimes expect them to be parsed. Unless the users
# NOTE: explicitly use the double quotes of one of the multiline string syntaxes
# NOTE: in the collection metadata file, PyYAML will parse a value containing
# NOTE: two dot-separated integers as `float`, a single integer as `int`, and 3+
# NOTE: integers as a `str`. In some cases, they may also use an empty value
# NOTE: which is normalized as `null` and turned into `None` in the Python-land.
# NOTE: Another known mistake is setting a minor part of the SemVer notation
# NOTE: skipping the "patch" bit like "1.0" which is assumed non-compliant even
# NOTE: after the conversion to string.
if not isinstance(version, str):
raise ValueError(version_err)
elif version != '*':
try:
SemanticVersion(version)
except ValueError as ex:
raise ValueError(version_err) from ex
return [
Candidate(fqcn, version, _none_src_server, first_req.type, None)
for version, _none_src_server in coll_versions
]
latest_matches = []
signatures = []
extra_signature_sources: list[str] = []
discarding_pre_releases_acceptable = any(
not is_pre_release(candidate_version)
for candidate_version, _src_server in coll_versions
)
# NOTE: The optimization of conditionally looping over the requirements
# NOTE: is used to skip having to compute the pinned status of all
# NOTE: requirements and apply version normalization to the found ones.
all_pinned_requirement_version_numbers = {
# NOTE: Pinned versions can start with a number, but also with an
# NOTE: equals sign. Stripping it at the beginning should be
# NOTE: enough. If there's a space after equals, the second strip
# NOTE: will take care of it.
# NOTE: Without this conversion, requirements versions like
# NOTE: '1.2.3-alpha.4' work, but '=1.2.3-alpha.4' don't.
requirement.ver.lstrip('=').strip()
for requirement in requirements
if requirement.is_pinned
} if discarding_pre_releases_acceptable else set()
for version, src_server in coll_versions:
tmp_candidate = Candidate(fqcn, version, src_server, 'galaxy', None)
for requirement in requirements:
candidate_satisfies_requirement = self.is_satisfied_by(
requirement, tmp_candidate,
)
if not candidate_satisfies_requirement:
break
should_disregard_pre_release_candidate = (
# NOTE: Do not discard pre-release candidates in the
# NOTE: following cases:
# NOTE: * the end-user requested pre-releases explicitly;
# NOTE: * the candidate is a concrete artifact (e.g. a
# NOTE: Git repository, subdirs, a tarball URL, or a
# NOTE: local dir or file etc.);
# NOTE: * the candidate's pre-release version exactly
# NOTE: matches a version specifically requested by one
# NOTE: of the requirements in the current match
# NOTE: discovery round (i.e. matching a requirement
# NOTE: that is not a range but an explicit specific
# NOTE: version pin). This works when some requirements
# NOTE: request version ranges but others (possibly on
# NOTE: different dependency tree level depths) demand
# NOTE: pre-release dependency versions, even if those
# NOTE: dependencies are transitive.
is_pre_release(tmp_candidate.ver)
and discarding_pre_releases_acceptable
and not (
self._with_pre_releases
or tmp_candidate.is_concrete_artifact
or version in all_pinned_requirement_version_numbers
)
)
if should_disregard_pre_release_candidate:
break
# FIXME
# candidate_is_from_requested_source = (
# requirement.src is None # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str
# or requirement.src == candidate.src
# )
# if not candidate_is_from_requested_source:
# break
if not self._include_signatures:
continue
extra_signature_sources.extend(requirement.signature_sources or [])
else: # candidate satisfies requirements, `break` never happened
if self._include_signatures:
for extra_source in extra_signature_sources:
signatures.append(get_signature_from_source(extra_source))
latest_matches.append(
Candidate(fqcn, version, src_server, 'galaxy', frozenset(signatures))
)
latest_matches.sort(
key=lambda candidate: (
SemanticVersion(candidate.ver), candidate.src,
),
reverse=True, # prefer newer versions over older ones
)
if not preinstalled_candidates:
preinstalled_candidates = {
candidate for candidate in self._preferred_candidates
if candidate.fqcn == fqcn and
(
# check if an upgrade is necessary
all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) and
(
not self._upgrade or
# check if an upgrade is preferred
all(SemanticVersion(latest.ver) <= SemanticVersion(candidate.ver) for latest in latest_matches)
)
)
}
return list(preinstalled_candidates) + latest_matches
def is_satisfied_by(
self,
requirement: Requirement,
candidate: Candidate,
) -> bool:
r"""Whether the given requirement is satisfiable by a candidate.
:param requirement: A requirement that produced the `candidate`.
:param candidate: A pinned candidate supposedly matching the \
`requirement` specifier. It is guaranteed to \
have been generated from the `requirement`.
:returns: Indication whether the `candidate` is a viable \
solution to the `requirement`.
"""
# NOTE: This is a set of Pipenv-inspired optimizations. Ref:
# https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74
if (
requirement.is_virtual or
candidate.is_virtual or
requirement.ver == '*'
):
return True
return meets_requirements(
version=candidate.ver,
requirements=requirement.ver,
)
def get_dependencies(self, candidate: Candidate) -> list[Requirement]:
r"""Get direct dependencies of a candidate.
:returns: A collection of requirements that `candidate` \
specifies as its dependencies.
"""
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
# NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collection_dependencies()` call because
# NOTE: internally it populates the artifact URL of the candidate,
# NOTE: its SHA hash and the Galaxy API token. These are still
# NOTE: necessary with `--no-deps` because even with the disabled
# NOTE: dependency resolution the outer layer will still need to
# NOTE: know how to download and validate the artifact.
#
# NOTE: Virtual candidates should always return dependencies
# NOTE: because they are ephemeral and non-installable.
if not self._with_deps and not candidate.is_virtual:
return []
return [
self._make_req_from_dict({'name': dep_name, 'version': dep_req})
for dep_name, dep_req in req_map.items()
]
| CollectionDependencyProvider |
python | pytest-dev__pytest | testing/example_scripts/unittest/test_setup_skip_module.py | {
"start": 229,
"end": 297
} | class ____(unittest.TestCase):
def test(self):
assert 0
| Base |
python | apache__airflow | providers/edge3/tests/unit/edge3/cli/test_dataclasses.py | {
"start": 1031,
"end": 1255
} | class ____:
def test_maintenance_marker_json(self):
marker = MaintenanceMarker(maintenance="maintenance", comments="comments")
assert marker == MaintenanceMarker.from_json(marker.json)
| TestMaintenanceMarker |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 3642,
"end": 3725
} | class ____:
def foo(self, x):
return _test_source() # Interval: [3,4]
| B8 |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer_swin.py | {
"start": 13421,
"end": 18071
} | class ____(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
if dim % num_heads != 0:
raise ValueError(
f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
)
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = (
window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
)
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
)
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
hidden_shape = (batch_size, dim, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in MaskFormerSwinModel forward() function)
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(
batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->MaskFormerSwin
| MaskFormerSwinSelfAttention |
python | pytorch__pytorch | torch/cuda/memory.py | {
"start": 1079,
"end": 1238
} | class ____(TypedDict):
"""Memory block information."""
size: int
requested_size: int
address: int
state: str
frames: list[_Frame]
| _Block |
python | realpython__materials | pyqt-calculator-tutorial/pycalc/pycalc.py | {
"start": 2262,
"end": 3588
} | class ____:
"""PyCalc's controller class."""
def __init__(self, model, view):
self._evaluate = model
self._view = view
self._connectSignalsAndSlots()
def _calculateResult(self):
result = self._evaluate(expression=self._view.displayText())
self._view.setDisplayText(result)
def _buildExpression(self, subExpression):
if self._view.displayText() == ERROR_MSG:
self._view.clearDisplay()
expression = self._view.displayText() + subExpression
self._view.setDisplayText(expression)
def _connectSignalsAndSlots(self):
for keySymbol, button in self._view.buttonMap.items():
if keySymbol not in {"=", "C"}:
button.clicked.connect(
partial(self._buildExpression, keySymbol)
)
self._view.buttonMap["="].clicked.connect(self._calculateResult)
self._view.display.returnPressed.connect(self._calculateResult)
self._view.buttonMap["C"].clicked.connect(self._view.clearDisplay)
def main():
"""PyCalc's main function."""
pycalcApp = QApplication([])
pycalcWindow = PyCalcWindow()
pycalcWindow.show()
PyCalc(model=evaluateExpression, view=pycalcWindow)
sys.exit(pycalcApp.exec())
if __name__ == "__main__":
main()
| PyCalc |
python | chroma-core__chroma | chromadb/segment/impl/vector/hnsw_params.py | {
"start": 2485,
"end": 3162
} | class ____(HnswParams):
batch_size: int
sync_threshold: int
def __init__(self, metadata: Metadata):
super().__init__(metadata)
self.batch_size = int(metadata.get("hnsw:batch_size", 100))
self.sync_threshold = int(metadata.get("hnsw:sync_threshold", 1000))
@staticmethod
def extract(metadata: Metadata) -> Metadata:
"""Returns only the relevant hnsw params"""
all_validators = {**param_validators, **persistent_param_validators}
segment_metadata = PersistentHnswParams._select(metadata)
PersistentHnswParams._validate(segment_metadata, all_validators)
return segment_metadata
| PersistentHnswParams |
python | numba__numba | numba/tests/test_dictobject.py | {
"start": 34652,
"end": 35488
} | class ____(TestCase, DictIterableCtor):
def setUp(self):
self.jit_enabled = True
def test_exception_no_iterable_arg(self):
@njit
def ctor():
return Dict(3)
msg = ".*No implementation of function.*"
with self.assertRaisesRegex(TypingError, msg):
ctor()
def test_exception_dict_mapping(self):
@njit
def ctor():
return Dict({1: 2, 3: 4})
msg = ".*No implementation of function.*"
with self.assertRaisesRegex(TypingError, msg):
ctor()
def test_exception_setitem(self):
@njit
def ctor():
return Dict(((1, 'a'), (2, 'b', 3)))
msg = ".*No implementation of function.*"
with self.assertRaisesRegex(TypingError, msg):
ctor()
| TestDictIterableCtorJit |
python | nryoung__algorithms | tests/test_sorting.py | {
"start": 4176,
"end": 4423
} | class ____(SortingAlgorithmTestCase):
"""
Tests Strand sort on a small range from 0-9
"""
def test_strandsort(self):
self.output = strand_sort.sort(self.input)
self.assertEqual(self.correct, self.output)
| TestStrandSort |
python | ray-project__ray | python/ray/tests/conftest_docker.py | {
"start": 7631,
"end": 8566
} | class ____:
def __call__(self):
with open("file.txt") as f:
return f.read().strip()
app = Model.bind()
"""
run_in_container(
[
["bash", "-c", "echo helloworldalice >> /tmp/file.txt"],
["bash", "-c", f"echo '{serve_app}' >> /tmp/serve_application.py"],
["podman", "create", "--name", "tmp_container", IMAGE_NAME],
["podman", "cp", "/tmp/file.txt", "tmp_container:/home/ray/file.txt"],
[
"podman",
"cp",
"/tmp/serve_application.py",
"tmp_container:/home/ray/serve_application.py",
],
["podman", "commit", "tmp_container", NESTED_IMAGE_NAME],
],
container_id,
)
# For debugging
run_in_container([["podman", "image", "ls"]], container_id)
yield container_id
subprocess.check_call(["docker", "kill", container_id])
| Model |
python | modin-project__modin | modin/core/storage_formats/base/query_compiler.py | {
"start": 3351,
"end": 5125
} | class ____(IntEnum): # noqa: PR01
"""
Coercion costs between different Query Compiler backends.
Coercion costs between query compilers can be expressed
as integers in the range 0 to 1000, where 1000 is
considered impossible. Since coercion costs can be a
function of many variables ( dataset size, partitioning,
network throughput, and query time ) we define a set range
of cost values to simplify comparisons between two query
compilers / engines in a unified way.
COST_ZERO means there is no cost associated, or that the query compilers
are the same.
COST_IMPOSSIBLE means the coercion is effectively impossible, which can
occur if the target system is unable to store the data as a result
of the coercion. Currently this does not prevent coercion.
"""
COST_ZERO = 0
COST_LOW = 250
COST_MEDIUM = 500
COST_HIGH = 750
COST_IMPOSSIBLE = 1000
@classmethod
def validate_coersion_cost(cls, cost: QCCoercionCost):
"""
Validate that the coercion cost is within range.
Parameters
----------
cost : QCCoercionCost
"""
if int(cost) < int(QCCoercionCost.COST_ZERO) or int(cost) > int(
QCCoercionCost.COST_IMPOSSIBLE
):
raise ValueError("Query compiler coercsion cost out of range")
# FIXME: many of the BaseQueryCompiler methods are hiding actual arguments
# by using *args and **kwargs. They should be spread into actual parameters.
# Currently actual arguments are placed in the methods docstrings, but since they're
# not presented in the function's signature it makes linter to raise `PR02: unknown parameters`
# warning. For now, they're silenced by using `noqa` (Modin issue #3108).
| QCCoercionCost |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2_test.py | {
"start": 40246,
"end": 56560
} | class ____(test.TestCase):
def test_keys_empty(self):
with self.assertRaisesRegex(ValueError,
'keys must be a list with length > 1'):
fc.crossed_column([], 10)
def test_keys_length_one(self):
with self.assertRaisesRegex(ValueError,
'keys must be a list with length > 1'):
fc.crossed_column(['a'], 10)
def test_key_type_unsupported(self):
with self.assertRaisesRegex(ValueError, 'Unsupported key type'):
fc.crossed_column(['a', fc.numeric_column('c')], 10)
with self.assertRaisesRegex(
ValueError, 'categorical_column_with_hash_bucket is not supported'):
fc.crossed_column(
['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)
def test_hash_bucket_size_negative(self):
with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], -1)
def test_hash_bucket_size_zero(self):
with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], 0)
def test_hash_bucket_size_none(self):
with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], None)
def test_name(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
self.assertTrue(crossed1._is_v2_column)
crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
self.assertTrue(crossed2._is_v2_column)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_is_v2_column(self):
a = fc_old._numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
self.assertTrue(crossed1._is_v2_column)
crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
self.assertFalse(crossed2._is_v2_column)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_leaf_keys_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d2', 'c'], 10)
crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_parse_spec(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 10)
self.assertEqual({
'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
'c': parsing_ops.VarLenFeature(dtypes.string),
}, crossed.parse_example_spec)
def test_num_buckets(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 15)
self.assertEqual(15, crossed.num_buckets)
def test_deep_copy(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
crossed2_copy = copy.deepcopy(crossed2)
self.assertEqual(
'a_bucketized_X_c_X_d1_X_d2',
crossed2_copy.name,
)
self.assertEqual(15, crossed2_copy.hash_bucket_size)
self.assertEqual(5, crossed2_copy.hash_key)
def test_parse_example(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
data = example_pb2.Example(
features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[20., 110.])),
'wire':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([price_cross_wire]))
self.assertIn('price', features)
self.assertIn('wire', features)
self.assertAllEqual([[20., 110.]], self.evaluate(features['price']))
wire_sparse = features['wire']
self.assertAllEqual([[0, 0], [0, 1]], self.evaluate(wire_sparse.indices))
# Use byte constants to pass the open-source test.
self.assertAllEqual([b'omar', b'stringer'],
self.evaluate(wire_sparse.values))
self.assertAllEqual([1, 2], self.evaluate(wire_sparse.dense_shape))
def test_transform_feature(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
hash_bucket_size = 10
price_cross_wire = fc.crossed_column([bucketized_price, 'wire'],
hash_bucket_size)
features = {
'price':
constant_op.constant([[1., 2.], [5., 6.]]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2]),
}
outputs = fc._transform_features_v2(features, [price_cross_wire], None)
output = outputs[price_cross_wire]
output_val = self.evaluate(output)
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]],
output_val.indices)
for val in output_val.values:
self.assertIn(val, list(range(hash_bucket_size)))
self.assertAllEqual([2, 4], output_val.dense_shape)
def test_get_sparse_tensors(self):
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
'd1':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d1A', 'd1B', 'd1C'],
dense_shape=(2, 2)),
'd2':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d2A', 'd2B', 'd2C'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed2.get_sparse_tensors(transformation_cache, None)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
id_tensor_eval = self.evaluate(id_weight_pair.id_tensor)
self.assertAllEqual(
((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
(1, 6), (1, 7), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
(1, 14), (1, 15)), id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0,
10, 11)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)
def test_get_sparse_tensors_simple(self):
"""Same as test_get_sparse_tensors, but with simpler values."""
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
transformation_cache = fc.FeatureTransformationCache({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed.get_sparse_tensors(transformation_cache, None)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
id_tensor_eval = self.evaluate(id_weight_pair.id_tensor)
self.assertAllEqual(((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (1, 0, 1, 3, 4, 2)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)
def test_old_linear_model(self):
"""Tests linear_model.
Uses data from test_get_sparse_tensors_simple.
"""
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session() as sess:
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
self.evaluate(crossed_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
sess.run(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
def test_old_linear_model_with_weights(self):
class _TestColumnWithWeights(BaseFeatureColumnForTests,
fc.CategoricalColumn,
fc_old._CategoricalColumn):
"""Produces sparse IDs and sparse weights."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
return 'test_column'
@property
def parse_example_spec(self):
return {
self.name:
parsing_ops.VarLenFeature(dtypes.int32),
'{}_weights'.format(self.name):
parsing_ops.VarLenFeature(dtypes.float32),
}
@property
def _parse_example_spec(self):
return self.parse_example_spec
@property
def num_buckets(self):
return 5
@property
def _num_buckets(self):
return self.num_buckets
def transform_feature(self, transformation_cache, state_manager):
raise ValueError('Should not be called.')
def _transform_feature(self, inputs):
return (inputs.get(self.name),
inputs.get('{}_weights'.format(self.name)))
def get_sparse_tensors(self, transformation_cache, state_manager):
raise ValueError('Should not be called.')
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Populates both id_tensor and weight_tensor."""
ids_and_weights = inputs.get(self)
return fc.CategoricalColumn.IdWeightPair(
id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
t = _TestColumnWithWeights()
crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
with self.assertRaisesRegex(
ValueError,
'crossed_column does not support weight_tensor.*{}'.format(t.name)):
fc_old.linear_model({
t.name:
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[0, 1, 2],
dense_shape=(2, 2)),
'{}_weights'.format(t.name):
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[1., 10., 2.],
dense_shape=(2, 2)),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
def test_old_linear_model_old_numeric(self):
"""Tests linear_model.
Uses data from test_get_sparse_tensors_simple.
"""
a = fc_old._numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session() as sess:
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
self.evaluate(crossed_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
sess.run(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
def test_serialization(self):
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
self.assertEqual([b, 'c'], crossed.parents)
config = crossed.get_config()
self.assertEqual({
'hash_bucket_size':
5,
'hash_key':
5,
'keys': ({
'config': {
'boundaries': (0, 1),
'source_column': {
'config': {
'dtype': 'int32',
'default_value': None,
'key': 'a',
'normalizer_fn': None,
'shape': (2,)
},
'class_name': 'NumericColumn'
}
},
'class_name': 'BucketizedColumn'
}, 'c')
}, config)
new_crossed = fc.CrossedColumn.from_config(config)
self.assertEqual(crossed, new_crossed)
self.assertIsNot(b, new_crossed.keys[0])
new_crossed = fc.CrossedColumn.from_config(
config,
columns_by_name={serialization._column_name_with_class_name(b): b})
self.assertEqual(crossed, new_crossed)
self.assertIs(b, new_crossed.keys[0])
| CrossedColumnTest |
python | coleifer__peewee | tests/prefetch_tests.py | {
"start": 808,
"end": 928
} | class ____(TestModel):
name = TextField()
parent = ForeignKeyField('self', backref='children', null=True)
| Category |
python | django__django | tests/admin_views/admin.py | {
"start": 28695,
"end": 29027
} | class ____(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
def get_formset_kwargs(self, request, obj, inline, prefix):
return {
**super().get_formset_kwargs(request, obj, inline, prefix),
"form_kwargs": {"initial": {"name": "overridden_name"}},
}
| CityAdmin |
python | python__mypy | mypyc/irbuild/match.py | {
"start": 1324,
"end": 12246
} | class ____(TraverserVisitor):
builder: IRBuilder
code_block: BasicBlock
next_block: BasicBlock
final_block: BasicBlock
subject: Value
match: MatchStmt
as_pattern: AsPattern | None = None
def __init__(self, builder: IRBuilder, match_node: MatchStmt) -> None:
self.builder = builder
self.code_block = BasicBlock()
self.next_block = BasicBlock()
self.final_block = BasicBlock()
self.match = match_node
self.subject = builder.accept(match_node.subject)
def build_match_body(self, index: int) -> None:
self.builder.activate_block(self.code_block)
guard = self.match.guards[index]
if guard:
self.code_block = BasicBlock()
cond = self.builder.accept(guard)
self.builder.add_bool_branch(cond, self.code_block, self.next_block)
self.builder.activate_block(self.code_block)
self.builder.accept(self.match.bodies[index])
self.builder.goto(self.final_block)
def visit_match_stmt(self, m: MatchStmt) -> None:
for i, pattern in enumerate(m.patterns):
self.code_block = BasicBlock()
self.next_block = BasicBlock()
pattern.accept(self)
self.build_match_body(i)
self.builder.activate_block(self.next_block)
self.builder.goto_and_activate(self.final_block)
def visit_value_pattern(self, pattern: ValuePattern) -> None:
value = self.builder.accept(pattern.expr)
cond = self.builder.binary_op(self.subject, value, "==", pattern.expr.line)
self.bind_as_pattern(value)
self.builder.add_bool_branch(cond, self.code_block, self.next_block)
def visit_or_pattern(self, pattern: OrPattern) -> None:
backup_block = self.next_block
self.next_block = BasicBlock()
for p in pattern.patterns:
# Hack to ensure the as pattern is bound to each pattern in the
# "or" pattern, but not every subpattern
backup = self.as_pattern
p.accept(self)
self.as_pattern = backup
self.builder.activate_block(self.next_block)
self.next_block = BasicBlock()
self.next_block = backup_block
self.builder.goto(self.next_block)
def visit_class_pattern(self, pattern: ClassPattern) -> None:
# TODO: use faster instance check for native classes (while still
# making sure to account for inheritance)
isinstance_op = (
fast_isinstance_op
if self.builder.is_builtin_ref_expr(pattern.class_ref)
else slow_isinstance_op
)
cond = self.builder.primitive_op(
isinstance_op, [self.subject, self.builder.accept(pattern.class_ref)], pattern.line
)
self.builder.add_bool_branch(cond, self.code_block, self.next_block)
self.bind_as_pattern(self.subject, new_block=True)
if pattern.positionals:
if pattern.class_ref.fullname in MATCHABLE_BUILTINS:
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
pattern.positionals[0].accept(self)
return
node = pattern.class_ref.node
assert isinstance(node, TypeInfo), node
match_args = extract_dunder_match_args_names(node)
for i, expr in enumerate(pattern.positionals):
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
# TODO: use faster "get_attr" method instead when calling on native or
# builtin objects
positional = self.builder.py_get_attr(self.subject, match_args[i], expr.line)
with self.enter_subpattern(positional):
expr.accept(self)
for key, value in zip(pattern.keyword_keys, pattern.keyword_values):
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
# TODO: same as above "get_attr" comment
attr = self.builder.py_get_attr(self.subject, key, value.line)
with self.enter_subpattern(attr):
value.accept(self)
def visit_as_pattern(self, pattern: AsPattern) -> None:
if pattern.pattern:
old_pattern = self.as_pattern
self.as_pattern = pattern
pattern.pattern.accept(self)
self.as_pattern = old_pattern
elif pattern.name:
target = self.builder.get_assignment_target(pattern.name)
self.builder.assign(target, self.subject, pattern.line)
self.builder.goto(self.code_block)
def visit_singleton_pattern(self, pattern: SingletonPattern) -> None:
if pattern.value is None:
obj = self.builder.none_object()
elif pattern.value is True:
obj = self.builder.true()
else:
obj = self.builder.false()
cond = self.builder.binary_op(self.subject, obj, "is", pattern.line)
self.builder.add_bool_branch(cond, self.code_block, self.next_block)
def visit_mapping_pattern(self, pattern: MappingPattern) -> None:
is_dict = self.builder.call_c(supports_mapping_protocol, [self.subject], pattern.line)
self.builder.add_bool_branch(is_dict, self.code_block, self.next_block)
keys: list[Value] = []
for key, value in zip(pattern.keys, pattern.values):
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
key_value = self.builder.accept(key)
keys.append(key_value)
exists = self.builder.call_c(mapping_has_key, [self.subject, key_value], pattern.line)
self.builder.add_bool_branch(exists, self.code_block, self.next_block)
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
item = self.builder.gen_method_call(
self.subject, "__getitem__", [key_value], object_rprimitive, pattern.line
)
with self.enter_subpattern(item):
value.accept(self)
if pattern.rest:
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
rest = self.builder.primitive_op(dict_copy, [self.subject], pattern.rest.line)
target = self.builder.get_assignment_target(pattern.rest)
self.builder.assign(target, rest, pattern.rest.line)
for i, key_name in enumerate(keys):
self.builder.call_c(dict_del_item, [rest, key_name], pattern.keys[i].line)
self.builder.goto(self.code_block)
def visit_sequence_pattern(self, seq_pattern: SequencePattern) -> None:
star_index, capture, patterns = prep_sequence_pattern(seq_pattern)
is_list = self.builder.call_c(supports_sequence_protocol, [self.subject], seq_pattern.line)
self.builder.add_bool_branch(is_list, self.code_block, self.next_block)
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
actual_len = self.builder.call_c(generic_ssize_t_len_op, [self.subject], seq_pattern.line)
min_len = len(patterns)
is_long_enough = self.builder.binary_op(
actual_len,
self.builder.load_int(min_len),
"==" if star_index is None else ">=",
seq_pattern.line,
)
self.builder.add_bool_branch(is_long_enough, self.code_block, self.next_block)
for i, pattern in enumerate(patterns):
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
if star_index is not None and i >= star_index:
current = self.builder.binary_op(
actual_len, self.builder.load_int(min_len - i), "-", pattern.line
)
else:
current = self.builder.load_int(i)
item = self.builder.call_c(sequence_get_item, [self.subject, current], pattern.line)
with self.enter_subpattern(item):
pattern.accept(self)
if capture and star_index is not None:
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
capture_end = self.builder.binary_op(
actual_len, self.builder.load_int(min_len - star_index), "-", capture.line
)
rest = self.builder.call_c(
sequence_get_slice,
[self.subject, self.builder.load_int(star_index), capture_end],
capture.line,
)
target = self.builder.get_assignment_target(capture)
self.builder.assign(target, rest, capture.line)
self.builder.goto(self.code_block)
def bind_as_pattern(self, value: Value, new_block: bool = False) -> None:
if self.as_pattern and self.as_pattern.pattern and self.as_pattern.name:
if new_block:
self.builder.activate_block(self.code_block)
self.code_block = BasicBlock()
target = self.builder.get_assignment_target(self.as_pattern.name)
self.builder.assign(target, value, self.as_pattern.pattern.line)
self.as_pattern = None
if new_block:
self.builder.goto(self.code_block)
@contextmanager
def enter_subpattern(self, subject: Value) -> Generator[None]:
old_subject = self.subject
self.subject = subject
yield
self.subject = old_subject
def prep_sequence_pattern(
seq_pattern: SequencePattern,
) -> tuple[int | None, NameExpr | None, list[Pattern]]:
star_index: int | None = None
capture: NameExpr | None = None
patterns: list[Pattern] = []
for i, pattern in enumerate(seq_pattern.patterns):
if isinstance(pattern, StarredPattern):
star_index = i
capture = pattern.capture
else:
patterns.append(pattern)
return star_index, capture, patterns
def extract_dunder_match_args_names(info: TypeInfo) -> list[str]:
ty = info.names.get("__match_args__")
assert ty
match_args_type = get_proper_type(ty.type)
assert isinstance(match_args_type, TupleType), match_args_type
match_args: list[str] = []
for item in match_args_type.items:
proper_item = get_proper_type(item)
match_arg = None
if isinstance(proper_item, Instance) and proper_item.last_known_value:
match_arg = proper_item.last_known_value.value
elif isinstance(proper_item, LiteralType):
match_arg = proper_item.value
assert isinstance(match_arg, str), f"Unrecognized __match_args__ item: {item}"
match_args.append(match_arg)
return match_args
| MatchVisitor |
python | ApeWorX__ape | src/ape/types/coverage.py | {
"start": 6912,
"end": 9675
} | class ____(BaseModel):
"""
An individual contract's coverage.
"""
name: str
"""
The name of the contract.
"""
functions: list[FunctionCoverage] = []
"""
The coverage of each function individually.
"""
@property
def statements(self) -> list[CoverageStatement]:
"""
All valid coverage lines from every function in this contract.
"""
return list(itertools.chain.from_iterable(f.statements for f in self.functions))
@property
def lines_covered(self) -> NonNegativeInt:
"""
All lines that have a hit count greater than zero.
"""
return sum(funcs.lines_covered for funcs in self.functions)
@property
def lines_valid(self) -> NonNegativeInt:
"""
The number of lines valid for coverage.
"""
return len(self.statements)
@property
def miss_count(self) -> NonNegativeInt:
"""
The number of lines missed.
"""
return self.lines_valid - self.lines_covered
@property
def line_rate(self) -> float:
"""
The number of lines hit divided by number of lines.
"""
return self.lines_covered / self.lines_valid
@property
def function_hits(self) -> NonNegativeInt:
"""
The number of functions with a hit counter greater than zero.
"""
return len([fn for fn in self.functions if fn.hit_count > 0])
@property
def function_rate(self) -> float:
"""
The rate of functions hit versus total functions.
"""
return self.function_hits / len(self.functions)
def __getitem__(self, function_name: str) -> FunctionCoverage:
func = self.get_function(function_name)
if func:
return func
raise KeyError(f"Function '{function_name}' not found.")
def model_dump(self, *args, **kwargs) -> dict:
attribs = super().model_dump(*args, **kwargs)
# Add coverage stats.
attribs["lines_covered"] = self.lines_covered
attribs["lines_valid"] = self.lines_valid
attribs["line_rate"] = self.line_rate
return attribs
def include(self, name: str, full_name: str) -> FunctionCoverage:
# Make sure function is included in coverage.
func_cov = self.get_function(full_name)
if func_cov:
return func_cov
func_cov = FunctionCoverage(name=name, full_name=full_name)
self.functions.append(func_cov)
return func_cov
def get_function(self, full_name: str) -> Optional[FunctionCoverage]:
for func in self.functions:
if func.full_name == full_name:
return func
return None
| ContractCoverage |
python | xlwings__xlwings | tests/test_conversion.py | {
"start": 682,
"end": 4764
} | class ____(TestBase):
def test_transpose(self):
self.wb1.sheets[0].range("A1").options(transpose=True).value = [
[1.0, 2.0],
[3.0, 4.0],
]
self.assertEqual(
self.wb1.sheets[0].range("A1:B2").value, [[1.0, 3.0], [2.0, 4.0]]
)
def test_dictionary(self):
d = {"a": 1.0, "b": 2.0}
self.wb1.sheets[0].range("A1").value = d
self.assertEqual(d, self.wb1.sheets[0].range("A1:B2").options(dict).value)
def test_ordereddictionary(self):
d = OrderedDict({"a": 1.0, "b": 2.0})
self.wb1.sheets[0].range("A1").value = d
self.assertEqual(
d, self.wb1.sheets[0].range("A1:B2").options(OrderedDict).value
)
def test_integers(self):
"""test_integers: Covers GH 227"""
self.wb1.sheets[0].range("A99").value = 2147483647 # max SInt32
self.assertEqual(self.wb1.sheets[0].range("A99").value, 2147483647)
self.wb1.sheets[0].range("A100").value = 2147483648 # SInt32 < x < SInt64
self.assertEqual(self.wb1.sheets[0].range("A100").value, 2147483648)
self.wb1.sheets[0].range("A101").value = 10000000000000000000 # long
self.assertEqual(self.wb1.sheets[0].range("A101").value, 10000000000000000000)
def test_datetime_timezone(self):
eastern = pytz.timezone("US/Eastern")
dt_naive = dt.datetime(2002, 10, 27, 6, 0, 0)
dt_tz = eastern.localize(dt_naive)
self.wb1.sheets[0].range("F34").value = dt_tz
self.assertEqual(self.wb1.sheets[0].range("F34").value, dt_naive)
def test_date(self):
date_1 = dt.date(2000, 12, 3)
self.wb1.sheets[0].range("X1").value = date_1
date_2 = self.wb1.sheets[0].range("X1").value
self.assertEqual(date_1, dt.date(date_2.year, date_2.month, date_2.day))
def test_list(self):
# 1d List Row
list_row_1d = [1.1, None, 3.3]
self.wb1.sheets[0].range("A27").value = list_row_1d
cells = self.wb1.sheets[0].range("A27:C27").value
self.assertEqual(list_row_1d, cells)
# 2d List Row
list_row_2d = [[1.1, None, 3.3]]
self.wb1.sheets[0].range("A29").value = list_row_2d
cells = self.wb1.sheets[0].range("A29:C29").options(ndim=2).value
self.assertEqual(list_row_2d, cells)
# 1d List Col
list_col = [[1.1], [None], [3.3]]
self.wb1.sheets[0].range("A31").value = list_col
cells = self.wb1.sheets[0].range("A31:A33").value
self.assertEqual([i[0] for i in list_col], cells)
# 2d List Col
cells = self.wb1.sheets[0].range("A31:A33").options(ndim=2).value
self.assertEqual(list_col, cells)
def test_none(self):
"""test_none: Covers GH Issue #16"""
# None
self.wb1.sheets[0].range("A7").value = None
self.assertEqual(None, self.wb1.sheets[0].range("A7").value)
# List
self.wb1.sheets[0].range("A7").value = [None, None]
self.assertEqual(
None, self.wb1.sheets[0].range("A7").expand("horizontal").value
)
def test_ndim2_scalar(self):
"""test_atleast_2d_scalar: Covers GH Issue #53a"""
self.wb1.sheets[0].range("A50").value = 23
result = self.wb1.sheets[0].range("A50").options(ndim=2).value
self.assertEqual([[23]], result)
def test_write_single_value_to_multicell_range(self):
self.wb1.sheets[0].range("A1:B2").value = 5
self.assertEqual(
self.wb1.sheets[0].range("A1:B2").value, [[5.0, 5.0], [5.0, 5.0]]
)
def test_formatter(self):
self.wb1.sheets[0]["A50"].options(formatter=table_formatter).value = [
["one", "two"],
[1, 2],
[3, 4],
[5, 6],
]
self.assertEqual(self.wb1.sheets[0]["A50:B50"].color, (169, 208, 142))
self.assertEqual(self.wb1.sheets[0]["A51:B51"].color, (208, 206, 206))
self.assertIsNone(self.wb1.sheets[0]["A52:B52"].color)
@unittest.skipIf(np is None, "numpy missing")
| TestConverter |
python | getsentry__sentry | fixtures/page_objects/explore_logs.py | {
"start": 124,
"end": 1871
} | class ____(BasePage):
def __init__(self, browser, client):
super().__init__(browser)
self.client = client
self.global_selection = GlobalSelectionPage(browser)
def visit_explore_logs(self, org):
self.browser.get(f"/organizations/{org}/explore/logs/")
self.wait_until_loaded()
def toggle_log_row_with_message(self, message):
row = self.get_log_row_with_message(message)
try:
expanded_count = len(
self.browser.find_elements(By.CSS_SELECTOR, '*[data-test-id="fields-tree"]')
)
except Exception:
expanded_count = 0
if expanded_count > 0:
row.click()
# If this is breaking make sure to only have one row expanded at a time.
# TODO: Target the correct field-tree with xpath.
self.browser.wait_until_not('[data-test-id="fields-tree"]')
else:
row.click()
self.browser.wait_until('[data-test-id="fields-tree"]')
return row
def get_log_row_with_message(self, message):
row = self.browser.find_element(
by=By.XPATH,
value=f'//*[@data-test-id="log-table-row" and .//*[contains(text(),"{message}")]]',
)
return row
def get_log_row_columns(self, row):
# The expanded row actually makes a new sibling row that contains the fields-tree.
columns = row.find_elements(
By.XPATH, 'following-sibling::*[1]//*[@data-test-id="attribute-tree-column"]'
)
return columns
def wait_until_loaded(self):
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_test_id("logs-table")
| ExploreLogsPage |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-hubspot/llama_index/readers/hubspot/base.py | {
"start": 148,
"end": 1399
} | class ____(BaseReader):
"""
Hubspot reader. Reads data from a Hubspot account.
Args:
access_token(str): Hubspot API key.
"""
def __init__(self, access_token: str) -> None:
"""Initialize Hubspot reader."""
self.access_token = access_token
def load_data(self) -> List[Document]:
"""
Load deals, contacts and companies data from Hubspot.
Returns:
List[Document]: List of documents, where each document represensts a list of Hubspot objects
"""
from hubspot import HubSpot
api_client = HubSpot(access_token=self.access_token)
all_deals = api_client.crm.deals.get_all()
all_contacts = api_client.crm.contacts.get_all()
all_companies = api_client.crm.companies.get_all()
return [
Document(
text=f"{all_deals}".replace("\n", ""), extra_info={"type": "deals"}
),
Document(
text=f"{all_contacts}".replace("\n", ""),
extra_info={"type": "contacts"},
),
Document(
text=f"{all_companies}".replace("\n", ""),
extra_info={"type": "companies"},
),
]
| HubspotReader |
python | sqlalchemy__sqlalchemy | examples/asyncio/async_orm.py | {
"start": 731,
"end": 1000
} | class ____(Base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[Optional[str]]
create_date: Mapped[datetime.datetime] = mapped_column(
server_default=func.now()
)
bs: Mapped[List[B]] = relationship()
| A |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 32529,
"end": 32856
} | class ____(
MixinNoReferrerWhenDowngrade, TestRefererMiddleware
):
"""
The empty string means "no-referrer-when-downgrade"
"""
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.OriginWhenCrossOriginPolicy"
}
resp_headers = {"Referrer-Policy": ""}
| TestPolicyHeaderPrecedence004 |
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 4127,
"end": 5132
} | class ____(BaseModel):
model_config = ConfigDict(extra="allow")
type: str
@model_validator(mode="after")
def check_type(self) -> "OutputItem":
if self.type == "message":
ResponseOutputMessage(**self.model_dump())
elif self.type == "function_call":
ResponseFunctionToolCall(**self.model_dump())
elif self.type == "reasoning":
ResponseReasoningItem(**self.model_dump())
elif self.type == "function_call_output":
FunctionCallOutput(**self.model_dump())
elif self.type == "mcp_approval_request":
McpApprovalRequest(**self.model_dump())
elif self.type == "mcp_approval_response":
McpApprovalResponse(**self.model_dump())
elif self.type not in {
"file_search_call",
"computer_call",
"web_search_call",
}:
raise ValueError(f"Invalid type: {self.type} for {self.__class__.__name__}")
return self
| OutputItem |
python | mahmoud__glom | glom/streaming.py | {
"start": 12926,
"end": 14099
} | class ____:
"""Get the first element of an iterable which matches *key*, if there
is one, otherwise return *default* (``None`` if unset).
>>> is_odd = lambda x: x % 2
>>> glom([0, 1, 2, 3], First(is_odd))
1
>>> glom([0, 2, 4], First(is_odd, default=False))
False
"""
# The impl of this ain't pretty and basically just exists for a
# nicer-looking repr. (Iter(), First()) is the equivalent of doing
# (Iter().filter(spec), Call(first, args=(T,), kwargs={'default':
# default}))
__slots__ = ('_spec', '_default', '_first')
def __init__(self, key=T, default=None):
self._spec = key
self._default = default
spec_glom = Spec(Call(partial, args=(Spec(self._spec).glom,), kwargs={'scope': S}))
self._first = Call(first, args=(T,), kwargs={'default': default, 'key': spec_glom})
def glomit(self, target, scope):
return self._first.glomit(target, scope)
def __repr__(self):
cn = self.__class__.__name__
if self._default is None:
return f'{cn}({bbrepr(self._spec)})'
return f'{cn}({bbrepr(self._spec)}, default={bbrepr(self._default)})'
| First |
python | ray-project__ray | python/ray/data/tests/test_projection_fusion.py | {
"start": 1176,
"end": 53647
} | class ____:
"""Test topological sorting in projection pushdown fusion."""
@pytest.fixture(autouse=True)
def setup(self):
"""Set up test fixtures."""
self.context = DataContext.get_current()
# Create UDFs for testing
@udf(return_dtype=DataType.int64())
def multiply_by_two(x: pa.Array) -> pa.Array:
return pc.multiply(x, 2)
@udf(return_dtype=DataType.int64())
def add_one(x: pa.Array) -> pa.Array:
return pc.add(x, 1)
@udf(return_dtype=DataType.float64())
def divide_by_three(x: pa.Array) -> pa.Array:
# Convert to float to ensure floating point division
return pc.divide(pc.cast(x, pa.float64()), 3.0)
self.udfs = {
"multiply_by_two": multiply_by_two,
"add_one": add_one,
"divide_by_three": divide_by_three,
}
def _create_input_op(self):
"""Create a dummy input operator."""
return InputData(input_data=[])
def _parse_expression(self, expr_desc: str):
"""Parse expression description into actual expression object."""
# Enhanced parser for test expressions
expr_map = {
"col('id')": col("id"),
"col('id') + 10": col("id") + 10,
"col('id') * 2": col("id") * 2,
"col('id') - 5": col("id") - 5,
"col('id') + 1": col("id") + 1,
"col('id') - 1": col("id") - 1,
"col('id') - 3": col("id") - 3,
"col('step1') * 2": col("step1") * 2,
"col('step2') + 1": col("step2") + 1,
"col('a') + col('b')": col("a") + col("b"),
"col('c') + col('d')": col("c") + col("d"),
"col('e') * 3": col("e") * 3,
"col('a') + 1": col("a") + 1,
"multiply_by_two(col('id'))": self.udfs["multiply_by_two"](col("id")),
"multiply_by_two(col('id')) + col('plus_ten')": (
self.udfs["multiply_by_two"](col("id")) + col("plus_ten")
),
"col('times_three') > col('plus_ten')": (
col("times_three") > col("plus_ten")
),
"multiply_by_two(col('x'))": self.udfs["multiply_by_two"](col("x")),
"add_one(col('id'))": self.udfs["add_one"](col("id")),
"multiply_by_two(col('plus_one'))": self.udfs["multiply_by_two"](
col("plus_one")
),
"divide_by_three(col('times_two'))": self.udfs["divide_by_three"](
col("times_two")
),
}
if expr_desc in expr_map:
return expr_map[expr_desc]
else:
raise ValueError(f"Unknown expression: {expr_desc}")
def _create_project_chain(self, input_op, expressions_list: List[Dict[str, str]]):
"""Create a chain of Project operators from expression descriptions."""
current_op = input_op
for expr_dict in expressions_list:
# Convert dictionary to list of named expressions
exprs = []
for name, desc in expr_dict.items():
expr = self._parse_expression(desc)
named_expr = expr.alias(name)
exprs.append(named_expr)
current_op = Project(current_op, exprs=[star()] + exprs, ray_remote_args={})
return current_op
def _extract_levels_from_plan(self, plan: LogicalPlan) -> List[Set[str]]:
"""Extract expression levels from optimized plan."""
current = plan.dag
levels = []
while isinstance(current, Project):
# Extract names, ignoring StarExpr (not a named column)
levels.append(
{expr.name for expr in current.exprs if not isinstance(expr, StarExpr)}
)
current = current.input_dependency
return list(reversed(levels)) # Return bottom-up order
def _count_project_operators(self, plan: LogicalPlan) -> int:
"""Count the number of Project operators in the plan."""
current = plan.dag
count = 0
while current:
if isinstance(current, Project):
count += 1
current = getattr(current, "input_dependency", None)
return count
def _describe_plan_structure(self, plan: LogicalPlan) -> str:
"""Generate a description of the plan structure."""
current = plan.dag
operators = []
while current:
if isinstance(current, Project):
expr_count = len(current.exprs) if current.exprs else 0
operators.append(f"Project({expr_count} exprs)")
else:
operators.append(current.__class__.__name__)
current = getattr(current, "input_dependency", None)
return " -> ".join(operators)
@pytest.mark.parametrize(
"test_case",
[
FusionTestCase(
name="no_dependencies",
expressions_list=[
{"doubled": "col('id') * 2", "plus_five": "col('id') + 10"},
{"minus_three": "col('id') - 3"},
],
expected_levels=1,
expected_level_contents=[{"doubled", "plus_five", "minus_three"}],
description="Independent expressions should fuse into single operator",
),
FusionTestCase(
name="simple_chain",
expressions_list=[
{"step1": "col('id') + 10"},
{"step2": "col('step1') * 2"},
{"step3": "col('step2') + 1"},
],
expected_levels=1,
expected_level_contents=[
{"step1", "step2", "step3"}
], # All in one level
description="All expressions fuse into single operator with OrderedDict preservation",
),
FusionTestCase(
name="mixed_udf_regular",
expressions_list=[
{"plus_ten": "col('id') + 10"},
{"times_three": "multiply_by_two(col('id'))"},
{"minus_five": "col('id') - 5"},
{
"udf_plus_regular": "multiply_by_two(col('id')) + col('plus_ten')"
},
{"comparison": "col('times_three') > col('plus_ten')"},
],
expected_levels=1,
expected_level_contents=[
{
"plus_ten",
"times_three",
"minus_five",
"udf_plus_regular",
"comparison",
}
],
description="All expressions fuse into single operator",
),
FusionTestCase(
name="complex_graph",
expressions_list=[
{"a": "col('id') + 1", "b": "col('id') * 2"},
{"c": "col('a') + col('b')"},
{"d": "col('id') - 1"},
{"e": "col('c') + col('d')"},
{"f": "col('e') * 3"},
],
expected_levels=1,
expected_level_contents=[{"a", "b", "c", "d", "e", "f"}],
description="All expressions fuse into single operator",
),
FusionTestCase(
name="udf_dependency_chain",
expressions_list=[
{"plus_one": "add_one(col('id'))"},
{"times_two": "multiply_by_two(col('plus_one'))"},
{"div_three": "divide_by_three(col('times_two'))"},
],
expected_levels=1, # Changed from 3 to 1
expected_level_contents=[{"plus_one", "times_two", "div_three"}],
description="All UDF expressions fuse into single operator with preserved order",
),
],
)
def test_fusion_scenarios(self, test_case: FusionTestCase):
"""Test various fusion scenarios with simplified single-operator fusion."""
input_op = self._create_input_op()
final_op = self._create_project_chain(input_op, test_case.expressions_list)
# Apply projection pushdown
plan = LogicalPlan(final_op, self.context)
rule = ProjectionPushdown()
optimized_plan = rule.apply(plan)
# Extract levels from optimized plan
actual_levels = self._extract_levels_from_plan(optimized_plan)
# Verify number of levels
assert len(actual_levels) == test_case.expected_levels, (
f"{test_case.name}: Expected {test_case.expected_levels} operators, "
f"got {len(actual_levels)}. Actual operators: {actual_levels}"
)
# Verify level contents (more flexible matching)
for i, expected_content in enumerate(test_case.expected_level_contents):
assert expected_content.issubset(actual_levels[i]), (
f"{test_case.name}: Operator {i} missing expressions. "
f"Expected {expected_content} to be subset of {actual_levels[i]}"
)
def test_pairwise_fusion_behavior(self, ray_start_regular_shared):
"""Test to understand how pairwise fusion works in practice."""
input_data = [{"id": i} for i in range(10)]
# Test with 2 operations (should fuse to 1)
ds2 = ray.data.from_items(input_data)
ds2 = ds2.with_column("col1", col("id") + 1)
ds2 = ds2.with_column("col2", col("id") * 2)
count2 = self._count_project_operators(ds2._logical_plan)
print(f"2 operations -> {count2} operators")
# Test with 3 operations
ds3 = ray.data.from_items(input_data)
ds3 = ds3.with_column("col1", col("id") + 1)
ds3 = ds3.with_column("col2", col("id") * 2)
ds3 = ds3.with_column("col3", col("id") - 1)
count3 = self._count_project_operators(ds3._logical_plan)
print(f"3 operations -> {count3} operators")
# Test with 4 operations
ds4 = ray.data.from_items(input_data)
ds4 = ds4.with_column("col1", col("id") + 1)
ds4 = ds4.with_column("col2", col("id") * 2)
ds4 = ds4.with_column("col3", col("id") - 1)
ds4 = ds4.with_column("col4", col("id") + 5)
count4 = self._count_project_operators(ds4._logical_plan)
print(f"4 operations -> {count4} operators")
# Verify that fusion is happening (fewer operators than original)
assert count2 <= 2, f"2 operations should result in ≤2 operators, got {count2}"
assert count3 <= 3, f"3 operations should result in ≤3 operators, got {count3}"
assert count4 <= 4, f"4 operations should result in ≤4 operators, got {count4}"
# Verify correctness
result2 = ds2.take(1)[0]
result3 = ds3.take(1)[0]
result4 = ds4.take(1)[0]
assert result2 == {"id": 0, "col1": 1, "col2": 0}
assert result3 == {"id": 0, "col1": 1, "col2": 0, "col3": -1}
assert result4 == {"id": 0, "col1": 1, "col2": 0, "col3": -1, "col4": 5}
def test_optimal_fusion_with_single_chain(self, ray_start_regular_shared):
"""Test fusion when all operations are added in a single chain (ideal case)."""
input_data = [{"id": i} for i in range(10)]
# Create a single Project operator with multiple expressions
# This simulates what would happen with perfect fusion
ds = ray.data.from_items(input_data)
# Apply multiple operations that should all be independent
expressions = {
"col1": col("id") + 1,
"col2": col("id") * 2,
"col3": col("id") - 1,
"col4": col("id") + 5,
"col5": col("id") * 3,
}
# Use map_batches to create a single operation that does everything
def apply_all_expressions(batch):
import pyarrow.compute as pc
result = batch.to_pydict()
result["col1"] = pc.add(batch["id"], 1)
result["col2"] = pc.multiply(batch["id"], 2)
result["col3"] = pc.subtract(batch["id"], 1)
result["col4"] = pc.add(batch["id"], 5)
result["col5"] = pc.multiply(batch["id"], 3)
return pa.table(result)
ds_optimal = ds.map_batches(apply_all_expressions, batch_format="pyarrow")
# Compare with the with_column approach
ds_with_column = ds
for col_name, expr in expressions.items():
ds_with_column = ds_with_column.with_column(col_name, expr)
# Convert both to pandas for reliable comparison
result_optimal_df = ds_optimal.to_pandas()
result_with_column_df = ds_with_column.to_pandas()
# Sort columns before comparison
result_optimal_df = result_optimal_df[sorted(result_optimal_df.columns)]
result_with_column_df = result_with_column_df[
sorted(result_with_column_df.columns)
]
# Compare using rows_same (deterministic, ignores order)
assert rows_same(result_optimal_df, result_with_column_df)
def test_basic_fusion_works(self, ray_start_regular_shared):
"""Test that basic fusion of two independent operations works."""
input_data = [{"id": i} for i in range(5)]
# Create dataset with two independent operations
ds = ray.data.from_items(input_data)
ds = ds.with_column("doubled", col("id") * 2)
ds = ds.with_column("plus_one", col("id") + 1)
# Check before optimization
original_count = self._count_project_operators(ds._logical_plan)
print(f"Before optimization: {original_count} operators")
# Apply optimization
rule = ProjectionPushdown()
optimized_plan = rule.apply(ds._logical_plan)
# Check after optimization
optimized_count = self._count_project_operators(optimized_plan)
print(f"After optimization: {optimized_count} operators")
# Two independent operations should fuse into one
assert (
optimized_count == 1
), f"Two independent operations should fuse to 1 operator, got {optimized_count}"
# Verify correctness using rows_same
from ray.data.dataset import Dataset
optimized_ds = Dataset(ds._plan, optimized_plan)
result_df = optimized_ds.to_pandas()
expected_df = pd.DataFrame(
{
"id": [0, 1, 2, 3, 4],
"doubled": [0, 2, 4, 6, 8],
"plus_one": [1, 2, 3, 4, 5],
}
)
# Sort columns before comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
def test_dependency_prevents_fusion(self, ray_start_regular_shared):
"""Test that dependencies are handled in single operator with OrderedDict."""
input_data = [{"id": i} for i in range(5)]
# Create dataset with dependency chain
ds = ray.data.from_items(input_data)
ds = ds.with_column("doubled", col("id") * 2)
ds = ds.with_column(
"doubled_plus_one", col("doubled") + 1
) # Depends on doubled
# Check before optimization
original_count = self._count_project_operators(ds._logical_plan)
print(f"Before optimization: {original_count} operators")
# Apply optimization
rule = ProjectionPushdown()
optimized_plan = rule.apply(ds._logical_plan)
# Check after optimization
optimized_count = self._count_project_operators(optimized_plan)
print(f"After optimization: {optimized_count} operators")
# Should have 1 operator now (changed from 2)
assert (
optimized_count == 1
), f"All operations should fuse into 1 operator, got {optimized_count}"
# Verify correctness using rows_same
from ray.data.dataset import Dataset
optimized_ds = Dataset(ds._plan, optimized_plan)
result_df = optimized_ds.to_pandas()
expected_df = pd.DataFrame(
{
"id": [0, 1, 2, 3, 4],
"doubled": [0, 2, 4, 6, 8],
"doubled_plus_one": [1, 3, 5, 7, 9],
}
)
# Sort columns before comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
def test_mixed_udf_regular_end_to_end(self, ray_start_regular_shared):
"""Test the exact failing scenario from the original issue."""
input_data = [{"id": i} for i in range(5)]
# Create dataset with mixed UDF and regular expressions (the failing test case)
ds = ray.data.from_items(input_data)
ds = ds.with_column("plus_ten", col("id") + 10)
ds = ds.with_column(
"times_three", self.udfs["multiply_by_two"](col("id"))
) # Actually multiply by 2
ds = ds.with_column("minus_five", col("id") - 5)
ds = ds.with_column(
"udf_plus_regular",
self.udfs["multiply_by_two"](col("id")) + col("plus_ten"),
)
ds = ds.with_column("comparison", col("times_three") > col("plus_ten"))
# Apply optimization
rule = ProjectionPushdown()
optimized_plan = rule.apply(ds._logical_plan)
# Verify execution correctness
from ray.data.dataset import Dataset
optimized_ds = Dataset(ds._plan, optimized_plan)
result_df = optimized_ds.to_pandas()
expected_df = pd.DataFrame(
{
"id": [0, 1, 2, 3, 4],
"plus_ten": [10, 11, 12, 13, 14], # id + 10
"times_three": [0, 2, 4, 6, 8], # id * 2 (multiply_by_two UDF)
"minus_five": [-5, -4, -3, -2, -1], # id - 5
"udf_plus_regular": [10, 13, 16, 19, 22], # (id * 2) + (id + 10)
"comparison": [
False,
False,
False,
False,
False,
], # times_three > plus_ten
}
)
# Sort columns before comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
# Verify that we have 1 operator (changed from multiple)
optimized_count = self._count_project_operators(optimized_plan)
assert (
optimized_count == 1
), f"Expected 1 operator with all expressions fused, got {optimized_count}"
def test_optimal_fusion_comparison(self, ray_start_regular_shared):
"""Compare optimized with_column approach against manual map_batches."""
input_data = [{"id": i} for i in range(10)]
# Create dataset using with_column (will be optimized)
ds_with_column = ray.data.from_items(input_data)
ds_with_column = ds_with_column.with_column("col1", col("id") + 1)
ds_with_column = ds_with_column.with_column("col2", col("id") * 2)
ds_with_column = ds_with_column.with_column("col3", col("id") - 1)
ds_with_column = ds_with_column.with_column("col4", col("id") + 5)
ds_with_column = ds_with_column.with_column("col5", col("id") * 3)
# Apply optimization
rule = ProjectionPushdown()
optimized_plan = rule.apply(ds_with_column._logical_plan)
from ray.data.dataset import Dataset
optimized_ds = Dataset(ds_with_column._plan, optimized_plan)
# Create dataset using single map_batches (optimal case)
ds_optimal = ray.data.from_items(input_data)
def apply_all_expressions(batch):
import pyarrow.compute as pc
result = batch.to_pydict()
result["col1"] = pc.add(batch["id"], 1)
result["col2"] = pc.multiply(batch["id"], 2)
result["col3"] = pc.subtract(batch["id"], 1)
result["col4"] = pc.add(batch["id"], 5)
result["col5"] = pc.multiply(batch["id"], 3)
return pa.table(result)
ds_optimal = ds_optimal.map_batches(
apply_all_expressions, batch_format="pyarrow"
)
# Compare results using rows_same
result_optimized = optimized_ds.to_pandas()
result_optimal = ds_optimal.to_pandas()
# Sort columns before comparison
result_optimized = result_optimized[sorted(result_optimized.columns)]
result_optimal = result_optimal[sorted(result_optimal.columns)]
assert rows_same(result_optimized, result_optimal)
def test_chained_udf_dependencies(self, ray_start_regular_shared):
"""Test multiple non-vectorized UDFs in a dependency chain."""
input_data = [{"id": i} for i in range(5)]
# Create dataset with chained UDF dependencies
ds = ray.data.from_items(input_data)
ds = ds.with_column("plus_one", self.udfs["add_one"](col("id")))
ds = ds.with_column("times_two", self.udfs["multiply_by_two"](col("plus_one")))
ds = ds.with_column("div_three", self.udfs["divide_by_three"](col("times_two")))
# Apply optimization
rule = ProjectionPushdown()
optimized_plan = rule.apply(ds._logical_plan)
# Verify 1 operator (changed from 3)
assert self._count_project_operators(optimized_plan) == 1
assert (
self._describe_plan_structure(optimized_plan)
== "Project(4 exprs) -> FromItems" # Changed from multiple operators
)
# Verify execution correctness
from ray.data.dataset import Dataset
optimized_ds = Dataset(ds._plan, optimized_plan)
result_df = optimized_ds.to_pandas()
expected_df = pd.DataFrame(
{
"id": [0, 1, 2, 3, 4],
"plus_one": [1, 2, 3, 4, 5],
"times_two": [2, 4, 6, 8, 10],
"div_three": [2 / 3, 4 / 3, 2.0, 8 / 3, 10 / 3],
}
)
# Sort columns before comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
def test_performance_impact_of_udf_chains(self, ray_start_regular_shared):
"""Test performance characteristics of UDF dependency chains vs independent UDFs."""
input_data = [{"id": i} for i in range(100)]
# Case 1: Independent UDFs (should fuse)
ds_independent = ray.data.from_items(input_data)
ds_independent = ds_independent.with_column(
"udf1", self.udfs["add_one"](col("id"))
)
ds_independent = ds_independent.with_column(
"udf2", self.udfs["multiply_by_two"](col("id"))
)
ds_independent = ds_independent.with_column(
"udf3", self.udfs["divide_by_three"](col("id"))
)
# Case 2: Chained UDFs (should also fuse now)
ds_chained = ray.data.from_items(input_data)
ds_chained = ds_chained.with_column("step1", self.udfs["add_one"](col("id")))
ds_chained = ds_chained.with_column(
"step2", self.udfs["multiply_by_two"](col("step1"))
)
ds_chained = ds_chained.with_column(
"step3", self.udfs["divide_by_three"](col("step2"))
)
# Apply optimization
rule = ProjectionPushdown()
optimized_independent = rule.apply(ds_independent._logical_plan)
optimized_chained = rule.apply(ds_chained._logical_plan)
# Verify fusion behavior (both should be 1 now)
assert self._count_project_operators(optimized_independent) == 1
assert (
self._count_project_operators(optimized_chained) == 1
) # Changed from 3 to 1
assert (
self._describe_plan_structure(optimized_independent)
== "Project(4 exprs) -> FromItems"
)
assert (
self._describe_plan_structure(optimized_chained)
== "Project(4 exprs) -> FromItems" # Changed from multiple operators
)
@pytest.mark.parametrize(
"operations,expected",
[
# Single operations
([("rename", {"a": "A"})], {"A": 1, "b": 2, "c": 3}),
([("select", ["a", "b"])], {"a": 1, "b": 2}),
([("with_column", "d", 4)], {"a": 1, "b": 2, "c": 3, "d": 4}),
# Two operations - rename then select
([("rename", {"a": "A"}), ("select", ["A"])], {"A": 1}),
([("rename", {"a": "A"}), ("select", ["b"])], {"b": 2}),
(
[("rename", {"a": "A", "b": "B"}), ("select", ["A", "B"])],
{"A": 1, "B": 2},
),
# Two operations - select then rename
([("select", ["a", "b"]), ("rename", {"a": "A"})], {"A": 1, "b": 2}),
([("select", ["a"]), ("rename", {"a": "x"})], {"x": 1}),
# Two operations - with_column combinations
([("with_column", "d", 4), ("select", ["a", "d"])], {"a": 1, "d": 4}),
([("select", ["a"]), ("with_column", "d", 4)], {"a": 1, "d": 4}),
(
[("rename", {"a": "A"}), ("with_column", "d", 4)],
{"A": 1, "b": 2, "c": 3, "d": 4},
),
(
[("with_column", "d", 4), ("rename", {"d": "D"})],
{"a": 1, "b": 2, "c": 3, "D": 4},
),
# Three operations
(
[
("rename", {"a": "A"}),
("select", ["A", "b"]),
("with_column", "d", 4),
],
{"A": 1, "b": 2, "d": 4},
),
(
[
("with_column", "d", 4),
("rename", {"a": "A"}),
("select", ["A", "d"]),
],
{"A": 1, "d": 4},
),
(
[
("select", ["a", "b"]),
("rename", {"a": "x"}),
("with_column", "d", 4),
],
{"x": 1, "b": 2, "d": 4},
),
# Column swap (no actual changes)
([("rename", {"a": "b", "b": "a"}), ("select", ["a"])], {"a": 2}),
([("rename", {"a": "b", "b": "a"}), ("select", ["b"])], {"b": 1}),
# Multiple same operations
(
[("rename", {"a": "x"}), ("rename", {"x": "y"})],
{"y": 1, "b": 2, "c": 3},
),
([("select", ["a", "b"]), ("select", ["a"])], {"a": 1}),
(
[("with_column", "d", 4), ("with_column", "e", 5)],
{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5},
),
# Complex expressions with with_column
(
[("rename", {"a": "x"}), ("with_column_expr", "sum", "x", 10)],
{"x": 1, "b": 2, "c": 3, "sum": 10},
),
(
[
("with_column", "d", 4),
("with_column", "e", 5),
("select", ["d", "e"]),
],
{"d": 4, "e": 5},
),
],
)
def test_projection_operations_comprehensive(
self, ray_start_regular_shared, operations, expected
):
"""Comprehensive test for projection operations combinations."""
from ray.data.expressions import col, lit
# Create initial dataset
ds = ray.data.range(1).map(lambda row: {"a": 1, "b": 2, "c": 3})
# Apply operations
for op in operations:
if op[0] == "rename":
ds = ds.rename_columns(op[1])
elif op[0] == "select":
ds = ds.select_columns(op[1])
elif op[0] == "with_column":
ds = ds.with_column(op[1], lit(op[2]))
elif op[0] == "with_column_expr":
# Special case for expressions referencing columns
ds = ds.with_column(op[1], col(op[2]) * op[3])
# Verify result using rows_same
result_df = ds.to_pandas()
expected_df = pd.DataFrame([expected])
# Ensure columns are in the same order for comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
@pytest.mark.parametrize(
"operations,expected",
[
# Basic count operations
([("count",)], 3), # All 3 rows
([("rename", {"a": "A"}), ("count",)], 3),
([("select", ["a", "b"]), ("count",)], 3),
([("with_column", "d", 4), ("count",)], 3),
# Filter operations affecting count
([("filter", col("a") > 1), ("count",)], 2), # 2 rows have a > 1
([("filter", col("b") == 2), ("count",)], 3), # All rows have b == 2
([("filter", col("c") < 10), ("count",)], 3), # All rows have c < 10
([("filter", col("a") == 1), ("count",)], 1), # 1 row has a == 1
# Projection then filter then count
([("rename", {"a": "A"}), ("filter", col("A") > 1), ("count",)], 2),
([("select", ["a", "b"]), ("filter", col("a") > 1), ("count",)], 2),
([("with_column", "d", 4), ("filter", col("d") == 4), ("count",)], 3),
# Filter then projection then count
([("filter", col("a") > 1), ("rename", {"a": "A"}), ("count",)], 2),
([("filter", col("b") == 2), ("select", ["a", "b"]), ("count",)], 3),
([("filter", col("c") < 10), ("with_column", "d", 4), ("count",)], 3),
# Multiple projections with filter and count
(
[
("rename", {"a": "A"}),
("select", ["A", "b"]),
("filter", col("A") > 1),
("count",),
],
2,
),
(
[
("with_column", "d", 4),
("rename", {"d": "D"}),
("filter", col("D") == 4),
("count",),
],
3,
),
(
[
("select", ["a", "b"]),
("filter", col("a") > 1),
("rename", {"a": "x"}),
("count",),
],
2,
),
# Complex combinations
(
[
("filter", col("a") > 0),
("rename", {"b": "B"}),
("select", ["a", "B"]),
("filter", col("B") == 2),
("count",),
],
3,
),
(
[
("with_column", "sum", 99),
("filter", col("a") > 1),
("select", ["a", "sum"]),
("count",),
],
2,
),
(
[
("rename", {"a": "A", "b": "B"}),
("filter", (col("A") + col("B")) > 3),
("select", ["A"]),
("count",),
],
2,
),
],
)
def test_projection_fusion_with_count_and_filter(
self, ray_start_regular_shared, operations, expected
):
"""Test projection fusion with count operations including filters."""
from ray.data.expressions import lit
# Create dataset with 3 rows: {"a": 1, "b": 2, "c": 3}, {"a": 2, "b": 2, "c": 3}, {"a": 3, "b": 2, "c": 3}
ds = ray.data.from_items(
[
{"a": 1, "b": 2, "c": 3},
{"a": 2, "b": 2, "c": 3},
{"a": 3, "b": 2, "c": 3},
]
)
# Apply operations
for op in operations:
if op[0] == "rename":
ds = ds.rename_columns(op[1])
elif op[0] == "select":
ds = ds.select_columns(op[1])
elif op[0] == "with_column":
ds = ds.with_column(op[1], lit(op[2]))
elif op[0] == "filter":
# Use the predicate expression directly
ds = ds.filter(expr=op[1])
elif op[0] == "count":
# Count returns a scalar, not a dataset
result = ds.count()
assert result == expected
return # Early return since count() terminates the pipeline
# This should not be reached for count operations
assert False, "Count operation should have returned early"
@pytest.mark.parametrize(
"invalid_operations,error_type,error_message_contains",
[
# Try to filter on a column that doesn't exist yet
(
[("filter", col("d") > 0), ("with_column", "d", 4)],
(KeyError, ray.exceptions.RayTaskError),
"d",
),
# Try to filter on a renamed column before the rename
(
[("filter", col("A") > 1), ("rename", {"a": "A"})],
(KeyError, ray.exceptions.RayTaskError),
"A",
),
# Try to use a column that was removed by select
(
[("select", ["a"]), ("filter", col("b") == 2)],
(KeyError, ray.exceptions.RayTaskError),
"b",
),
# Try to filter on a column after it was removed by select
(
[("select", ["a", "b"]), ("filter", col("c") < 10)],
(KeyError, ray.exceptions.RayTaskError),
"c",
),
# Try to use with_column referencing a non-existent column
(
[("select", ["a"]), ("with_column", "new_col", col("b") + 1)],
(KeyError, ray.exceptions.RayTaskError),
"b",
),
# Try to filter on a column that was renamed away
(
[("rename", {"b": "B"}), ("filter", col("b") == 2)],
(KeyError, ray.exceptions.RayTaskError),
"b",
),
# Try to use with_column with old column name after rename
(
[("rename", {"a": "A"}), ("with_column", "result", col("a") + 1)],
(KeyError, ray.exceptions.RayTaskError),
"a",
),
# Try to select using old column name after rename
(
[("rename", {"b": "B"}), ("select", ["a", "b", "c"])],
(KeyError, ray.exceptions.RayTaskError),
"b",
),
# Try to filter on a computed column that was removed by select
(
[
("with_column", "d", 4),
("select", ["a", "b"]),
("filter", col("d") == 4),
],
(KeyError, ray.exceptions.RayTaskError),
"d",
),
# Try to rename a column that was removed by select
(
[("select", ["a", "b"]), ("rename", {"c": "C"})],
(KeyError, ray.exceptions.RayTaskError),
"c",
),
# Complex: rename, select (removing renamed source), then use old name
(
[
("rename", {"a": "A"}),
("select", ["b", "c"]),
("filter", col("a") > 0),
],
(KeyError, ray.exceptions.RayTaskError),
"a",
),
# Complex: with_column, select (keeping new column), filter on removed original
(
[
("with_column", "sum", col("a") + col("b")),
("select", ["sum"]),
("filter", col("a") > 0),
],
(KeyError, ray.exceptions.RayTaskError),
"a",
),
# Try to use column in with_column expression after it was removed
(
[
("select", ["a", "c"]),
("with_column", "result", col("a") + col("b")),
],
(KeyError, ray.exceptions.RayTaskError),
"b",
),
],
)
def test_projection_operations_invalid_order(
self,
ray_start_regular_shared,
invalid_operations,
error_type,
error_message_contains,
):
"""Test that operations fail gracefully when referencing non-existent columns."""
import ray
from ray.data.expressions import lit
# Create dataset with 3 rows: {"a": 1, "b": 2, "c": 3}, {"a": 2, "b": 2, "c": 3}, {"a": 3, "b": 2, "c": 3}
ds = ray.data.from_items(
[
{"a": 1, "b": 2, "c": 3},
{"a": 2, "b": 2, "c": 3},
{"a": 3, "b": 2, "c": 3},
]
)
# Apply operations and expect them to fail
with pytest.raises(error_type) as exc_info:
for op in invalid_operations:
if op[0] == "rename":
ds = ds.rename_columns(op[1])
elif op[0] == "select":
ds = ds.select_columns(op[1])
elif op[0] == "with_column":
if len(op) == 3 and not isinstance(op[2], (int, float, str)):
# Expression-based with_column (op[2] is an expression)
ds = ds.with_column(op[1], op[2])
else:
# Literal-based with_column
ds = ds.with_column(op[1], lit(op[2]))
elif op[0] == "filter":
ds = ds.filter(expr=op[1])
elif op[0] == "count":
ds.count()
return
# Force execution to trigger the error
result = ds.take_all()
print(f"Unexpected success: {result}")
# Verify the error message contains the expected column name
error_str = str(exc_info.value).lower()
assert (
error_message_contains.lower() in error_str
), f"Expected '{error_message_contains}' in error message: {error_str}"
@pytest.mark.parametrize(
"operations,expected_output",
[
# === Basic Select Operations ===
pytest.param(
[("select", ["a"])],
[{"a": 1}, {"a": 2}, {"a": 3}],
id="select_single_column",
),
pytest.param(
[("select", ["a", "b"])],
[{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}],
id="select_two_columns",
),
pytest.param(
[("select", ["a", "b", "c"])],
[
{"a": 1, "b": 4, "c": 7},
{"a": 2, "b": 5, "c": 8},
{"a": 3, "b": 6, "c": 9},
],
id="select_all_columns",
),
pytest.param(
[("select", ["c", "a"])],
[{"c": 7, "a": 1}, {"c": 8, "a": 2}, {"c": 9, "a": 3}],
id="select_reordered_columns",
),
# === Basic Rename Operations ===
pytest.param(
[("rename", {"a": "alpha"})],
[
{"alpha": 1, "b": 4, "c": 7},
{"alpha": 2, "b": 5, "c": 8},
{"alpha": 3, "b": 6, "c": 9},
],
id="rename_single_column",
),
pytest.param(
[("rename", {"a": "alpha", "b": "beta"})],
[
{"alpha": 1, "beta": 4, "c": 7},
{"alpha": 2, "beta": 5, "c": 8},
{"alpha": 3, "beta": 6, "c": 9},
],
id="rename_multiple_columns",
),
# === Basic with_column Operations ===
pytest.param(
[("with_column_expr", "sum", "add", "a", "b")],
[
{"a": 1, "b": 4, "c": 7, "sum": 5},
{"a": 2, "b": 5, "c": 8, "sum": 7},
{"a": 3, "b": 6, "c": 9, "sum": 9},
],
id="with_column_add_keep_all",
),
pytest.param(
[("with_column_expr", "product", "multiply", "b", "c")],
[
{"a": 1, "b": 4, "c": 7, "product": 28},
{"a": 2, "b": 5, "c": 8, "product": 40},
{"a": 3, "b": 6, "c": 9, "product": 54},
],
id="with_column_multiply_keep_all",
),
# === Chained Selects ===
pytest.param(
[("select", ["a", "b", "c"]), ("select", ["a", "b"])],
[{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}],
id="chained_selects_two_levels",
),
pytest.param(
[
("select", ["a", "b", "c"]),
("select", ["a", "b"]),
("select", ["a"]),
],
[{"a": 1}, {"a": 2}, {"a": 3}],
id="chained_selects_three_levels",
),
# === Rename → Select ===
pytest.param(
[("rename", {"a": "x"}), ("select", ["x", "b"])],
[{"x": 1, "b": 4}, {"x": 2, "b": 5}, {"x": 3, "b": 6}],
id="rename_then_select",
),
pytest.param(
[("rename", {"a": "x", "c": "z"}), ("select", ["x", "z"])],
[{"x": 1, "z": 7}, {"x": 2, "z": 8}, {"x": 3, "z": 9}],
id="rename_multiple_then_select",
),
# === Select → Rename ===
pytest.param(
[("select", ["a", "b"]), ("rename", {"a": "x"})],
[{"x": 1, "b": 4}, {"x": 2, "b": 5}, {"x": 3, "b": 6}],
id="select_then_rename",
),
pytest.param(
[("select", ["a", "b", "c"]), ("rename", {"a": "x", "b": "y"})],
[
{"x": 1, "y": 4, "c": 7},
{"x": 2, "y": 5, "c": 8},
{"x": 3, "y": 6, "c": 9},
],
id="select_all_then_rename_some",
),
# === Multiple Renames ===
pytest.param(
[("rename", {"a": "x"}), ("rename", {"x": "y"})],
[
{"y": 1, "b": 4, "c": 7},
{"y": 2, "b": 5, "c": 8},
{"y": 3, "b": 6, "c": 9},
],
id="chained_renames",
),
# === with_column → Select ===
pytest.param(
[("with_column_expr", "sum", "add", "a", "b"), ("select", ["sum"])],
[{"sum": 5}, {"sum": 7}, {"sum": 9}],
id="with_column_then_select_only_computed",
),
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("select", ["a", "sum"]),
],
[{"a": 1, "sum": 5}, {"a": 2, "sum": 7}, {"a": 3, "sum": 9}],
id="with_column_then_select_mixed",
),
pytest.param(
[
("with_column_expr", "result", "multiply", "b", "c"),
("select", ["a", "result"]),
],
[
{"a": 1, "result": 28},
{"a": 2, "result": 40},
{"a": 3, "result": 54},
],
id="with_column_select_source_and_computed",
),
# === Multiple with_column Operations ===
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("with_column_expr", "product", "multiply", "a", "c"),
],
[
{"a": 1, "b": 4, "c": 7, "sum": 5, "product": 7},
{"a": 2, "b": 5, "c": 8, "sum": 7, "product": 16},
{"a": 3, "b": 6, "c": 9, "sum": 9, "product": 27},
],
id="multiple_with_column_keep_all",
),
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("with_column_expr", "product", "multiply", "a", "c"),
("select", ["sum", "product"]),
],
[
{"sum": 5, "product": 7},
{"sum": 7, "product": 16},
{"sum": 9, "product": 27},
],
id="multiple_with_column_then_select",
),
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("with_column_expr", "diff", "add", "c", "a"),
("select", ["sum", "diff"]),
],
[{"sum": 5, "diff": 8}, {"sum": 7, "diff": 10}, {"sum": 9, "diff": 12}],
id="multiple_with_column_independent_sources",
),
# === with_column → Rename ===
pytest.param(
[
("with_column_expr", "sum", "add", "a", "b"),
("rename", {"sum": "total"}),
],
[
{"a": 1, "b": 4, "c": 7, "total": 5},
{"a": 2, "b": 5, "c": 8, "total": 7},
{"a": 3, "b": 6, "c": 9, "total": 9},
],
id="with_column_then_rename_computed",
),
# === Rename → with_column ===
pytest.param(
[
("rename", {"a": "x"}),
("with_column_expr", "x_plus_b", "add", "x", "b"),
],
[
{"x": 1, "b": 4, "c": 7, "x_plus_b": 5},
{"x": 2, "b": 5, "c": 8, "x_plus_b": 7},
{"x": 3, "b": 6, "c": 9, "x_plus_b": 9},
],
id="rename_then_with_column_using_renamed",
),
pytest.param(
[
("rename", {"a": "x"}),
("with_column_expr", "result", "add", "x", "b"),
("select", ["result"]),
],
[{"result": 5}, {"result": 7}, {"result": 9}],
id="rename_with_column_select_chain",
),
# === Select → with_column → Select ===
pytest.param(
[
("select", ["a", "b"]),
("with_column_expr", "sum", "add", "a", "b"),
("select", ["a", "sum"]),
],
[{"a": 1, "sum": 5}, {"a": 2, "sum": 7}, {"a": 3, "sum": 9}],
id="select_with_column_select_chain",
),
pytest.param(
[
("select", ["a", "b", "c"]),
("with_column_expr", "x", "add", "a", "b"),
("with_column_expr", "y", "multiply", "b", "c"),
("select", ["x", "y"]),
],
[{"x": 5, "y": 28}, {"x": 7, "y": 40}, {"x": 9, "y": 54}],
id="select_multiple_with_column_select_chain",
),
# === Complex Multi-Step Chains ===
pytest.param(
[
("select", ["a", "b", "c"]),
("rename", {"a": "x"}),
("with_column_expr", "result", "add", "x", "b"),
("select", ["result", "c"]),
],
[{"result": 5, "c": 7}, {"result": 7, "c": 8}, {"result": 9, "c": 9}],
id="complex_select_rename_with_column_select",
),
pytest.param(
[
("rename", {"a": "alpha", "b": "beta"}),
("select", ["alpha", "beta", "c"]),
("with_column_expr", "sum", "add", "alpha", "beta"),
("rename", {"sum": "total"}),
("select", ["total", "c"]),
],
[{"total": 5, "c": 7}, {"total": 7, "c": 8}, {"total": 9, "c": 9}],
id="complex_five_step_chain",
),
pytest.param(
[
("select", ["a", "b", "c"]),
("select", ["b", "c"]),
("select", ["c"]),
],
[{"c": 7}, {"c": 8}, {"c": 9}],
id="select_chain",
),
],
)
def test_projection_pushdown_into_parquet_read(
self, ray_start_regular_shared, tmp_path, operations, expected_output
):
"""Test that projection operations fuse and push down into parquet reads.
Verifies:
- Multiple projections fuse into single operator
- Fused projection pushes down into Read operator
- Only necessary columns are read from parquet
- Results are correct for select, rename, and with_column operations
"""
from ray.data.expressions import col
# Create test parquet file
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
parquet_path = tmp_path / "test.parquet"
df.to_parquet(parquet_path, index=False)
# Build pipeline with operations
ds = ray.data.read_parquet(str(parquet_path))
for op_type, *op_args in operations:
if op_type == "select":
ds = ds.select_columns(op_args[0])
elif op_type == "rename":
ds = ds.rename_columns(op_args[0])
elif op_type == "with_column_expr":
col_name, operator, col1, col2 = op_args
if operator == "add":
ds = ds.with_column(col_name, col(col1) + col(col2))
elif operator == "multiply":
ds = ds.with_column(col_name, col(col1) * col(col2))
result_df = ds.to_pandas()
expected_df = pd.DataFrame(expected_output)
# Ensure columns are in the same order for comparison
result_df = result_df[sorted(result_df.columns)]
expected_df = expected_df[sorted(expected_df.columns)]
assert rows_same(result_df, expected_df)
@pytest.mark.parametrize("flavor", ["project_before", "project_after"])
def test_projection_pushdown_merge_rename_x(ray_start_regular_shared, flavor):
"""
Test that valid select and renaming merges correctly.
"""
path = "example://iris.parquet"
ds = ray.data.read_parquet(path)
ds = ds.map_batches(lambda d: d)
if flavor == "project_before":
ds = ds.select_columns(["sepal.length", "petal.width"])
# First projection renames 'sepal.length' to 'length'
ds = ds.rename_columns({"sepal.length": "length"})
# Second projection renames 'petal.width' to 'width'
ds = ds.rename_columns({"petal.width": "width"})
if flavor == "project_after":
ds = ds.select_columns(["length", "width"])
logical_plan = ds._plan._logical_plan
op = logical_plan.dag
assert isinstance(op, Project), op.name
optimized_logical_plan = LogicalOptimizer().optimize(logical_plan)
assert isinstance(optimized_logical_plan.dag, Project)
select_op = optimized_logical_plan.dag
# Check that both "sepal.length" and "petal.width" are present in the columns,
# regardless of their order.
assert select_op.exprs == [
# TODO fix (renaming doesn't remove prev columns)
col("sepal.length").alias("length"),
col("petal.width").alias("width"),
]
if __name__ == "__main__":
pytest.main([__file__, "-v"])
| TestProjectionFusion |
python | pandas-dev__pandas | asv_bench/benchmarks/multiindex_object.py | {
"start": 6148,
"end": 6618
} | class ____:
def setup(self):
self.mi = MultiIndex.from_product(
[
date_range("2000-01-01", periods=1000),
RangeIndex(1000),
]
)
self.mi_deepcopy = self.mi.copy(deep=True)
self.idx_non_object = RangeIndex(1)
def time_equals_deepcopy(self):
self.mi.equals(self.mi_deepcopy)
def time_equals_non_object_index(self):
self.mi.equals(self.idx_non_object)
| Equals |
python | simonw__datasette | datasette/views/table.py | {
"start": 1354,
"end": 11340
} | class ____:
def __init__(self, cells):
self.cells = cells
def __iter__(self):
return iter(self.cells)
def __getitem__(self, key):
for cell in self.cells:
if cell["column"] == key:
return cell["raw"]
raise KeyError
def display(self, key):
for cell in self.cells:
if cell["column"] == key:
return cell["value"]
return None
def __str__(self):
d = {
key: self[key]
for key in [
c["column"] for c in self.cells if not c.get("is_special_link_column")
]
}
return json.dumps(d, default=repr, indent=2)
async def run_sequential(*args):
# This used to be swappable for asyncio.gather() to run things in
# parallel, but this lead to hard-to-debug locking issues with
# in-memory databases: https://github.com/simonw/datasette/issues/2189
results = []
for fn in args:
results.append(await fn)
return results
def _redirect(datasette, request, path, forward_querystring=True, remove_args=None):
if request.query_string and "?" not in path and forward_querystring:
path = f"{path}?{request.query_string}"
if remove_args:
path = path_with_removed_args(request, remove_args, path=path)
r = Response.redirect(path)
r.headers["Link"] = f"<{path}>; rel=preload"
if datasette.cors:
add_cors_headers(r.headers)
return r
async def _redirect_if_needed(datasette, request, resolved):
# Handle ?_filter_column
redirect_params = filters_should_redirect(request.args)
if redirect_params:
return _redirect(
datasette,
request,
datasette.urls.path(path_with_added_args(request, redirect_params)),
forward_querystring=False,
)
# If ?_sort_by_desc=on (from checkbox) redirect to _sort_desc=(_sort)
if "_sort_by_desc" in request.args:
return _redirect(
datasette,
request,
datasette.urls.path(
path_with_added_args(
request,
{
"_sort_desc": request.args.get("_sort"),
"_sort_by_desc": None,
"_sort": None,
},
)
),
forward_querystring=False,
)
async def display_columns_and_rows(
datasette,
database_name,
table_name,
description,
rows,
link_column=False,
truncate_cells=0,
sortable_columns=None,
request=None,
):
"""Returns columns, rows for specified table - including fancy foreign key treatment"""
sortable_columns = sortable_columns or set()
db = datasette.databases[database_name]
column_descriptions = dict(
await datasette.get_internal_database().execute(
"""
SELECT
column_name,
value
FROM metadata_columns
WHERE database_name = ?
AND resource_name = ?
AND key = 'description'
""",
[database_name, table_name],
)
)
column_details = {
col.name: col for col in await db.table_column_details(table_name)
}
pks = await db.primary_keys(table_name)
pks_for_display = pks
if not pks_for_display:
pks_for_display = ["rowid"]
columns = []
for r in description:
if r[0] == "rowid" and "rowid" not in column_details:
type_ = "integer"
notnull = 0
else:
type_ = column_details[r[0]].type
notnull = column_details[r[0]].notnull
columns.append(
{
"name": r[0],
"sortable": r[0] in sortable_columns,
"is_pk": r[0] in pks_for_display,
"type": type_,
"notnull": notnull,
"description": column_descriptions.get(r[0]),
}
)
column_to_foreign_key_table = {
fk["column"]: fk["other_table"]
for fk in await db.foreign_keys_for_table(table_name)
}
cell_rows = []
base_url = datasette.setting("base_url")
for row in rows:
cells = []
# Unless we are a view, the first column is a link - either to the rowid
# or to the simple or compound primary key
if link_column:
is_special_link_column = len(pks) != 1
pk_path = path_from_row_pks(row, pks, not pks, False)
cells.append(
{
"column": pks[0] if len(pks) == 1 else "Link",
"value_type": "pk",
"is_special_link_column": is_special_link_column,
"raw": pk_path,
"value": markupsafe.Markup(
'<a href="{table_path}/{flat_pks_quoted}">{flat_pks}</a>'.format(
table_path=datasette.urls.table(database_name, table_name),
flat_pks=str(markupsafe.escape(pk_path)),
flat_pks_quoted=path_from_row_pks(row, pks, not pks),
)
),
}
)
for value, column_dict in zip(row, columns):
column = column_dict["name"]
if link_column and len(pks) == 1 and column == pks[0]:
# If there's a simple primary key, don't repeat the value as it's
# already shown in the link column.
continue
# First let the plugins have a go
# pylint: disable=no-member
plugin_display_value = None
for candidate in pm.hook.render_cell(
row=row,
value=value,
column=column,
table=table_name,
database=database_name,
datasette=datasette,
request=request,
):
candidate = await await_me_maybe(candidate)
if candidate is not None:
plugin_display_value = candidate
break
if plugin_display_value:
display_value = plugin_display_value
elif isinstance(value, bytes):
formatted = format_bytes(len(value))
display_value = markupsafe.Markup(
'<a class="blob-download" href="{}"{}><Binary: {:,} byte{}></a>'.format(
datasette.urls.row_blob(
database_name,
table_name,
path_from_row_pks(row, pks, not pks),
column,
),
(
' title="{}"'.format(formatted)
if "bytes" not in formatted
else ""
),
len(value),
"" if len(value) == 1 else "s",
)
)
elif isinstance(value, dict):
# It's an expanded foreign key - display link to other row
label = value["label"]
value = value["value"]
# The table we link to depends on the column
other_table = column_to_foreign_key_table[column]
link_template = LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
display_value = markupsafe.Markup(
link_template.format(
database=tilde_encode(database_name),
base_url=base_url,
table=tilde_encode(other_table),
link_id=tilde_encode(str(value)),
id=str(markupsafe.escape(value)),
label=str(markupsafe.escape(label)) or "-",
)
)
elif value in ("", None):
display_value = markupsafe.Markup(" ")
elif is_url(str(value).strip()):
display_value = markupsafe.Markup(
'<a href="{url}">{truncated_url}</a>'.format(
url=markupsafe.escape(value.strip()),
truncated_url=markupsafe.escape(
truncate_url(value.strip(), truncate_cells)
),
)
)
else:
display_value = str(value)
if truncate_cells and len(display_value) > truncate_cells:
display_value = display_value[:truncate_cells] + "\u2026"
cells.append(
{
"column": column,
"value": display_value,
"raw": value,
"value_type": (
"none" if value is None else str(type(value).__name__)
),
}
)
cell_rows.append(Row(cells))
if link_column:
# Add the link column header.
# If it's a simple primary key, we have to remove and re-add that column name at
# the beginning of the header row.
first_column = None
if len(pks) == 1:
columns = [col for col in columns if col["name"] != pks[0]]
first_column = {
"name": pks[0],
"sortable": len(pks) == 1,
"is_pk": True,
"type": column_details[pks[0]].type,
"notnull": column_details[pks[0]].notnull,
}
else:
first_column = {
"name": "Link",
"sortable": False,
"is_pk": False,
"type": "",
"notnull": 0,
}
columns = [first_column] + columns
return columns, cell_rows
| Row |
python | numba__numba | numba/core/errors.py | {
"start": 17540,
"end": 17733
} | class ____(Exception):
"""Unsupported bytecode is non-recoverable
"""
def __init__(self, msg, loc=None):
super().__init__(f"{msg}. Raised from {loc}")
| UnsupportedBytecodeError |
python | tornadoweb__tornado | demos/blog/blog.py | {
"start": 4410,
"end": 4714
} | class ____(BaseHandler):
async def get(self):
entries = await self.query(
"SELECT * FROM entries ORDER BY published DESC LIMIT 5"
)
if not entries:
self.redirect("/compose")
return
self.render("home.html", entries=entries)
| HomeHandler |
python | optuna__optuna | optuna/cli.py | {
"start": 22585,
"end": 26581
} | class ____(_BaseCommand):
"""Create a new trial and suggest parameters."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--study-name", type=str, help="Name of study.")
parser.add_argument("--sampler", type=str, help="Class name of sampler object to create.")
parser.add_argument(
"--sampler-kwargs",
type=str,
help="Sampler object initialization keyword arguments as JSON.",
)
parser.add_argument(
"--search-space",
type=str,
help=(
"Search space as JSON. Keys are names and values are outputs from "
":func:`~optuna.distributions.distribution_to_json`."
),
)
parser.add_argument(
"-f",
"--format",
type=str,
choices=("value", "json", "table", "yaml"),
default="json",
help="Output format.",
)
parser.add_argument(
"--flatten",
default=False,
action="store_true",
help="Flatten nested columns such as params.",
)
def take_action(self, parsed_args: Namespace) -> int:
optuna_warn(
"'ask' is an experimental CLI command. The interface can change in the future.",
ExperimentalWarning,
)
storage = _get_storage(parsed_args.storage, parsed_args.storage_class)
create_study_kwargs = {
"storage": storage,
"study_name": parsed_args.study_name,
"load_if_exists": True,
}
if parsed_args.sampler is not None:
if parsed_args.sampler_kwargs is not None:
sampler_kwargs = json.loads(parsed_args.sampler_kwargs)
else:
sampler_kwargs = {}
sampler_cls = getattr(optuna.samplers, parsed_args.sampler)
sampler = sampler_cls(**sampler_kwargs)
create_study_kwargs["sampler"] = sampler
else:
if parsed_args.sampler_kwargs is not None:
raise ValueError(
"`--sampler_kwargs` is set without `--sampler`. Please specify `--sampler` as"
" well or omit `--sampler-kwargs`."
)
if parsed_args.search_space is not None:
# The search space is expected to be a JSON serialized string, e.g.
# '{"x": {"name": "FloatDistribution", "attributes": {"low": 0.0, "high": 1.0}},
# "y": ...}'.
search_space = {
name: optuna.distributions.json_to_distribution(json.dumps(dist))
for name, dist in json.loads(parsed_args.search_space).items()
}
else:
search_space = {}
try:
study = optuna.load_study(
study_name=create_study_kwargs["study_name"],
storage=create_study_kwargs["storage"],
sampler=create_study_kwargs.get("sampler"),
)
except KeyError:
raise KeyError(
"Implicit study creation within the 'ask' command was dropped in Optuna v4.0.0. "
"Please use the 'create-study' command beforehand."
)
trial = study.ask(fixed_distributions=search_space)
self.logger.info(f"Asked trial {trial.number} with parameters {trial.params}.")
record: dict[tuple[str, str], Any] = {("number", ""): trial.number}
columns = [("number", "")]
if len(trial.params) == 0 and not parsed_args.flatten:
record[("params", "")] = {}
columns.append(("params", ""))
else:
for param_name, param_value in trial.params.items():
record[("params", param_name)] = param_value
columns.append(("params", param_name))
print(_format_output(record, columns, parsed_args.format, parsed_args.flatten))
return 0
| _Ask |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 217070,
"end": 218918
} | class ____(Response):
"""
Response of tasks.edit endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "edit"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(EditResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| EditResponse |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_web_fetch_tool_20250910_param.py | {
"start": 446,
"end": 1775
} | class ____(TypedDict, total=False):
name: Required[Literal["web_fetch"]]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
type: Required[Literal["web_fetch_20250910"]]
allowed_callers: List[Literal["direct", "code_execution_20250825"]]
allowed_domains: Optional[SequenceNotStr[str]]
"""List of domains to allow fetching from"""
blocked_domains: Optional[SequenceNotStr[str]]
"""List of domains to block fetching from"""
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
citations: Optional[BetaCitationsConfigParam]
"""Citations configuration for fetched documents.
Citations are disabled by default.
"""
defer_loading: bool
"""If true, tool will not be included in initial system prompt.
Only loaded when returned via tool_reference from tool search.
"""
max_content_tokens: Optional[int]
"""Maximum number of tokens used by including web page text content in the context.
The limit is approximate and does not apply to binary content such as PDFs.
"""
max_uses: Optional[int]
"""Maximum number of times the tool can be used in the API request."""
strict: bool
| BetaWebFetchTool20250910Param |
python | tensorflow__tensorflow | tensorflow/python/distribute/cross_device_utils_test.py | {
"start": 5268,
"end": 7020
} | class ____(test.TestCase):
def testPreferLargerPack(self):
# Each packs except the last one should be equal or larger than
# bytes_per_pack.
values = [
# size = 2 * 4 * 4 * 4 = 128
array_ops.ones([2, 4, 4], dtype=dtypes.float32),
# size = 8 * 4 = 32
array_ops.ones([8], dtype=dtypes.int32),
# size = 10 * 10 * 8 = 800
array_ops.ones([10, 10], dtype=dtypes.int64),
# size = 1 * 4 = 4
array_ops.ones([1], dtype=dtypes.int32),
]
packs = cross_device_utils.group_by_size(values, bytes_per_pack=200)
self.assertLen(packs, 2)
self.assertLen(packs[0], 3)
self.assertEqual(packs[0][0].shape, [2, 4, 4])
self.assertEqual(packs[0][1].shape, [8])
self.assertEqual(packs[0][2].shape, [10, 10])
self.assertLen(packs[1], 1)
self.assertEqual(packs[1][0].shape, [1])
def testZeroBytesPerPack(self):
values = [
array_ops.ones([1], dtype=dtypes.float32),
array_ops.ones([2], dtype=dtypes.float32),
]
packs = cross_device_utils.group_by_size(values, bytes_per_pack=0)
self.assertLen(packs, 1)
self.assertLen(packs[0], 2)
self.assertEqual(packs[0][0].shape, [1])
self.assertEqual(packs[0][1].shape, [2])
def testUnknownShape(self):
def create_placeholder(shape, dtype):
with ops.Graph().as_default():
return array_ops.placeholder(dtype=dtype, shape=shape)
values = [
array_ops.ones([10, 10], dtype=dtypes.float32),
create_placeholder([None, 10], dtype=dtypes.float32),
]
packs = cross_device_utils.group_by_size(values, bytes_per_pack=1)
self.assertLen(packs, 1)
self.assertEqual(packs[0], values)
if __name__ == "__main__":
test.main()
| GroupBySizeTest |
python | pandas-dev__pandas | pandas/tests/indexing/test_coercion.py | {
"start": 815,
"end": 4384
} | class ____(CoercionBase):
method = "setitem"
# disable comprehensiveness tests, as most of these have been moved to
# tests.series.indexing.test_setitem in SetitemCastingEquivalents subclasses.
klasses: list[str] = []
def test_setitem_series_no_coercion_from_values_list(self):
# GH35865 - int casted to str when internally calling np.array(ser.values)
ser = pd.Series(["a", 1])
ser[:] = list(ser.values)
expected = pd.Series(["a", 1])
tm.assert_series_equal(ser, expected)
def _assert_setitem_index_conversion(
self, original_series, loc_key, expected_index, expected_dtype
):
"""test index's coercion triggered by assign key"""
temp = original_series.copy()
# GH#33469 pre-2.0 with int loc_key and temp.index.dtype == np.float64
# `temp[loc_key] = 5` treated loc_key as positional
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
@pytest.mark.parametrize(
"val,exp_dtype", [("x", object), (5, IndexError), (1.1, object)]
)
def test_setitem_index_object(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], index=pd.Index(list("abcd"), dtype=object))
assert obj.index.dtype == object
exp_index = pd.Index(list("abcd") + [val], dtype=object)
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, np.int64), (1.1, np.float64), ("x", object)]
)
def test_setitem_index_int64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4])
assert obj.index.dtype == np.int64
exp_index = pd.Index([0, 1, 2, 3, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, np.float64), (5.1, np.float64), ("x", object)]
)
def test_setitem_index_float64(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
assert obj.index.dtype == np.float64
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_series_period(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_complex128(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64tz(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_period(self):
raise NotImplementedError
| TestSetitemCoercion |
python | pytorch__pytorch | torchgen/model.py | {
"start": 13344,
"end": 14088
} | class ____(Enum):
aliasing = auto()
aliasing_inplace = auto()
non_aliasing = auto()
# The basic input to the code generation is native_functions.yaml.
# The name "native", BTW, comes from the distinction between native
# functions and legacy TH functions. The legacy TH functions are gone,
# but the "native" descriptor has stuck.
#
# NativeFunction models a single entry in native_functions.yaml. Its
# fields roughly correspond to what you would see in the YAML itself,
# but after canonicalization and parsing has occurred.
#
# You can see some of the overall design patterns for how we setup
# dataclasses in this class, but we will defer a complete discussion
# of this at FunctionSchema.
@dataclass(frozen=True)
| ViewSchemaKind |
python | huggingface__transformers | src/transformers/models/gpt_oss/modeling_gpt_oss.py | {
"start": 2964,
"end": 7418
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_size = config.intermediate_size
self.num_experts = config.num_local_experts
self.hidden_size = config.hidden_size
self.expert_dim = self.intermediate_size
self.gate_up_proj = nn.Parameter(torch.zeros(self.num_experts, self.hidden_size, 2 * self.expert_dim))
self.gate_up_proj_bias = nn.Parameter(torch.zeros(self.num_experts, 2 * self.expert_dim))
self.down_proj = nn.Parameter(torch.empty((self.num_experts, self.expert_dim, self.hidden_size)))
self.down_proj_bias = nn.Parameter(torch.zeros(self.num_experts, self.hidden_size))
self.alpha = 1.702
self.limit = 7.0
def forward(self, hidden_states: torch.Tensor, router_indices=None, routing_weights=None) -> torch.Tensor:
"""
When training it is more efficient to just loop over the experts and compute the output for each expert
as otherwise the memory would explode.
For inference we can sacrifice some memory and compute the output for all experts at once. By repeating the inputs.
Args:
hidden_states (torch.Tensor): (batch_size, seq_len, hidden_size)
selected_experts (torch.Tensor): (batch_size * token_num, top_k)
routing_weights (torch.Tensor): (batch_size * token_num, num_experts)
Returns:
torch.Tensor
"""
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size) # (num_tokens, hidden_size)
num_experts = routing_weights.shape[1]
if hidden_states.device.type == "cpu" or self.training:
next_states = torch.zeros_like(hidden_states, dtype=hidden_states.dtype, device=hidden_states.device)
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(
router_indices, num_classes=num_experts + 1
) # masking is also a class
expert_mask = expert_mask.permute(2, 1, 0)
# we sum on the top_k and on the sequence length to get which experts
# are hit this time around
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit[:]:
# expert_idx only have 1 element, so we can use scale for fast indexing
expert_idx = expert_idx[0]
# skip masking index
if expert_idx == num_experts:
continue
with torch.no_grad():
_, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states[token_idx]
gate_up = current_state @ self.gate_up_proj[expert_idx] + self.gate_up_proj_bias[expert_idx]
gate, up = gate_up[..., ::2], gate_up[..., 1::2]
gate = gate.clamp(min=None, max=self.limit)
up = up.clamp(min=-self.limit, max=self.limit)
glu = gate * torch.sigmoid(gate * self.alpha)
gated_output = (up + 1) * glu
out = gated_output @ self.down_proj[expert_idx] + self.down_proj_bias[expert_idx]
weighted_output = out * routing_weights[token_idx, expert_idx, None]
next_states.index_add_(0, token_idx, weighted_output.to(hidden_states.dtype))
next_states = next_states.view(batch_size, -1, self.hidden_size)
else:
hidden_states = hidden_states.repeat(num_experts, 1)
hidden_states = hidden_states.view(num_experts, -1, self.hidden_size)
gate_up = torch.bmm(hidden_states, self.gate_up_proj) + self.gate_up_proj_bias[..., None, :]
gate, up = gate_up[..., ::2], gate_up[..., 1::2]
gate = gate.clamp(min=None, max=self.limit)
up = up.clamp(min=-self.limit, max=self.limit)
glu = gate * torch.sigmoid(gate * self.alpha)
next_states = torch.bmm(((up + 1) * glu), self.down_proj)
next_states = next_states + self.down_proj_bias[..., None, :]
next_states = next_states.view(num_experts, batch_size, -1, self.hidden_size)
next_states = next_states * routing_weights.transpose(0, 1).view(num_experts, batch_size, -1)[..., None]
next_states = next_states.sum(dim=0)
return next_states
| GptOssExperts |
python | django__django | tests/admin_ordering/models.py | {
"start": 600,
"end": 698
} | class ____(admin.StackedInline):
model = Song
ordering = ("duration",)
| SongInlineNewOrdering |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py | {
"start": 17232,
"end": 17299
} | class ____(Qwen3MoeDecoderLayer):
pass
| Qwen3VLMoeTextDecoderLayer |
python | pallets__werkzeug | examples/simplewiki/database.py | {
"start": 1697,
"end": 2483
} | class ____:
"""
Represents one revision of a page.
This is useful for editing particular revision of pages or creating
new revisions. It's also used for the diff system and the revision
log.
"""
query = session.query_property()
def __init__(self, page, text, change_note="", timestamp=None):
if isinstance(page, int):
self.page_id = page
else:
self.page = page
self.text = text
self.change_note = change_note
self.timestamp = timestamp or datetime.utcnow()
def render(self):
"""Render the page text into a genshi stream."""
return parse_creole(self.text)
def __repr__(self):
return f"<{type(self).__name__} {self.page_id!r}:{self.revision_id!r}>"
| Revision |
python | huggingface__transformers | src/transformers/models/convnext/modeling_convnext.py | {
"start": 11894,
"end": 13692
} | class ____(ConvNextPreTrainedModel):
accepts_loss_kwargs = False
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.convnext = ConvNextModel(config)
# Classifier head
if config.num_labels > 0:
self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels)
else:
self.classifier = nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, **kwargs
) -> ImageClassifierOutputWithNoAttention:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPoolingAndNoAttention = self.convnext(pixel_values, **kwargs)
pooled_output = outputs.pooler_output
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels=labels, pooled_logits=logits, config=self.config)
return ImageClassifierOutputWithNoAttention(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
| ConvNextForImageClassification |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 59722,
"end": 60632
} | class ____(fixtures.TestBase):
def assert_eq(self, identityset, expected_iterable):
expected = [id(o) for o in expected_iterable]
found = [id(o) for o in identityset]
eq_(found, expected)
def test_add(self):
elem = object
s = util.OrderedIdentitySet()
s.add(elem())
s.add(elem())
def test_intersection(self):
elem = object
eq_ = self.assert_eq
a, b, c, d, e, f, g = (
elem(),
elem(),
elem(),
elem(),
elem(),
elem(),
elem(),
)
s1 = util.OrderedIdentitySet([a, b, c])
s2 = util.OrderedIdentitySet([d, e, f])
s3 = util.OrderedIdentitySet([a, d, f, g])
eq_(s1.intersection(s2), [])
eq_(s1.intersection(s3), [a])
eq_(s1.union(s2).intersection(s3), [a, d, f])
| OrderedIdentitySetTest |
python | astral-sh__uv | scripts/benchmark/src/benchmark/resolver.py | {
"start": 21743,
"end": 29470
class ____(Suite):
    """Benchmark suite for the PDM package manager.

    Each ``resolve_*`` / ``install_*`` method prepares a scratch PDM project
    under ``cwd`` and returns a :class:`Command` describing how to benchmark
    that operation with the configured ``pdm`` executable.
    """

    def __init__(self, *, python: str, path: str | None = None) -> None:
        """Initialize the suite.

        Args:
            python: The Python interpreter to initialize the project with.
            path: Path to the ``pdm`` executable; defaults to ``pdm`` on
                the ``PATH``.
        """
        self.python = python
        self.name = path or "pdm"
        self.path = path or "pdm"

    def setup(self, requirements_file: str, *, cwd: str) -> None:
        """Initialize a PDM project in *cwd* from a requirements file."""
        import tomli
        import tomli_w
        from packaging.requirements import Requirement

        # Parse all dependencies from the requirements file, skipping
        # comment-only and blank lines.
        with open(requirements_file) as fp:
            requirements = [
                Requirement(line)
                for line in fp
                if not line.lstrip().startswith("#") and len(line.strip()) > 0
            ]

        # Create a PDM project.
        subprocess.check_call(
            [self.path, "init", "--non-interactive", "--python", self.python],
            cwd=cwd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )

        # Add the parsed dependencies to the generated pyproject.toml.
        with open(os.path.join(cwd, "pyproject.toml"), "rb") as fp:
            pyproject = tomli.load(fp)
        pyproject["project"]["dependencies"] = [
            str(requirement) for requirement in requirements
        ]
        with open(os.path.join(cwd, "pyproject.toml"), "wb") as fp:
            tomli_w.dump(pyproject, fp)

    def _generate_lockfile(self, cwd: str) -> str:
        """Run ``pdm lock`` in *cwd* and return the path of the lockfile.

        Asserts that no lockfile exists beforehand and that one exists
        afterwards, so each benchmark starts from a known state.
        """
        pdm_lock = os.path.join(cwd, "pdm.lock")
        assert not os.path.exists(pdm_lock), f"Lockfile already exists at: {pdm_lock}"
        subprocess.check_call(
            [self.path, "lock"],
            cwd=cwd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        assert os.path.exists(pdm_lock), f"Lockfile doesn't exist at: {pdm_lock}"
        return pdm_lock

    def resolve_cold(self, requirements_file: str, *, cwd: str) -> Command | None:
        """Resolve with no pre-existing lockfile and an empty cache."""
        self.setup(requirements_file, cwd=cwd)

        pdm_lock = os.path.join(cwd, "pdm.lock")
        cache_dir = os.path.join(cwd, "cache", "pdm")
        return Command(
            name=f"{self.name} ({Benchmark.RESOLVE_COLD.value})",
            prepare=f"rm -rf {cache_dir} && rm -rf {pdm_lock} && {self.path} config cache_dir {cache_dir}",
            command=[
                self.path,
                "lock",
                "--project",
                cwd,
            ],
        )

    def resolve_warm(self, requirements_file: str, *, cwd: str) -> Command | None:
        """Resolve with no pre-existing lockfile, but a warm cache."""
        self.setup(requirements_file, cwd=cwd)

        pdm_lock = os.path.join(cwd, "pdm.lock")
        cache_dir = os.path.join(cwd, "cache", "pdm")
        return Command(
            name=f"{self.name} ({Benchmark.RESOLVE_WARM.value})",
            prepare=f"rm -rf {pdm_lock} && {self.path} config cache_dir {cache_dir}",
            command=[
                self.path,
                "lock",
                "--project",
                cwd,
            ],
        )

    def resolve_incremental(
        self, requirements_file: str, *, cwd: str
    ) -> Command | None:
        """Re-resolve after adding one dependency to an existing lockfile."""
        import tomli
        import tomli_w

        self.setup(requirements_file, cwd=cwd)

        # Generate a baseline lockfile for the original requirements.
        pdm_lock = self._generate_lockfile(cwd)

        # Add an extra dependency to the pyproject.toml.
        with open(os.path.join(cwd, "pyproject.toml"), "rb") as fp:
            pyproject = tomli.load(fp)
        pyproject["project"]["dependencies"] += [INCREMENTAL_REQUIREMENT]
        with open(os.path.join(cwd, "pyproject.toml"), "wb") as fp:
            tomli_w.dump(pyproject, fp)

        # Store the baseline lockfile, so every benchmark iteration can be
        # restarted from it (see `prepare` below).
        baseline = os.path.join(cwd, "baseline.lock")
        shutil.copyfile(pdm_lock, baseline)

        cache_dir = os.path.join(cwd, "cache", "pdm")
        return Command(
            name=f"{self.name} ({Benchmark.RESOLVE_INCREMENTAL.value})",
            prepare=f"rm -f {pdm_lock} && cp {baseline} {pdm_lock} && {self.path} config cache_dir {cache_dir}",
            command=[
                self.path,
                "lock",
                "--update-reuse",
                "--project",
                cwd,
            ],
        )

    def resolve_noop(self, requirements_file: str, *, cwd: str) -> Command | None:
        """Re-resolve with an up-to-date lockfile already in place."""
        self.setup(requirements_file, cwd=cwd)

        # Generate the lockfile that the no-op resolution will reuse.
        self._generate_lockfile(cwd)

        cache_dir = os.path.join(cwd, "cache", "pdm")
        return Command(
            name=f"{self.name} ({Benchmark.RESOLVE_NOOP.value})",
            prepare=f"{self.path} config cache_dir {cache_dir}",
            command=[
                self.path,
                "lock",
                "--update-reuse",
                "--project",
                cwd,
            ],
        )

    def install_cold(self, requirements_file: str, *, cwd: str) -> Command | None:
        """Install from a lockfile into a fresh venv with an empty cache."""
        self.setup(requirements_file, cwd=cwd)

        # Installation is benchmarked against a pre-generated lockfile.
        self._generate_lockfile(cwd)

        venv_dir = os.path.join(cwd, ".venv")
        cache_dir = os.path.join(cwd, "cache", "pdm")
        return Command(
            name=f"{self.name} ({Benchmark.INSTALL_COLD.value})",
            prepare=(
                f"rm -rf {cache_dir} && "
                f"{self.path} config cache_dir {cache_dir} && "
                f"virtualenv --clear -p {self.python} {venv_dir} --no-seed"
            ),
            command=[
                f"VIRTUAL_ENV={venv_dir}",
                self.path,
                "sync",
                "--project",
                cwd,
            ],
        )

    def install_warm(self, requirements_file: str, *, cwd: str) -> Command | None:
        """Install from a lockfile into a fresh venv with a warm cache."""
        self.setup(requirements_file, cwd=cwd)

        # Installation is benchmarked against a pre-generated lockfile.
        self._generate_lockfile(cwd)

        venv_dir = os.path.join(cwd, ".venv")
        cache_dir = os.path.join(cwd, "cache", "pdm")
        return Command(
            name=f"{self.name} ({Benchmark.INSTALL_WARM.value})",
            prepare=(
                f"{self.path} config cache_dir {cache_dir} && "
                f"virtualenv --clear -p {self.python} {venv_dir} --no-seed"
            ),
            command=[
                f"VIRTUAL_ENV={venv_dir}",
                self.path,
                "sync",
                "--project",
                cwd,
            ],
        )
| Pdm |
python | PyCQA__pydocstyle | src/pydocstyle/parser.py | {
"start": 9306,
"end": 9996
class ____(Exception):
    """Raised when there is a problem with __all__ when parsing."""

    # Guidance appended to every instance's message so the user knows how
    # to declare __all__ in a way pydocstyle can understand.
    _EXPLANATION = textwrap.dedent(
        """
        That means pydocstyle cannot decide which definitions are
        public. Variable __all__ should be present at most once in
        each file, in form
        `__all__ = ('a_public_function', 'APublicClass', ...)`.
        More info on __all__: http://stackoverflow.com/q/44834/. ')
        """
    )

    def __init__(self, message):
        """Initialize the error with a more specific message."""
        Exception.__init__(self, message + self._EXPLANATION)
| AllError |
python | django__django | django/db/models/expressions.py | {
"start": 72414,
"end": 72635
class ____(Enum):
    """Enumeration of window-frame exclusion options.

    The member values match the SQL keywords used in a window frame's
    ``EXCLUDE`` clause.
    """

    CURRENT_ROW = "CURRENT ROW"
    GROUP = "GROUP"
    TIES = "TIES"
    NO_OTHERS = "NO OTHERS"

    def __repr__(self):
        # Render as `ClassName.MEMBER` instead of Enum's default
        # `<ClassName.MEMBER: value>` form.
        return "%s.%s" % (self.__class__.__qualname__, self._name_)
| WindowFrameExclusion |
python | celery__celery | celery/utils/log.py | {
"start": 3221,
"end": 5131
class ____(logging.Formatter):
    """Logging formatter that adds colors based on severity."""

    #: Loglevel -> Color mapping.
    COLORS = colored().names

    #: Severity name -> color callable used to wrap the formatted message.
    colors = {
        'DEBUG': COLORS['blue'],
        'WARNING': COLORS['yellow'],
        'ERROR': COLORS['red'],
        'CRITICAL': COLORS['magenta'],
    }

    def __init__(self, fmt=None, use_color=True):
        """Initialize the formatter.

        Arguments:
            fmt: Format string passed to :class:`logging.Formatter`.
            use_color: If False, records are emitted uncolored even when
                a color is mapped for the record's level.
        """
        super().__init__(fmt)
        self.use_color = use_color

    def formatException(self, ei):
        # Tolerate a truthy non-tuple ``ei`` (e.g. a bare ``1``) by
        # substituting the currently-handled exception's info.
        if ei and not isinstance(ei, tuple):
            ei = sys.exc_info()
        r = super().formatException(ei)
        return r

    def format(self, record):
        """Format *record*, wrapping the message in its level's color."""
        msg = super().format(record)
        # No entry for e.g. INFO -> emitted uncolored.
        color = self.colors.get(record.levelname)

        # reset exception info later for other handlers...
        # NOTE(review): ``exc_info == 1`` appears to be a sentinel meaning
        # "use the current exception" — confirm against callers.
        einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info

        if color and self.use_color:
            try:
                # safe_str will repr the color object
                # and color will break on non-string objects
                # so need to reorder calls based on type.
                # Issue #427
                try:
                    if isinstance(msg, str):
                        return str(color(safe_str(msg)))
                    return safe_str(color(msg))
                except UnicodeDecodeError:  # pragma: no cover
                    return safe_str(msg)  # skip colors
            except Exception as exc:  # pylint: disable=broad-except
                # Coloring failed for some other reason: temporarily rewrite
                # the record to a placeholder message (with exc_info set so
                # the failure is visible), re-format it, then restore the
                # record in ``finally`` so other handlers see the original.
                prev_msg, record.exc_info, record.msg = (
                    record.msg, 1, '<Unrepresentable {!r}: {!r}>'.format(
                        type(msg), exc
                    ),
                )
                try:
                    return super().format(record)
                finally:
                    record.msg, record.exc_info = prev_msg, einfo
        else:
            return safe_str(msg)
| ColorFormatter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.