language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 23396,
"end": 24398
} | class ____(OfflineTestCaseMixin, TestCase):
templates_dir = "test_error_handling"
def _test_offline(self, engine, verbosity=0):
count, result = CompressCommand().handle_inner(
engines=[engine], verbosity=verbosity
)
if engine == "django":
self.assertEqual(2, count)
else:
# Because we use env.parse in Jinja2Parser, the engine does not
# actually load the 'extends' and 'includes' templates, and so
# it is unable to detect that they are missing. So all the
# 'compress' nodes are processed correctly.
self.assertEqual(4, count)
self.assertEqual(engine, "jinja2")
self.assertIn(self._render_link("187e2ce75808"), result)
self.assertIn(self._render_link("fffafcdf428e"), result)
self.assertIn(self._render_script("eeabdac29232"), result)
self.assertIn(self._render_script("9a7f06880ce3"), result)
| OfflineCompressTestCaseErrors |
python | kubernetes-client__python | kubernetes/client/models/v1_deployment_strategy.py | {
"start": 383,
"end": 4426
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'rolling_update': 'V1RollingUpdateDeployment',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1DeploymentStrategy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
@property
def rolling_update(self):
"""Gets the rolling_update of this V1DeploymentStrategy. # noqa: E501
:return: The rolling_update of this V1DeploymentStrategy. # noqa: E501
:rtype: V1RollingUpdateDeployment
"""
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
"""Sets the rolling_update of this V1DeploymentStrategy.
:param rolling_update: The rolling_update of this V1DeploymentStrategy. # noqa: E501
:type: V1RollingUpdateDeployment
"""
self._rolling_update = rolling_update
@property
def type(self):
"""Gets the type of this V1DeploymentStrategy. # noqa: E501
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate. # noqa: E501
:return: The type of this V1DeploymentStrategy. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1DeploymentStrategy.
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate. # noqa: E501
:param type: The type of this V1DeploymentStrategy. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeploymentStrategy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeploymentStrategy):
return True
return self.to_dict() != other.to_dict()
| V1DeploymentStrategy |
python | has2k1__plotnine | plotnine/geoms/geom_map.py | {
"start": 783,
"end": 9602
} | class ____(geom):
"""
Draw map feature
{usage}
The map feature are drawn without any special projections.
Parameters
----------
{common_parameters}
Notes
-----
This geom is best suited for plotting a shapefile read into
geopandas dataframe. The dataframe should have a `geometry`
column.
"""
DEFAULT_AES = {
"alpha": 1,
"color": "#111111",
"fill": "#333333",
"linetype": "solid",
"shape": "o",
"size": 0.5,
"stroke": 0.5,
}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
REQUIRED_AES = {"geometry"}
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
geom.__init__(self, mapping, data, **kwargs)
# Almost all geodataframes loaded from shapefiles
# have a geometry column.
if "geometry" not in self.mapping:
self.mapping["geometry"] = "geometry"
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
if not len(data):
return data
# Remove any NULL geometries, and remember
# All the non-Null shapes in a shapefile are required to be
# of the same shape type.
bool_idx = np.array([g is not None for g in data["geometry"]])
if not np.all(bool_idx):
data = data.loc[bool_idx]
# Add polygon limits. Scale training uses them
try:
bounds = data["geometry"].bounds
except AttributeError:
# The geometry is not a GeoSeries
# Bounds calculation is extracted from
# geopandas.base.GeoPandasBase.bounds
bounds = pd.DataFrame(
np.array([x.bounds for x in data["geometry"]]),
columns=["xmin", "ymin", "xmax", "ymax"],
index=data.index,
)
else:
bounds.rename(
columns={
"minx": "xmin",
"maxx": "xmax",
"miny": "ymin",
"maxy": "ymax",
},
inplace=True,
)
data = pd.concat([data, bounds], axis=1)
return data
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
if not len(data):
return
params = self.params
data.loc[data["color"].isna(), "color"] = "none"
data.loc[data["fill"].isna(), "fill"] = "none"
data["fill"] = to_rgba(data["fill"], data["alpha"])
geom_type = data.geometry.iloc[0].geom_type
if geom_type in ("Polygon", "MultiPolygon"):
from matplotlib.collections import PatchCollection
linewidth = data["size"] * SIZE_FACTOR
patches = [PolygonPatch(g) for g in data["geometry"]]
coll = PatchCollection(
patches,
edgecolor=data["color"],
facecolor=data["fill"],
linestyle=data["linetype"],
linewidth=linewidth,
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(coll)
elif geom_type == "Point":
# Extract point coordinates from shapely geom
# and plot with geom_point
arr = np.array([list(g.coords)[0] for g in data["geometry"]])
data["x"] = arr[:, 0]
data["y"] = arr[:, 1]
for _, gdata in data.groupby("group"):
gdata.reset_index(inplace=True, drop=True)
gdata.is_copy = None
geom_point.draw_group(gdata, panel_params, coord, ax, params)
elif geom_type == "MultiPoint":
# Where n is the length of the dataframe (no. of multipoints),
# m is the number of all points in all multipoints
#
# - MultiPoint -> List of Points (tuples) (n -> m)
# - Explode the list, to create a dataframe were each point
# is associated with the right aesthetics (n -> m)
# - Create x & y columns from the points (m -> m)
data["points"] = [
[p.coords[0] for p in mp.geoms] for mp in data["geometry"]
]
data = data.explode("points", ignore_index=True)
data["x"] = [p[0] for p in data["points"]]
data["y"] = [p[1] for p in data["points"]]
geom_point.draw_group(data, panel_params, coord, ax, params)
elif geom_type in ("LineString", "MultiLineString"):
from matplotlib.collections import LineCollection
linewidth = data["size"] * SIZE_FACTOR
data["color"] = to_rgba(data["color"], data["alpha"])
segments = []
for g in data["geometry"]:
if g.geom_type == "LineString":
segments.append(g.coords)
else:
segments.extend(_g.coords for _g in g.geoms)
coll = LineCollection(
segments,
edgecolor=data["color"],
linewidth=linewidth,
linestyle=data["linetype"],
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(coll)
else:
raise TypeError(f"Could not plot geometry of type '{geom_type}'")
@staticmethod
def draw_legend(
data: pd.Series[Any], da: DrawingArea, lyr: layer
) -> DrawingArea:
"""
Draw a rectangle in the box
Parameters
----------
data : Series
Data Row
da : DrawingArea
Canvas
lyr : layer
Layer
Returns
-------
out : DrawingArea
"""
data["size"] = data["stroke"]
del data["stroke"]
return geom_polygon.draw_legend(data, da, lyr)
def PolygonPatch(
obj: Polygon,
) -> PathPatch:
"""
Return a Matplotlib patch from a Polygon/MultiPolygon Geometry
Parameters
----------
obj : shapley.geometry.Polygon | shapley.geometry.MultiPolygon
A Polygon or MultiPolygon to create a patch for description
Returns
-------
result : matplotlib.patches.PathPatch
A patch representing the shapely geometry
Notes
-----
This functionality was originally provided by the descartes package
by Sean Gillies (BSD license, https://pypi.org/project/descartes)
which is nolonger being maintained.
"""
from matplotlib.patches import PathPatch
from matplotlib.path import Path
def cw_coords(ring: LinearRing) -> npt.NDArray[Any]:
"""
Return Clockwise array coordinates
Parameters
----------
ring: shapely.geometry.polygon.LinearRing
LinearRing
Returns
-------
out: ndarray
(n x 2) array of coordinate points.
"""
if ring.is_ccw:
return np.asarray(ring.coords)[:, :2][::-1]
return np.asarray(ring.coords)[:, :2]
def ccw_coords(ring: LinearRing) -> npt.NDArray[Any]:
"""
Return Counter Clockwise array coordinates
Parameters
----------
ring: shapely.geometry.polygon.LinearRing
LinearRing
Returns
-------
out: ndarray
(n x 2) array of coordinate points.
"""
if ring.is_ccw:
return np.asarray(ring.coords)[:, :2]
return np.asarray(ring.coords)[:, :2][::-1]
# The interiors are holes in the Polygon
# MPL draws a hole if the vertex points are specified
# in an opposite direction. So we use Clockwise for
# the exterior/shell and Counter-Clockwise for any
# interiors/holes
if obj.geom_type == "Polygon":
_exterior = [Path(cw_coords(obj.exterior))]
_interior = [Path(ccw_coords(ring)) for ring in obj.interiors]
else:
# A MultiPolygon has one or more Polygon geoms.
# Concatenate the exterior of all the Polygons
# and the interiors
_exterior = []
_interior = []
for p in obj.geoms: # type: ignore
_exterior.append(Path(cw_coords(p.exterior)))
_interior.extend([Path(ccw_coords(ring)) for ring in p.interiors])
path = Path.make_compound_path(*_exterior, *_interior)
return PathPatch(path)
def check_geopandas():
try:
import geopandas # noqa: F401
except ImportError as e:
msg = "geom_map requires geopandas. Please install geopandas."
raise PlotnineError(msg) from e
| geom_map |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 1870,
"end": 2149
} | class ____(Enum):
ACTORS = "actors"
JOBS = "jobs"
PLACEMENT_GROUPS = "placement_groups"
NODES = "nodes"
WORKERS = "workers"
TASKS = "tasks"
OBJECTS = "objects"
RUNTIME_ENVS = "runtime_envs"
CLUSTER_EVENTS = "cluster_events"
@unique
| StateResource |
python | google__flatbuffers | tests/MyGame/Example/Stat.py | {
"start": 176,
"end": 2220
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Stat()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsStat(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def StatBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# Stat
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Stat
def Id(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Stat
def Val(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Stat
def Count(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
return 0
def StatStart(builder):
builder.StartObject(3)
def Start(builder):
StatStart(builder)
def StatAddId(builder, id):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(id), 0)
def AddId(builder, id):
StatAddId(builder, id)
def StatAddVal(builder, val):
builder.PrependInt64Slot(1, val, 0)
def AddVal(builder, val):
StatAddVal(builder, val)
def StatAddCount(builder, count):
builder.PrependUint16Slot(2, count, 0)
def AddCount(builder, count):
StatAddCount(builder, count)
def StatEnd(builder):
return builder.EndObject()
def End(builder):
return StatEnd(builder)
| Stat |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_constructors.py | {
"start": 393,
"end": 3503
} | class ____:
@pytest.mark.parametrize(
"freq,freq_depr",
[
("2M", "2ME"),
("2Q-MAR", "2QE-MAR"),
("2Y-FEB", "2YE-FEB"),
("2M", "2me"),
("2Q-MAR", "2qe-MAR"),
("2Y-FEB", "2yE-feb"),
],
)
def test_period_index_offsets_frequency_error_message(self, freq, freq_depr):
# GH#52064
msg = f"Invalid frequency: {freq_depr}"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2020-01-01", "2020-01-02"], freq=freq_depr)
with pytest.raises(ValueError, match=msg):
period_range(start="2020-01-01", end="2020-01-02", freq=freq_depr)
@pytest.mark.parametrize(
"freq",
["2SME", "2sme", "2BYE", "2Bye", "2CBME"],
)
def test_period_index_frequency_invalid_freq(self, freq):
# GH#9586
msg = f"Invalid frequency: {freq}"
with pytest.raises(ValueError, match=msg):
period_range("2020-01", "2020-05", freq=freq)
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2020-01", "2020-05"], freq=freq)
@pytest.mark.parametrize("freq", ["2BQE-SEP", "2BYE-MAR", "2BME"])
def test_period_index_from_datetime_index_invalid_freq(self, freq):
# GH#56899
msg = f"Invalid frequency: {freq}"
rng = date_range("01-Jan-2012", periods=8, freq=freq)
with pytest.raises(ValueError, match=msg):
rng.to_period()
@pytest.mark.parametrize("freq_depr", ["2T", "1l", "2U", "n"])
def test_period_index_T_L_U_N_raises(self, freq_depr):
# GH#9586
msg = f"Invalid frequency: {freq_depr}"
with pytest.raises(ValueError, match=msg):
period_range("2020-01", "2020-05", freq=freq_depr)
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2020-01", "2020-05"], freq=freq_depr)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:Period with BDay freq:FutureWarning")
@pytest.mark.parametrize(
"freq,freq_depr",
[("2W", "2w"), ("2W-FRI", "2w-fri"), ("2D", "2d"), ("2B", "2b")],
)
def test_period_index_depr_lowercase_frequency(self, freq, freq_depr):
# GH#58998
msg = (
f"'{freq_depr[1:]}' is deprecated and will be removed in a future version."
)
with tm.assert_produces_warning(
Pandas4Warning, match=msg, raise_on_extra_warnings=False
):
result = PeriodIndex(["2020-01-01", "2020-01-02"], freq=freq_depr)
expected = PeriodIndex(["2020-01-01", "2020-01-02"], freq=freq)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(
Pandas4Warning, match=msg, raise_on_extra_warnings=False
):
result = period_range(start="2020-01-01", end="2020-01-02", freq=freq_depr)
expected = period_range(start="2020-01-01", end="2020-01-02", freq=freq)
tm.assert_index_equal(result, expected)
| TestPeriodIndexDisallowedFreqs |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 276896,
"end": 278245
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"current_user_can_approve",
"environment",
"reviewers",
"wait_timer",
"wait_timer_started_at",
)
current_user_can_approve = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="currentUserCanApprove"
)
environment = sgqlc.types.Field(
sgqlc.types.non_null("Environment"), graphql_name="environment"
)
reviewers = sgqlc.types.Field(
sgqlc.types.non_null("DeploymentReviewerConnection"),
graphql_name="reviewers",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
wait_timer = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="waitTimer")
wait_timer_started_at = sgqlc.types.Field(
DateTime, graphql_name="waitTimerStartedAt"
)
| DeploymentRequest |
python | doocs__leetcode | solution/2800-2899/2802.Find The K-th Lucky Number/Solution.py | {
"start": 0,
"end": 363
} | class ____:
def kthLuckyNumber(self, k: int) -> str:
n = 1
while k > 1 << n:
k -= 1 << n
n += 1
ans = []
while n:
n -= 1
if k <= 1 << n:
ans.append("4")
else:
ans.append("7")
k -= 1 << n
return "".join(ans)
| Solution |
python | ansible__ansible | lib/ansible/_internal/_ssh/_ssh_agent.py | {
"start": 15155,
"end": 15377
} | class ____(PublicKeyMsg):
type: KeyAlgo
e: mpint
n: mpint
comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
@dataclasses.dataclass(order=True, slots=True)
| RSAPublicKeyMsg |
python | pytest-dev__pytest | scripts/prepare-release-pr.py | {
"start": 812,
"end": 4989
} | class ____(Exception):
pass
SLUG = "pytest-dev/pytest"
PR_BODY = """\
Created by the [prepare release pr]\
(https://github.com/pytest-dev/pytest/actions/workflows/prepare-release-pr.yml) workflow.
Once all builds pass and it has been **approved** by one or more maintainers, start the \
[deploy](https://github.com/pytest-dev/pytest/actions/workflows/deploy.yml) workflow, using these parameters:
* `Use workflow from`: `release-{version}`.
* `Release version`: `{version}`.
Or execute on the command line:
```console
gh workflow run deploy.yml -r release-{version} -f version={version}
```
After the workflow has been approved by a core maintainer, the package will be uploaded to PyPI automatically.
"""
def prepare_release_pr(base_branch: str, is_major: bool, prerelease: str) -> None:
print()
print(f"Processing release for branch {Fore.CYAN}{base_branch}")
check_call(["git", "checkout", f"origin/{base_branch}"])
changelog = Path("changelog")
features = list(changelog.glob("*.feature.rst"))
breaking = list(changelog.glob("*.breaking.rst"))
is_feature_release = bool(features or breaking)
try:
version = find_next_version(
base_branch, is_major, is_feature_release, prerelease
)
except InvalidFeatureRelease as e:
print(f"{Fore.RED}{e}")
raise SystemExit(1) from None
print(f"Version: {Fore.CYAN}{version}")
release_branch = f"release-{version}"
run(
["git", "config", "user.name", "pytest bot"],
check=True,
)
run(
["git", "config", "user.email", "pytestbot@gmail.com"],
check=True,
)
run(
["git", "checkout", "-b", release_branch, f"origin/{base_branch}"],
check=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} created.")
if is_major:
template_name = "release.major.rst"
elif prerelease:
template_name = "release.pre.rst"
elif is_feature_release:
template_name = "release.minor.rst"
else:
template_name = "release.patch.rst"
# important to use tox here because we have changed branches, so dependencies
# might have changed as well
cmdline = [
"tox",
"-e",
"release",
"--",
version,
template_name,
release_branch, # doc_version
"--skip-check-links",
]
print("Running", " ".join(cmdline))
run(
cmdline,
check=True,
)
run(
["git", "push", "origin", f"HEAD:{release_branch}", "--force"],
check=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} pushed.")
body = PR_BODY.format(version=version)
run(
[
"gh",
"pr",
"create",
f"--base={base_branch}",
f"--head={release_branch}",
f"--title=Release {version}",
f"--body={body}",
"--draft",
],
check=True,
)
def find_next_version(
base_branch: str, is_major: bool, is_feature_release: bool, prerelease: str
) -> str:
output = check_output(["git", "tag"], encoding="UTF-8")
valid_versions = []
for v in output.splitlines():
m = re.match(r"\d.\d.\d+$", v.strip())
if m:
valid_versions.append(tuple(int(x) for x in v.split(".")))
valid_versions.sort()
last_version = valid_versions[-1]
if is_major:
return f"{last_version[0] + 1}.0.0{prerelease}"
elif is_feature_release:
return f"{last_version[0]}.{last_version[1] + 1}.0{prerelease}"
else:
return f"{last_version[0]}.{last_version[1]}.{last_version[2] + 1}{prerelease}"
def main() -> None:
init(autoreset=True)
parser = argparse.ArgumentParser()
parser.add_argument("base_branch")
parser.add_argument("--major", action="store_true", default=False)
parser.add_argument("--prerelease", default="")
options = parser.parse_args()
prepare_release_pr(
base_branch=options.base_branch,
is_major=options.major,
prerelease=options.prerelease,
)
if __name__ == "__main__":
main()
| InvalidFeatureRelease |
python | doocs__leetcode | lcci/08.12.Eight Queens/Solution.py | {
"start": 0,
"end": 682
} | class ____:
def solveNQueens(self, n: int) -> List[List[str]]:
def dfs(i: int):
if i == n:
ans.append(["".join(row) for row in g])
return
for j in range(n):
if col[j] + dg[i + j] + udg[n - i + j] == 0:
g[i][j] = "Q"
col[j] = dg[i + j] = udg[n - i + j] = 1
dfs(i + 1)
col[j] = dg[i + j] = udg[n - i + j] = 0
g[i][j] = "."
ans = []
g = [["."] * n for _ in range(n)]
col = [0] * n
dg = [0] * (n << 1)
udg = [0] * (n << 1)
dfs(0)
return ans
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/modeling.py | {
"start": 6911,
"end": 10085
} | class ____(RegexLexer):
"""
Pygments Lexer for JAGS.
.. versionadded:: 1.6
"""
name = 'JAGS'
aliases = ['jags']
filenames = ['*.jag', '*.bug']
# JAGS
_FUNCTIONS = (
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cos', 'cosh', 'cloglog',
'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
# Truncation/Censoring (should I include)
'T', 'I')
# Distributions with density, probability and quartile functions
_DISTRIBUTIONS = tuple('[dpq]%s' % x for x in
('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp',
'df', 'gamma', 'gen.gamma', 'logis', 'lnorm',
'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib'))
# Other distributions without density and probability
_OTHER_DISTRIBUTIONS = (
'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
'dnbinom', 'dweibull', 'ddirich')
tokens = {
'whitespace': [
(r"\s+", Text),
],
'names': [
# Regular variable names
(r'[a-zA-Z][\w.]*\b', Name),
],
'comments': [
# do not use stateful comments
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(model|data)(\s+)(\{)',
bygroups(Keyword.Namespace, Text, Punctuation)),
(r'var(?![\w.])', Keyword.Declaration),
# Reserved Words
(r'(for|in)(?![\w.])', Keyword.Reserved),
# Builtins
# Need to use lookahead because . is a valid char
(r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
+ _DISTRIBUTIONS
+ _OTHER_DISTRIBUTIONS),
Name.Builtin),
# Names
include('names'),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
(r'<-|~', Operator),
# # JAGS includes many more than OpenBUGS
(r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator),
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*model\s*\{', text, re.M):
if re.search(r'^\s*data\s*\{', text, re.M):
return 0.9
elif re.search(r'^\s*var', text, re.M):
return 0.9
else:
return 0.3
else:
return 0
| JagsLexer |
python | pytorch__pytorch | torch/_inductor/compile_fx_ext.py | {
"start": 12656,
"end": 18908
} | class ____(FxCompile):
"""
This is used to represent an FxCompile which occurs across a serialized
boundary.
"""
@override
def codegen_and_compile(
self,
gm: GraphModule,
example_inputs: Sequence[InputType],
inputs_to_check: Sequence[int],
graph_kwargs: _CompileFxKwargs,
) -> OutputCode:
# If this code changes it's likely _AsyncFxCompile.codegen_and_compile()
# will also need to match.
serialized = self.serialize_compile(
gm, example_inputs, inputs_to_check, graph_kwargs
)
if not serialized:
return _InProcessFxCompile().codegen_and_compile(
gm, example_inputs, inputs_to_check, graph_kwargs
)
inputs, constants = serialized
output = self._send_to_child(inputs).deserialize(constants)
self._postprocess(output)
self._compile_stats[type(self)].codegen_and_compile += 1
# TODO: Do we need to figure out what changed in TracingContext in the
# child and plumb that back up to the parent?
return output.graph
def serialize_compile(
self,
gm: GraphModule,
example_inputs: Sequence[InputType],
inputs_to_check: Sequence[int],
graph_kwargs: _CompileFxKwargs,
) -> Optional[tuple[_WireProtocolPickledInput, CompiledFxGraphConstantsWithGm]]:
"""
Prepare a _WireProtocolInput to compile. If None is returned then it
wasn't possible to serialize and we should fallback to in-process.
"""
try:
# _check_for_hop raises BypassFxGraphCache when it detects something
# we can't cache (or serialize)
FxGraphCache._check_for_hop(gm)
except BypassFxGraphCache as e:
log.debug("Skipping %s compile: %s", type(self), e) # noqa: G200
return None
context = torch._guards.TracingContext.try_get()
constants = CompiledFxGraphConstantsWithGm(gm)
logger_state = _LoggerState()
lowering = _LoweringSerializer()
# If we're running tests then grab the DeterministicGuard (don't want to
# import this if it isn't already imported because it has side-effects)
deterministic_guard_for_testing: Optional[ # type: ignore[name-defined] # mypy bug
torch.testing._internal.common_utils.DeterministicGuard
] = None
try:
deterministic_guard_for_testing = (
torch.testing._internal.common_utils.DeterministicGuard._current_state() # type: ignore[attr-defined] # mypy bug
)
except AttributeError:
pass
fake_mode = _current_fake_mode()
fake_tensor_mode = _FakeTensorModeSerializer(fake_mode)
from pickle import PicklingError
try:
input = _WireProtocolInput(
gm,
example_inputs,
inputs_to_check,
graph_kwargs,
context,
config.save_config_portable(),
_VirtualizedSerializer.serialize(),
deterministic_guard_for_testing,
logger_state,
lowering,
fake_tensor_mode,
).serialize()
return (input, constants)
except (AttributeError, BypassFxGraphCache, PicklingError):
# For example: AttributeError: Can't pickle local object
# 'make_opaque_unary_fn.<locals>.OpaqueUnaryFn'
# TODO: scuba record about not being able to do this?
log.warning("Unable to pickle input graph or example inputs", exc_info=True)
return None
@abstractmethod
def _send_to_child(
self, pickled_input: _WireProtocolPickledInput
) -> _WireProtocolPickledOutput:
# The implementation of this should transfer `input` to the child, call
# `_run_in_child(input)` and transfer the result back.
...
def _postprocess(self, output: _WireProtocolOutput) -> None:
pass
@classmethod
def _run_in_child(
cls,
pickled_input: _WireProtocolPickledInput,
extra_env: Optional[Mapping[str, str]] = None,
) -> _WireProtocolPickledOutput:
metrics = CachedMetricsHelper()
with contextlib.ExitStack() as stack:
if extra_env is not None:
import unittest
stack.enter_context(unittest.mock.patch.dict("os.environ", extra_env))
# Save warnings to "replay" in the parent
warning_replay = stack.enter_context(warnings.catch_warnings(record=True))
# TODO: Should we split the input into multiple sections where each
# section sets up state for the previous section? (i.e. a Config section
# which we decode and apply, followed by a FakeTensorMode section which
# we decode and apply, etc)
input = pickled_input.deserialize()
stack.enter_context(input.virtualized.patch())
stack.enter_context(input.lowering.patch())
stack.enter_context(config.patch(input.config))
captured_logs = stack.enter_context(input.logger_state)
if input.deterministic_guard_for_testing:
stack.enter_context(input.deterministic_guard_for_testing)
stack.enter_context(torch._guards.tracing(input.tracing_context))
stack.enter_context(DebugContext())
fake_mode = _current_fake_mode()
stack.enter_context(input.fake_tensor_mode.patch(fake_mode))
output_graph = _InProcessFxCompile().codegen_and_compile(
input.gm,
input.example_inputs,
input.inputs_to_check,
input.graph_kwargs,
)
logs = captured_logs.finish()
return _WireProtocolOutput(
output_graph,
metrics.get_deltas(),
logs,
warning_replay,
fake_mode.shape_env,
).serialize()
# This is a debugging/testing implementation of FxCompile which serializes the
# input and output but still runs the FxCompile in-process.
@final
| _SerializedFxCompile |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 204107,
"end": 209549
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = "postgresql"
def setup_test(self):
metadata = MetaData()
self.test_table = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("test_column", JSON),
)
self.jsoncol = self.test_table.c.test_column
@property
def any_(self):
return any_(array([7]))
# Test combinations that use path (#>) and astext (->> and #>>) operators
# These don't change between JSON and JSONB
@testing.combinations(
(
lambda self: self.jsoncol[("foo", 1)] == None, # noqa
"(test_table.test_column #> %(test_column_1)s) IS NULL",
),
(
lambda self: self.jsoncol[("foo", 1)] != None, # noqa
"(test_table.test_column #> %(test_column_1)s) IS NOT NULL",
),
(
lambda self: self.jsoncol["bar"].astext == None, # noqa
"(test_table.test_column ->> %(test_column_1)s) IS NULL",
),
(
lambda self: self.jsoncol["bar"].astext.cast(Integer) == 5,
"CAST((test_table.test_column ->> %(test_column_1)s) AS INTEGER) "
"= %(param_1)s",
),
(
lambda self: self.jsoncol[("foo", 1)].astext == None, # noqa
"(test_table.test_column #>> %(test_column_1)s) IS NULL",
),
(
lambda self: self.jsoncol["bar"].astext == self.any_,
"(test_table.test_column ->> %(test_column_1)s) = "
"ANY (ARRAY[%(param_1)s])",
),
(
lambda self: self.jsoncol["bar"].astext != self.any_,
"(test_table.test_column ->> %(test_column_1)s) != "
"ANY (ARRAY[%(param_1)s])",
),
(
lambda self: self.jsoncol[("foo", 1)] == self.any_,
"(test_table.test_column #> %(test_column_1)s) = "
"ANY (ARRAY[%(param_1)s])",
),
(
lambda self: self.jsoncol[("foo", 1)] != self.any_,
"(test_table.test_column #> %(test_column_1)s) != "
"ANY (ARRAY[%(param_1)s])",
),
id_="as",
)
def test_where(self, whereclause_fn, expected):
whereclause = whereclause_fn(self)
stmt = select(self.test_table).where(whereclause)
self.assert_compile(
stmt,
"SELECT test_table.id, test_table.test_column FROM test_table "
"WHERE %s" % expected,
)
# Test combinations that use subscript (->) operator
# These differ between JSON (always ->) and JSONB ([] on PG 14+)
@testing.combinations(
(
lambda self: self.jsoncol["bar"] == None, # noqa
"(test_table.test_column -> %(test_column_1)s) IS NULL",
),
(
lambda self: self.jsoncol["bar"] != None, # noqa
"(test_table.test_column -> %(test_column_1)s) IS NOT NULL",
),
(
lambda self: self.jsoncol["bar"].cast(Integer) == 5,
"CAST((test_table.test_column -> %(test_column_1)s) AS INTEGER) "
"= %(param_1)s",
),
(
lambda self: self.jsoncol["bar"] == 42,
"(test_table.test_column -> %(test_column_1)s) = %(param_1)s",
),
(
lambda self: self.jsoncol["bar"] != 42,
"(test_table.test_column -> %(test_column_1)s) != %(param_1)s",
),
(
lambda self: self.jsoncol["bar"] == self.any_,
"(test_table.test_column -> %(test_column_1)s) = "
"ANY (ARRAY[%(param_1)s])",
),
(
lambda self: self.jsoncol["bar"] != self.any_,
"(test_table.test_column -> %(test_column_1)s) != "
"ANY (ARRAY[%(param_1)s])",
),
id_="as",
)
def test_where_subscript(self, whereclause_fn, expected):
whereclause = whereclause_fn(self)
stmt = select(self.test_table).where(whereclause)
self.assert_compile(
stmt,
"SELECT test_table.id, test_table.test_column FROM test_table "
"WHERE %s" % expected,
)
def test_path_typing(self):
col = column("x", JSON())
is_(col["q"].type._type_affinity, types.JSON)
is_(col[("q",)].type._type_affinity, types.JSON)
is_(col["q"]["p"].type._type_affinity, types.JSON)
is_(col[("q", "p")].type._type_affinity, types.JSON)
def test_custom_astext_type(self):
class MyType(types.UserDefinedType):
pass
col = column("x", JSON(astext_type=MyType))
is_(col["q"].astext.type.__class__, MyType)
is_(col[("q", "p")].astext.type.__class__, MyType)
is_(col["q"]["p"].astext.type.__class__, MyType)
# Test column selection that uses subscript (->) operator
# This differs between JSON (always ->) and JSONB ([] on PG 14+)
@testing.combinations(
(
lambda self: self.jsoncol["foo"],
"test_table.test_column -> %(test_column_1)s AS anon_1",
True,
)
)
def test_cols_subscript(self, colclause_fn, expected, from_):
colclause = colclause_fn(self)
stmt = select(colclause)
self.assert_compile(
stmt,
("SELECT %s" + (" FROM test_table" if from_ else "")) % expected,
)
| JSONTest |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 9257,
"end": 9524
} | class ____(DeprecationWarning):
"""Issued for usage of deprecated features of Airflow provider."""
deprecated_provider_since: str | None = None
"Indicates the provider version that started raising this deprecation warning"
| AirflowProviderDeprecationWarning |
python | jazzband__django-model-utils | tests/test_models/test_uuid_model.py | {
"start": 140,
"end": 689
} | class ____(TestCase):
def test_uuid_model_with_uuid_field_as_primary_key(self) -> None:
instance = CustomUUIDModel()
instance.save()
self.assertEqual(instance.id.__class__.__name__, 'UUID')
self.assertEqual(instance.id, instance.pk)
def test_uuid_model_with_uuid_field_as_not_primary_key(self) -> None:
instance = CustomNotPrimaryUUIDModel()
instance.save()
self.assertEqual(instance.uuid.__class__.__name__, 'UUID')
self.assertNotEqual(instance.uuid, instance.pk)
| UUIDFieldTests |
python | tox-dev__tox | src/tox/config/loader/toml/_replace.py | {
"start": 5952,
"end": 6313
} | class ____:
def __init__(self, loader: TomlLoader, section: str) -> None:
self._loader = loader
self._section = section
def load(self, item: str, chain: list[str] | None = None) -> Any: # noqa: ARG002
return self._loader.load_raw_from_root(f"{self._section}{self._loader.section.SEP}{item}")
__all__ = [
"Unroll",
]
| RawLoader |
python | celery__celery | t/unit/utils/test_collections.py | {
"start": 421,
"end": 1575
} | class ____:
def test_get_set_keys_values_items(self):
x = DictAttribute(Bunch())
x['foo'] = 'The quick brown fox'
assert x['foo'] == 'The quick brown fox'
assert x['foo'] == x.obj.foo
assert x.get('foo') == 'The quick brown fox'
assert x.get('bar') is None
with pytest.raises(KeyError):
x['bar']
x.foo = 'The quick yellow fox'
assert x['foo'] == 'The quick yellow fox'
assert ('foo', 'The quick yellow fox') in list(x.items())
assert 'foo' in list(x.keys())
assert 'The quick yellow fox' in list(x.values())
def test_setdefault(self):
x = DictAttribute(Bunch())
x.setdefault('foo', 'NEW')
assert x['foo'] == 'NEW'
x.setdefault('foo', 'XYZ')
assert x['foo'] == 'NEW'
def test_contains(self):
x = DictAttribute(Bunch())
x['foo'] = 1
assert 'foo' in x
assert 'bar' not in x
def test_items(self):
obj = Bunch(attr1=1)
x = DictAttribute(obj)
x['attr2'] = 2
assert x['attr1'] == 1
assert x['attr2'] == 2
| test_DictAttribute |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/natbot/crawler.py | {
"start": 770,
"end": 16881
} | class ____:
"""A crawler for web pages.
**Security Note**: This is an implementation of a crawler that uses a browser via
Playwright.
This crawler can be used to load arbitrary webpages INCLUDING content
from the local file system.
Control access to who can submit crawling requests and what network access
the crawler has.
Make sure to scope permissions to the minimal permissions necessary for
the application.
See https://docs.langchain.com/oss/python/security-policy for more information.
"""
def __init__(self) -> None:
"""Initialize the crawler."""
try:
from playwright.sync_api import sync_playwright
except ImportError as e:
msg = (
"Could not import playwright python package. "
"Please install it with `pip install playwright`."
)
raise ImportError(msg) from e
self.browser: Browser = (
sync_playwright().start().chromium.launch(headless=False)
)
self.page: Page = self.browser.new_page()
self.page.set_viewport_size({"width": 1280, "height": 1080})
self.page_element_buffer: dict[int, ElementInViewPort]
self.client: CDPSession
def go_to_page(self, url: str) -> None:
"""Navigate to the given URL.
Args:
url: The URL to navigate to. If it does not contain a scheme, it will be
prefixed with "http://".
"""
self.page.goto(url=url if "://" in url else "http://" + url)
self.client = self.page.context.new_cdp_session(self.page)
self.page_element_buffer = {}
def scroll(self, direction: str) -> None:
"""Scroll the page in the given direction.
Args:
direction: The direction to scroll in, either "up" or "down".
"""
if direction == "up":
self.page.evaluate(
"(document.scrollingElement || document.body).scrollTop = "
"(document.scrollingElement || document.body).scrollTop - "
"window.innerHeight;"
)
elif direction == "down":
self.page.evaluate(
"(document.scrollingElement || document.body).scrollTop = "
"(document.scrollingElement || document.body).scrollTop + "
"window.innerHeight;"
)
def click(self, id_: str | int) -> None:
"""Click on an element with the given id.
Args:
id_: The id of the element to click on.
"""
# Inject javascript into the page which removes the target= attribute from links
js = """
links = document.getElementsByTagName("a");
for (var i = 0; i < links.length; i++) {
links[i].removeAttribute("target");
}
"""
self.page.evaluate(js)
element = self.page_element_buffer.get(int(id_))
if element:
x: float = element["center_x"]
y: float = element["center_y"]
self.page.mouse.click(x, y)
else:
print("Could not find element") # noqa: T201
def type(self, id_: str | int, text: str) -> None:
"""Type text into an element with the given id.
Args:
id_: The id of the element to type into.
text: The text to type into the element.
"""
self.click(id_)
self.page.keyboard.type(text)
def enter(self) -> None:
"""Press the Enter key."""
self.page.keyboard.press("Enter")
def crawl(self) -> list[str]:
"""Crawl the current page.
Returns:
A list of the elements in the viewport.
"""
page = self.page
page_element_buffer = self.page_element_buffer
start = time.time()
page_state_as_text = []
device_pixel_ratio: float = page.evaluate("window.devicePixelRatio")
if platform == "darwin" and device_pixel_ratio == 1: # lies
device_pixel_ratio = 2
win_upper_bound: float = page.evaluate("window.pageYOffset")
win_left_bound: float = page.evaluate("window.pageXOffset")
win_width: float = page.evaluate("window.screen.width")
win_height: float = page.evaluate("window.screen.height")
win_right_bound: float = win_left_bound + win_width
win_lower_bound: float = win_upper_bound + win_height
# percentage_progress_start = (win_upper_bound / document_scroll_height) * 100
# percentage_progress_end = (
# (win_height + win_upper_bound) / document_scroll_height
# ) * 100
percentage_progress_start = 1
percentage_progress_end = 2
page_state_as_text.append(
{
"x": 0,
"y": 0,
"text": f"[scrollbar {percentage_progress_start:0.2f}-"
f"{percentage_progress_end:0.2f}%]",
}
)
tree = self.client.send(
"DOMSnapshot.captureSnapshot",
{"computedStyles": [], "includeDOMRects": True, "includePaintOrder": True},
)
strings: dict[int, str] = tree["strings"]
document: dict[str, Any] = tree["documents"][0]
nodes: dict[str, Any] = document["nodes"]
backend_node_id: dict[int, int] = nodes["backendNodeId"]
attributes: dict[int, dict[int, Any]] = nodes["attributes"]
node_value: dict[int, int] = nodes["nodeValue"]
parent: dict[int, int] = nodes["parentIndex"]
node_names: dict[int, int] = nodes["nodeName"]
is_clickable: set[int] = set(nodes["isClickable"]["index"])
input_value: dict[str, Any] = nodes["inputValue"]
input_value_index: list[int] = input_value["index"]
input_value_values: list[int] = input_value["value"]
layout: dict[str, Any] = document["layout"]
layout_node_index: list[int] = layout["nodeIndex"]
bounds: dict[int, list[float]] = layout["bounds"]
cursor: int = 0
child_nodes: dict[str, list[dict[str, Any]]] = {}
elements_in_view_port: list[ElementInViewPort] = []
anchor_ancestry: dict[str, tuple[bool, int | None]] = {"-1": (False, None)}
button_ancestry: dict[str, tuple[bool, int | None]] = {"-1": (False, None)}
def convert_name(
node_name: str | None,
has_click_handler: bool | None, # noqa: FBT001
) -> str:
if node_name == "a":
return "link"
if node_name == "input":
return "input"
if node_name == "img":
return "img"
if (
node_name == "button" or has_click_handler
): # found pages that needed this quirk
return "button"
return "text"
def find_attributes(
attributes: dict[int, Any], keys: list[str]
) -> dict[str, str]:
values = {}
for [key_index, value_index] in zip(*(iter(attributes),) * 2, strict=False):
if value_index < 0:
continue
key = strings[key_index]
value = strings[value_index]
if key in keys:
values[key] = value
keys.remove(key)
if not keys:
return values
return values
def add_to_hash_tree(
hash_tree: dict[str, tuple[bool, int | None]],
tag: str,
node_id: int,
node_name: str | None,
parent_id: int,
) -> tuple[bool, int | None]:
parent_id_str = str(parent_id)
if parent_id_str not in hash_tree:
parent_name = strings[node_names[parent_id]].lower()
grand_parent_id = parent[parent_id]
add_to_hash_tree(
hash_tree, tag, parent_id, parent_name, grand_parent_id
)
is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
# even if the anchor is nested in another anchor, we set the "root" for all
# descendants to be ::Self
if node_name == tag:
value: tuple[bool, int | None] = (True, node_id)
elif (
is_parent_desc_anchor
): # reuse the parent's anchor_id (which could be much higher in the tree)
value = (True, anchor_id)
else:
value = (
False,
None,
)
# not a descendant of an anchor, most likely it will become text, an
# interactive element or discarded
hash_tree[str(node_id)] = value
return value
for index, node_name_index in enumerate(node_names):
node_parent = parent[index]
node_name: str | None = strings[node_name_index].lower()
is_ancestor_of_anchor, anchor_id = add_to_hash_tree(
anchor_ancestry, "a", index, node_name, node_parent
)
is_ancestor_of_button, button_id = add_to_hash_tree(
button_ancestry, "button", index, node_name, node_parent
)
try:
cursor = layout_node_index.index(index)
# TODO: replace this with proper cursoring, ignoring the fact this is
# O(n^2) for the moment
except ValueError:
continue
if node_name in black_listed_elements:
continue
[x, y, width, height] = bounds[cursor]
x /= device_pixel_ratio
y /= device_pixel_ratio
width /= device_pixel_ratio
height /= device_pixel_ratio
elem_left_bound = x
elem_top_bound = y
elem_right_bound = x + width
elem_lower_bound = y + height
partially_is_in_viewport = (
elem_left_bound < win_right_bound
and elem_right_bound >= win_left_bound
and elem_top_bound < win_lower_bound
and elem_lower_bound >= win_upper_bound
)
if not partially_is_in_viewport:
continue
meta_data: list[str] = []
# inefficient to grab the same set of keys for kinds of objects, but it's
# fine for now
element_attributes = find_attributes(
attributes[index], ["type", "placeholder", "aria-label", "title", "alt"]
)
ancestor_exception = is_ancestor_of_anchor or is_ancestor_of_button
ancestor_node_key = (
None
if not ancestor_exception
else str(anchor_id)
if is_ancestor_of_anchor
else str(button_id)
)
ancestor_node = (
None
if not ancestor_exception
else child_nodes.setdefault(str(ancestor_node_key), [])
)
if node_name == "#text" and ancestor_exception and ancestor_node:
text = strings[node_value[index]]
if text in {"|", "•"}:
continue
ancestor_node.append({"type": "type", "value": text})
else:
if (
node_name == "input" and element_attributes.get("type") == "submit"
) or node_name == "button":
node_name = "button"
element_attributes.pop(
"type", None
) # prevent [button ... (button)..]
for key in element_attributes:
if ancestor_exception and ancestor_node:
ancestor_node.append(
{
"type": "attribute",
"key": key,
"value": element_attributes[key],
}
)
else:
meta_data.append(element_attributes[key])
element_node_value = None
if node_value[index] >= 0:
element_node_value = strings[node_value[index]]
if (
element_node_value == "|"
# commonly used as a separator, does not add much context - lets
# save ourselves some token space
):
continue
elif (
node_name == "input"
and index in input_value_index
and element_node_value is None
):
node_input_text_index = input_value_index.index(index)
text_index = input_value_values[node_input_text_index]
if node_input_text_index >= 0 and text_index >= 0:
element_node_value = strings[text_index]
# remove redundant elements
if ancestor_exception and (node_name not in {"a", "button"}):
continue
elements_in_view_port.append(
{
"node_index": str(index),
"backend_node_id": backend_node_id[index],
"node_name": node_name,
"node_value": element_node_value,
"node_meta": meta_data,
"is_clickable": index in is_clickable,
"origin_x": int(x),
"origin_y": int(y),
"center_x": int(x + (width / 2)),
"center_y": int(y + (height / 2)),
}
)
# lets filter further to remove anything that does not hold any text nor has
# click handlers + merge text from leaf#text nodes with the parent
elements_of_interest = []
id_counter = 0
for element in elements_in_view_port:
node_index = element.get("node_index")
node_name = element.get("node_name")
element_node_value = element.get("node_value")
node_is_clickable = element.get("is_clickable")
node_meta_data: list[str] | None = element.get("node_meta")
inner_text = f"{element_node_value} " if element_node_value else ""
meta = ""
if node_index in child_nodes:
for child in child_nodes[node_index]:
entry_type = child.get("type")
entry_value = child.get("value")
if entry_type == "attribute" and node_meta_data:
entry_key = child.get("key")
node_meta_data.append(f'{entry_key}="{entry_value}"')
else:
inner_text += f"{entry_value} "
if node_meta_data:
meta_string = " ".join(node_meta_data)
meta = f" {meta_string}"
if inner_text != "":
inner_text = f"{inner_text.strip()}"
converted_node_name = convert_name(node_name, node_is_clickable)
# not very elegant, more like a placeholder
if (
(converted_node_name != "button" or meta == "")
and converted_node_name not in {"link", "input", "img", "textarea"}
) and inner_text.strip() == "":
continue
page_element_buffer[id_counter] = element
if inner_text != "":
elements_of_interest.append(
f"<{converted_node_name} id={id_counter}{meta}>{inner_text}"
f"</{converted_node_name}>"
)
else:
elements_of_interest.append(
f"""<{converted_node_name} id={id_counter}{meta}/>"""
)
id_counter += 1
print(f"Parsing time: {time.time() - start:0.2f} seconds") # noqa: T201
return elements_of_interest
| Crawler |
python | kamyu104__LeetCode-Solutions | Python/build-array-where-you-can-find-the-maximum-exactly-k-comparisons.py | {
"start": 41,
"end": 1010
} | class ____(object):
def numOfArrays(self, n, m, k):
"""
:type n: int
:type m: int
:type k: int
:rtype: int
"""
MOD = 10**9 + 7
# dp[l][i][j] = number of ways of constructing array length l with max element i at search cost j
dp = [[[0]*(k+1) for _ in xrange(m+1)] for _ in xrange(2)]
# prefix_dp[l][i][j] = sum(dp[l][i][j] for i in [1..i])
prefix_dp = [[[0]*(k+1) for _ in xrange(m+1)] for _ in xrange(2)]
for i in xrange(1, m+1):
dp[1][i][1] = 1
prefix_dp[1][i][1] = (prefix_dp[1][i-1][1] + dp[1][i][1])%MOD
for l in xrange(2, n+1):
for i in xrange(1, m+1):
for j in xrange(1, k+1):
dp[l%2][i][j] = (i*dp[(l-1)%2][i][j]%MOD + prefix_dp[(l-1)%2][i-1][j-1])%MOD
prefix_dp[l%2][i][j] = (prefix_dp[l%2][i-1][j] + dp[l%2][i][j])%MOD
return prefix_dp[n%2][m][k]
| Solution |
python | huggingface__transformers | src/transformers/convert_slow_tokenizer.py | {
"start": 30927,
"end": 32542
} | class ____(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>", 0.0),
("<pad>", 0.0),
("</s>", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: skip
vocab += [("<mask>", 0.0)]
return vocab
def unk_id(self, proto):
return 3
def post_processor(self):
return processors.TemplateProcessing(
single="en_XX $A </s>",
pair="en_XX $A $B </s>",
special_tokens=[
("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")),
("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
],
)
| MBart50Converter |
python | django__django | django/db/backends/oracle/validation.py | {
"start": 103,
"end": 860
} | class ____(BaseDatabaseValidation):
def check_field_type(self, field, field_type):
"""Oracle doesn't support a database index on some data types."""
errors = []
if field.db_index and field_type.lower() in self.connection._limited_data_types:
errors.append(
checks.Warning(
"Oracle does not support a database index on %s columns."
% field_type,
hint=(
"An index won't be created. Silence this warning if "
"you don't care about it."
),
obj=field,
id="fields.W162",
)
)
return errors
| DatabaseValidation |
python | getsentry__sentry | src/sentry/sentry_metrics/aggregation_option_registry.py | {
"start": 316,
"end": 1395
} | class ____(Enum):
SEVEN_DAYS = "7d"
FOURTEEN_DAYS = "14d"
THIRTY_DAYS = "30d"
NINETY_DAYS = "90d"
METRIC_ID_AGG_OPTION = {
"d:transactions/measurements.fcp@millisecond": {AggregationOption.HIST: TimeWindow.NINETY_DAYS},
"d:transactions/measurements.lcp@millisecond": {AggregationOption.HIST: TimeWindow.NINETY_DAYS},
"d:spans/webvital.inp@millisecond": None,
}
# Currently there are no default per-use case aggregation options
# They are all set via specific overrides, so we removed the global mapping
def get_aggregation_options(mri: str) -> dict[AggregationOption, TimeWindow] | None:
use_case_id: UseCaseID = extract_use_case_id(mri)
# We first check if the particular metric ID has a specified aggregation
if mri in METRIC_ID_AGG_OPTION:
return METRIC_ID_AGG_OPTION[mri]
# Then move to use case-level disabled percentiles
elif use_case_id.value in options.get("sentry-metrics.drop-percentiles.per-use-case"):
return {AggregationOption.DISABLE_PERCENTILES: TimeWindow.NINETY_DAYS}
return None
| TimeWindow |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias22.py | {
"start": 1434,
"end": 1492
} | class ____:
pass
T = TypeVar("T", bound=InnerA)
| InnerB |
python | celery__celery | t/unit/tasks/test_context.py | {
"start": 574,
"end": 3134
} | class ____:
def test_default_context(self):
# A bit of a tautological test, since it uses the same
# initializer as the default_context constructor.
defaults = dict(default_context, children=[])
assert get_context_as_dict(Context()) == defaults
def test_updated_context(self):
expected = dict(default_context)
changes = {'id': 'unique id', 'args': ['some', 1], 'wibble': 'wobble'}
ctx = Context()
expected.update(changes)
ctx.update(changes)
assert get_context_as_dict(ctx) == expected
assert get_context_as_dict(Context()) == default_context
def test_modified_context(self):
expected = dict(default_context)
ctx = Context()
expected['id'] = 'unique id'
expected['args'] = ['some', 1]
ctx.id = 'unique id'
ctx.args = ['some', 1]
assert get_context_as_dict(ctx) == expected
assert get_context_as_dict(Context()) == default_context
def test_cleared_context(self):
changes = {'id': 'unique id', 'args': ['some', 1], 'wibble': 'wobble'}
ctx = Context()
ctx.update(changes)
ctx.clear()
defaults = dict(default_context, children=[])
assert get_context_as_dict(ctx) == defaults
assert get_context_as_dict(Context()) == defaults
def test_context_get(self):
expected = dict(default_context)
changes = {'id': 'unique id', 'args': ['some', 1], 'wibble': 'wobble'}
ctx = Context()
expected.update(changes)
ctx.update(changes)
ctx_dict = get_context_as_dict(ctx, getter=Context.get)
assert ctx_dict == expected
assert get_context_as_dict(Context()) == default_context
def test_extract_headers(self):
# Should extract custom headers from the request dict
request = {
'task': 'test.test_task',
'id': 'e16eeaee-1172-49bb-9098-5437a509ffd9',
'custom-header': 'custom-value',
}
ctx = Context(request)
assert ctx.headers == {'custom-header': 'custom-value'}
def test_dont_override_headers(self):
# Should not override headers if defined in the request
request = {
'task': 'test.test_task',
'id': 'e16eeaee-1172-49bb-9098-5437a509ffd9',
'headers': {'custom-header': 'custom-value'},
'custom-header-2': 'custom-value-2',
}
ctx = Context(request)
assert ctx.headers == {'custom-header': 'custom-value'}
| test_Context |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 131979,
"end": 132394
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryInvitationOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| RepositoryInvitationOrder |
python | kamyu104__LeetCode-Solutions | Python/design-a-stack-with-increment-operation.py | {
"start": 115,
"end": 921
} | class ____(object):
def __init__(self, maxSize):
"""
:type maxSize: int
"""
self.__max_size = maxSize
self.__stk = []
def push(self, x):
"""
:type x: int
:rtype: None
"""
if len(self.__stk) == self.__max_size:
return
self.__stk.append([x, 0])
def pop(self):
"""
:rtype: int
"""
if not self.__stk:
return -1
x, inc = self.__stk.pop()
if self.__stk:
self.__stk[-1][1] += inc
return x + inc
def increment(self, k, val):
"""
:type k: int
:type val: int
:rtype: None
"""
i = min(len(self.__stk), k)-1
if i >= 0:
self.__stk[i][1] += val
| CustomStack |
python | pypa__build | tests/test_projectbuilder.py | {
"start": 2292,
"end": 2669
} | class ____(MockDistribution):
def read_text(self, filename):
if filename == 'METADATA':
return textwrap.dedent(
"""
Metadata-Version: 2.2
Name: recursive_dep
Version: 1.0.0
Requires-Dist: recursive_unmet_dep
"""
).strip()
| RecursiveMockDistribution |
python | getsentry__sentry | src/sentry/uptime/migrations/0044_remove_project_uptime_subscription.py | {
"start": 388,
"end": 2561
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("uptime", "0043_uptime_django_json_field"),
]
operations = [
AlterField(
model_name="projectuptimesubscription",
name="owner_team",
field=FlexibleForeignKey(
db_constraint=False,
null=True,
on_delete=deletion.SET_NULL,
to="sentry.team",
),
),
AlterField(
model_name="projectuptimesubscription",
name="project",
field=FlexibleForeignKey(
db_constraint=False,
on_delete=deletion.CASCADE,
to="sentry.project",
),
),
AlterField(
model_name="projectuptimesubscription",
name="uptime_subscription",
field=FlexibleForeignKey(
db_constraint=False,
on_delete=deletion.PROTECT,
to="uptime.uptimesubscription",
),
),
SafeDeleteModel(
name="ProjectUptimeSubscription",
deletion_action=DeletionAction.MOVE_TO_PENDING,
),
]
| Migration |
python | ray-project__ray | python/ray/train/v2/_internal/execution/callback.py | {
"start": 5922,
"end": 6369
} | class ____(RayTrainCallback):
"""
Callbacks that are hooked to the worker event.
These callbacks are created on the train driver process and then
copied and passed to all the workers.
The execution of these callbacks happens on each of the workers,
not on the train driver process.
"""
def after_init_train_context(self):
pass
def before_worker_shutdown(self):
pass
@DeveloperAPI
| WorkerCallback |
python | numba__numba | numba/cuda/kernels/reduction.py | {
"start": 5496,
"end": 9362
} | class ____(object):
"""Create a reduction object that reduces values using a given binary
function. The binary function is compiled once and cached inside this
object. Keeping this object alive will prevent re-compilation.
"""
_cache = {}
def __init__(self, functor):
"""
:param functor: A function implementing a binary operation for
reduction. It will be compiled as a CUDA device
function using ``cuda.jit(device=True)``.
"""
self._functor = functor
def _compile(self, dtype):
key = self._functor, dtype
if key in self._cache:
kernel = self._cache[key]
else:
kernel = _gpu_reduce_factory(self._functor, from_dtype(dtype))
self._cache[key] = kernel
return kernel
def __call__(self, arr, size=None, res=None, init=0, stream=0):
"""Performs a full reduction.
:param arr: A host or device array.
:param size: Optional integer specifying the number of elements in
``arr`` to reduce. If this parameter is not specified, the
entire array is reduced.
:param res: Optional device array into which to write the reduction
result to. The result is written into the first element of
this array. If this parameter is specified, then no
communication of the reduction output takes place from the
device to the host.
:param init: Optional initial value for the reduction, the type of which
must match ``arr.dtype``.
:param stream: Optional CUDA stream in which to perform the reduction.
If no stream is specified, the default stream of 0 is
used.
:return: If ``res`` is specified, ``None`` is returned. Otherwise, the
result of the reduction is returned.
"""
from numba import cuda
# ensure 1d array
if arr.ndim != 1:
raise TypeError("only support 1D array")
# adjust array size
if size is not None:
arr = arr[:size]
init = arr.dtype.type(init) # ensure the right type
# return `init` if `arr` is empty
if arr.size < 1:
return init
kernel = self._compile(arr.dtype)
# Perform the reduction on the GPU
blocksize = _NUMWARPS * _WARPSIZE
size_full = (arr.size // blocksize) * blocksize
size_partial = arr.size - size_full
full_blockct = min(size_full // blocksize, _WARPSIZE * 2)
# allocate size of partials array
partials_size = full_blockct
if size_partial:
partials_size += 1
partials = cuda.device_array(shape=partials_size, dtype=arr.dtype)
if size_full:
# kernel for the fully populated threadblocks
kernel[full_blockct, blocksize, stream](arr[:size_full],
partials[:full_blockct],
init,
True)
if size_partial:
# kernel for partially populated threadblocks
kernel[1, size_partial, stream](arr[size_full:],
partials[full_blockct:],
init,
not full_blockct)
if partials.size > 1:
# finish up
kernel[1, partials_size, stream](partials, partials, init, False)
# handle return value
if res is not None:
res[:1].copy_to_device(partials[:1], stream=stream)
return
else:
return partials[0]
| Reduce |
python | eventlet__eventlet | eventlet/wsgi.py | {
"start": 8701,
"end": 8975
} | class ____:
def __init__(self):
pass
def error(self, msg, *args, **kwargs):
pass
def info(self, msg, *args, **kwargs):
pass
def debug(self, msg, *args, **kwargs):
pass
def write(self, msg, *args):
pass
| LoggerNull |
python | gevent__gevent | src/gevent/_config.py | {
"start": 6638,
"end": 8180
} | class ____(object):
def _import_one_of(self, candidates):
assert isinstance(candidates, list)
if not candidates:
raise ImportError('Cannot import from empty list')
for item in candidates[:-1]:
try:
return self._import_one(item)
except ImportError:
pass
return self._import_one(candidates[-1])
def _import_one(self, path, _MISSING=object()):
if not isinstance(path, string_types):
return path
if '.' not in path or '/' in path:
raise ImportError("Cannot import %r. "
"Required format: [package.]module.class. "
"Or choose from %r"
% (path, list(self.shortname_map)))
module, item = path.rsplit('.', 1)
module = importlib.import_module(module)
x = getattr(module, item, _MISSING)
if x is _MISSING:
raise ImportError('Cannot import %r from %r' % (item, module))
return x
shortname_map = {}
def validate(self, value):
if isinstance(value, type):
return value
return self._import_one_of([self.shortname_map.get(x, x) for x in value])
def get_options(self):
result = {}
for name, val in self.shortname_map.items():
try:
result[name] = self._import_one(val)
except ImportError as e:
result[name] = e
return result
| ImportableSetting |
python | dask__distributed | distributed/tests/test_collections.py | {
"start": 580,
"end": 8414
} | class ____:
def __init__(self, k, i):
self.k = k
self.i = i
def __hash__(self):
return hash(self.k)
def __eq__(self, other):
return isinstance(other, C) and other.k == self.k
def __repr__(self):
return f"C({self.k}, {self.i})"
def test_heapset():
heap = HeapSet(key=operator.attrgetter("i"))
cx = C("x", 2)
cy = C("y", 1)
cz = C("z", 3)
cw = C("w", 4)
heap.add(cx)
heap.add(cy)
heap.add(cz)
heap.add(cw)
heap.add(C("x", 0)) # Ignored; x already in heap
assert len(heap) == 4
assert repr(heap) == "<HeapSet: 4 items>"
assert cx in heap
assert cy in heap
assert cz in heap
assert cw in heap
heap_sorted = heap.sorted()
# iteration does not empty heap
assert len(heap) == 4
assert next(heap_sorted) is cy
assert next(heap_sorted) is cx
assert next(heap_sorted) is cz
assert next(heap_sorted) is cw
with pytest.raises(StopIteration):
next(heap_sorted)
assert set(heap) == {cx, cy, cz, cw}
assert heap.peek() is cy
assert heap.pop() is cy
assert cx in heap
assert cy not in heap
assert cz in heap
assert cw in heap
assert heap.peek() is cx
assert heap.pop() is cx
assert heap.pop() is cz
assert heap.pop() is cw
assert not heap
with pytest.raises(KeyError):
heap.pop()
with pytest.raises(KeyError):
heap.peek()
# Test out-of-order discard
heap.add(cx)
heap.add(cy)
heap.add(cz)
heap.add(cw)
assert heap.peek() is cy
heap.remove(cy)
assert cy not in heap
with pytest.raises(KeyError):
heap.remove(cy)
heap.discard(cw)
assert cw not in heap
heap.discard(cw)
assert len(heap) == 2
assert list(heap.sorted()) == [cx, cz]
# cy is at the top of heap._heap, but is skipped
assert heap.peek() is cx
assert heap.pop() is cx
assert heap.peek() is cz
assert heap.pop() is cz
# heap._heap is not empty
assert not heap
with pytest.raises(KeyError):
heap.peek()
with pytest.raises(KeyError):
heap.pop()
assert list(heap.sorted()) == []
# Test clear()
heap.add(cx)
heap.clear()
assert not heap
heap.add(cx)
assert cx in heap
# Test discard last element
heap.discard(cx)
assert not heap
heap.add(cx)
assert cx in heap
# Test peekn()
heap.add(cy)
heap.add(cw)
heap.add(cz)
heap.add(cx)
assert list(heap.peekn(3)) == [cy, cx, cz]
heap.remove(cz)
assert list(heap.peekn(10)) == [cy, cx, cw]
assert list(heap.peekn(0)) == []
assert list(heap.peekn(-1)) == []
heap.remove(cy)
assert list(heap.peekn(1)) == [cx]
heap.remove(cw)
assert list(heap.peekn(1)) == [cx]
heap.remove(cx)
assert list(heap.peekn(-1)) == []
assert list(heap.peekn(0)) == []
assert list(heap.peekn(1)) == []
assert list(heap.peekn(2)) == []
# Test resilience to failure in key()
heap.add(cx)
bad_key = C("bad_key", 0)
del bad_key.i
with pytest.raises(AttributeError):
heap.add(bad_key)
assert len(heap) == 1
assert set(heap) == {cx}
# Test resilience to failure in weakref.ref()
class D:
__slots__ = ("i",)
def __init__(self, i):
self.i = i
with pytest.raises(TypeError):
heap.add(D("bad_weakref", 2))
assert len(heap) == 1
assert set(heap) == {cx}
# Test resilience to key() returning non-sortable output
with pytest.raises(TypeError):
heap.add(C("unsortable_key", None))
assert len(heap) == 1
assert set(heap) == {cx}
def assert_heap_sorted(heap: HeapSet) -> None:
assert heap._sorted
assert heap._heap == sorted(heap._heap)
def test_heapset_sorted_flag_left():
heap = HeapSet(key=operator.attrgetter("i"))
assert heap._sorted
c1 = C("1", 1)
c2 = C("2", 2)
c3 = C("3", 3)
c4 = C("4", 4)
heap.add(c4)
assert not heap._sorted
heap.add(c3)
heap.add(c2)
heap.add(c1)
list(heap.sorted())
assert_heap_sorted(heap)
# `peek` maintains sort if first element is not discarded
assert heap.peek() is c1
assert_heap_sorted(heap)
# `pop` always de-sorts
assert heap.pop() is c1
assert not heap._sorted
list(heap.sorted())
# discard first element
heap.discard(c2)
assert heap.peek() is c3
assert not heap._sorted
# popping the last element resets the sorted flag
assert heap.pop() is c3
assert heap.pop() is c4
assert not heap
assert_heap_sorted(heap)
# discarding`` the last element resets the sorted flag
heap.add(c1)
heap.add(c2)
assert not heap._sorted
heap.discard(c1)
assert not heap._sorted
heap.discard(c2)
assert not heap
assert_heap_sorted(heap)
def test_heapset_sorted_flag_right():
"Verify right operations don't affect sortedness"
heap = HeapSet(key=operator.attrgetter("i"))
c1 = C("1", 1)
c2 = C("2", 2)
c3 = C("3", 3)
heap.add(c2)
heap.add(c3)
heap.add(c1)
assert not heap._sorted
list(heap.sorted())
assert_heap_sorted(heap)
assert heap.peekright() is c3
assert_heap_sorted(heap)
assert heap.popright() is c3
assert_heap_sorted(heap)
assert heap.popright() is c2
assert_heap_sorted(heap)
heap.add(c2)
assert not heap._sorted
assert heap.popright() is c2
assert not heap._sorted
assert heap.popright() is c1
assert not heap
assert_heap_sorted(heap)
@pytest.mark.parametrize("peek", [False, True])
def test_heapset_popright(peek):
heap = HeapSet(key=operator.attrgetter("i"))
with pytest.raises(KeyError):
heap.peekright()
with pytest.raises(KeyError):
heap.popright()
# The heap contains broken weakrefs
for i in range(200):
c = C(f"y{i}", random.random())
heap.add(c)
if random.random() > 0.7:
heap.remove(c)
c0 = heap.peek()
while len(heap) > 1:
# These two code paths determine which of the two methods deals with the
# removal of broken weakrefs
if peek:
c1 = heap.peekright()
assert c1.i >= c0.i
assert heap.popright() is c1
else:
c1 = heap.popright()
assert c1.i >= c0.i
# Test that the heap hasn't been corrupted
h2 = heap._heap[:]
heapq.heapify(h2)
assert h2 == heap._heap
assert heap.peekright() is c0
assert heap.popright() is c0
assert not heap
def test_heapset_pickle():
"""Test pickle roundtrip for a HeapSet.
Note
----
To make this test work with plain pickle and not need cloudpickle, we had to avoid
lambdas and local classes in our test. Here we're testing that HeapSet doesn't add
lambdas etc. of its own.
"""
heap = HeapSet(key=operator.attrgetter("i"))
# The heap contains broken weakrefs
for i in range(200):
c = C(f"y{i}", random.random())
heap.add(c)
if random.random() > 0.7:
heap.remove(c)
list(heap.sorted()) # trigger sort
assert heap._sorted
heap2 = pickle.loads(pickle.dumps(heap))
assert len(heap) == len(heap2)
assert not heap2._sorted # re-heapification may have broken the sort
# Test that the heap has been re-heapified upon unpickle
assert len(heap2._heap) < len(heap._heap)
while heap:
assert heap.pop() == heap2.pop()
def test_heapset_sort_duplicate():
"""See https://github.com/dask/distributed/issues/6951"""
heap = HeapSet(key=operator.attrgetter("i"))
c1 = C("x", 1)
c2 = C("2", 2)
heap.add(c1)
heap.add(c2)
heap.discard(c1)
heap.add(c1)
assert list(heap.sorted()) == [c1, c2]
| C |
python | kamyu104__LeetCode-Solutions | Python/minimum-amount-of-damage-dealt-to-bob.py | {
"start": 48,
"end": 586
} | class ____(object):
def minDamage(self, power, damage, health):
"""
:type power: int
:type damage: List[int]
:type health: List[int]
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
idxs = range(len(health))
idxs.sort(key=lambda i: float(ceil_divide(health[i], power))/damage[i])
result = t = 0
for i in idxs:
t += ceil_divide(health[i], power)
result += t*damage[i]
return result
| Solution |
python | huggingface__transformers | src/transformers/models/smollm3/modular_smollm3.py | {
"start": 13411,
"end": 13707
} | class ____(LlamaForQuestionAnswering):
pass
__all__ = [
"SmolLM3Config",
"SmolLM3PreTrainedModel",
"SmolLM3Model",
"SmolLM3ForCausalLM",
"SmolLM3ForSequenceClassification",
"SmolLM3ForTokenClassification",
"SmolLM3ForQuestionAnswering",
]
| SmolLM3ForQuestionAnswering |
python | django__django | tests/apps/tests.py | {
"start": 21561,
"end": 24758
} | class ____(TransactionTestCase):
available_apps = ["apps"]
databases = {"default", "other"}
expected_msg = (
"Accessing the database during app initialization is discouraged. To fix this "
"warning, avoid executing queries in AppConfig.ready() or when your app "
"modules are imported."
)
def test_query_default_database_using_model(self):
query_results = self.run_setup("QueryDefaultDatabaseModelAppConfig")
self.assertSequenceEqual(query_results, [("new name",)])
def test_query_other_database_using_model(self):
query_results = self.run_setup("QueryOtherDatabaseModelAppConfig")
self.assertSequenceEqual(query_results, [("new name",)])
def test_query_default_database_using_cursor(self):
query_results = self.run_setup("QueryDefaultDatabaseCursorAppConfig")
self.assertSequenceEqual(query_results, [(42,)])
def test_query_other_database_using_cursor(self):
query_results = self.run_setup("QueryOtherDatabaseCursorAppConfig")
self.assertSequenceEqual(query_results, [(42,)])
def test_query_many_default_database_using_cursor(self):
self.run_setup("QueryDefaultDatabaseCursorManyAppConfig")
def test_query_many_other_database_using_cursor(self):
self.run_setup("QueryOtherDatabaseCursorManyAppConfig")
@skipUnlessDBFeature("create_test_procedure_without_params_sql")
def test_query_default_database_using_stored_procedure(self):
connection = connections["default"]
with connection.cursor() as cursor:
cursor.execute(connection.features.create_test_procedure_without_params_sql)
try:
self.run_setup("QueryDefaultDatabaseStoredProcedureAppConfig")
finally:
with connection.schema_editor() as editor:
editor.remove_procedure("test_procedure")
@skipUnlessDBFeature("create_test_procedure_without_params_sql")
def test_query_other_database_using_stored_procedure(self):
connection = connections["other"]
with connection.cursor() as cursor:
cursor.execute(connection.features.create_test_procedure_without_params_sql)
try:
self.run_setup("QueryOtherDatabaseStoredProcedureAppConfig")
finally:
with connection.schema_editor() as editor:
editor.remove_procedure("test_procedure")
def run_setup(self, app_config_name):
custom_settings = override_settings(
INSTALLED_APPS=[f"apps.query_performing_app.apps.{app_config_name}"]
)
custom_settings.enable()
old_stored_app_configs = apps.stored_app_configs
apps.stored_app_configs = []
try:
with patch.multiple(apps, ready=False, loading=False, app_configs={}):
with self.assertWarnsMessage(RuntimeWarning, self.expected_msg):
django.setup()
app_config = apps.get_app_config("query_performing_app")
return app_config.query_results
finally:
setattr(apps, "stored_app_configs", old_stored_app_configs)
custom_settings.disable()
| QueryPerformingAppTests |
python | apache__airflow | airflow-core/tests/unit/plugins/priority_weight_strategy.py | {
"start": 1169,
"end": 1429
} | class ____(PriorityWeightStrategy):
factor: int = 3
def serialize(self) -> dict[str, Any]:
return {"factor": self.factor}
def get_weight(self, ti: TaskInstance):
return max(ti.map_index, 1) * self.factor
| FactorPriorityWeightStrategy |
python | doocs__leetcode | solution/1200-1299/1240.Tiling a Rectangle with the Fewest Squares/Solution2.py | {
"start": 0,
"end": 1206
} | class ____:
def tilingRectangle(self, n: int, m: int) -> int:
def dfs(i: int, j: int, t: int):
nonlocal ans
if j == m:
i += 1
j = 0
if i == n:
ans = t
return
if filled[i] >> j & 1:
dfs(i, j + 1, t)
elif t + 1 < ans:
r = c = 0
for k in range(i, n):
if filled[k] >> j & 1:
break
r += 1
for k in range(j, m):
if filled[i] >> k & 1:
break
c += 1
mx = min(r, c)
for x in range(i, i + mx):
for y in range(j, j + mx):
filled[x] |= 1 << y
for w in range(mx, 0, -1):
dfs(i, j + w, t + 1)
for k in range(w):
filled[i + w - 1] ^= 1 << (j + k)
if k < w - 1:
filled[i + k] ^= 1 << (j + w - 1)
ans = n * m
filled = [0] * n
dfs(0, 0, 0)
return ans
| Solution |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 800,
"end": 845
} | class ____(generic):
name = "number"
| number |
python | coleifer__peewee | tests/sqlite.py | {
"start": 65835,
"end": 67589
} | class ____(ModelTestCase):
database = database
requires = [RowIDModel]
def test_model_meta(self):
self.assertEqual(RowIDModel._meta.sorted_field_names, ['rowid', 'data'])
self.assertEqual(RowIDModel._meta.primary_key.name, 'rowid')
self.assertTrue(RowIDModel._meta.auto_increment)
def test_rowid_field(self):
r1 = RowIDModel.create(data=10)
self.assertEqual(r1.rowid, 1)
self.assertEqual(r1.data, 10)
r2 = RowIDModel.create(data=20)
self.assertEqual(r2.rowid, 2)
self.assertEqual(r2.data, 20)
query = RowIDModel.select().where(RowIDModel.rowid == 2)
self.assertSQL(query, (
'SELECT "t1"."rowid", "t1"."data" '
'FROM "row_id_model" AS "t1" '
'WHERE ("t1"."rowid" = ?)'), [2])
r_db = query.get()
self.assertEqual(r_db.rowid, 2)
self.assertEqual(r_db.data, 20)
r_db2 = query.columns(RowIDModel.rowid, RowIDModel.data).get()
self.assertEqual(r_db2.rowid, 2)
self.assertEqual(r_db2.data, 20)
def test_insert_with_rowid(self):
RowIDModel.insert({RowIDModel.rowid: 5, RowIDModel.data: 1}).execute()
self.assertEqual(5, RowIDModel.select(RowIDModel.rowid).first().rowid)
def test_insert_many_with_rowid_without_field_validation(self):
RowIDModel.insert_many([{RowIDModel.rowid: 5, RowIDModel.data: 1}]).execute()
self.assertEqual(5, RowIDModel.select(RowIDModel.rowid).first().rowid)
def test_insert_many_with_rowid_with_field_validation(self):
RowIDModel.insert_many([{RowIDModel.rowid: 5, RowIDModel.data: 1}]).execute()
self.assertEqual(5, RowIDModel.select(RowIDModel.rowid).first().rowid)
| TestRowIDField |
python | mwaskom__seaborn | tests/test_axisgrid.py | {
"start": 741,
"end": 24063
} | class ____:
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.gamma(4, size=60),
a=np.repeat(list("abc"), 20),
b=np.tile(list("mn"), 30),
c=np.tile(list("tuv"), 20),
d=np.tile(list("abcdefghijkl"), 5)))
def test_self_data(self):
g = ag.FacetGrid(self.df)
assert g.data is self.df
def test_self_figure(self):
g = ag.FacetGrid(self.df)
assert isinstance(g.figure, plt.Figure)
assert g.figure is g._figure
def test_self_axes(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
for ax in g.axes.flat:
assert isinstance(ax, plt.Axes)
def test_axes_array_size(self):
g = ag.FacetGrid(self.df)
assert g.axes.shape == (1, 1)
g = ag.FacetGrid(self.df, row="a")
assert g.axes.shape == (3, 1)
g = ag.FacetGrid(self.df, col="b")
assert g.axes.shape == (1, 2)
g = ag.FacetGrid(self.df, hue="c")
assert g.axes.shape == (1, 1)
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
assert g.axes.shape == (3, 2)
for ax in g.axes.flat:
assert isinstance(ax, plt.Axes)
def test_single_axes(self):
g = ag.FacetGrid(self.df)
assert isinstance(g.ax, plt.Axes)
g = ag.FacetGrid(self.df, row="a")
with pytest.raises(AttributeError):
g.ax
g = ag.FacetGrid(self.df, col="a")
with pytest.raises(AttributeError):
g.ax
g = ag.FacetGrid(self.df, col="a", row="b")
with pytest.raises(AttributeError):
g.ax
def test_col_wrap(self):
n = len(self.df.d.unique())
g = ag.FacetGrid(self.df, col="d")
assert g.axes.shape == (1, n)
assert g.facet_axis(0, 8) is g.axes[0, 8]
g_wrap = ag.FacetGrid(self.df, col="d", col_wrap=4)
assert g_wrap.axes.shape == (n,)
assert g_wrap.facet_axis(0, 8) is g_wrap.axes[8]
assert g_wrap._ncol == 4
assert g_wrap._nrow == (n / 4)
with pytest.raises(ValueError):
g = ag.FacetGrid(self.df, row="b", col="d", col_wrap=4)
df = self.df.copy()
df.loc[df.d == "j"] = np.nan
g_missing = ag.FacetGrid(df, col="d")
assert g_missing.axes.shape == (1, n - 1)
g_missing_wrap = ag.FacetGrid(df, col="d", col_wrap=4)
assert g_missing_wrap.axes.shape == (n - 1,)
g = ag.FacetGrid(self.df, col="d", col_wrap=1)
assert len(list(g.facet_data())) == n
def test_normal_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df)
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="c")
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="a", row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, g.axes[:-1, 1:].flat)
def test_wrapped_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df, col="a", col_wrap=2)
npt.assert_array_equal(g._bottom_axes,
g.axes[np.array([1, 2])].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:1].flat)
npt.assert_array_equal(g._left_axes, g.axes[np.array([0, 2])].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[np.array([1])].flat)
npt.assert_array_equal(g._inner_axes, null)
def test_axes_dict(self):
g = ag.FacetGrid(self.df)
assert isinstance(g.axes_dict, dict)
assert not g.axes_dict
g = ag.FacetGrid(self.df, row="c")
assert list(g.axes_dict.keys()) == g.row_names
for (name, ax) in zip(g.row_names, g.axes.flat):
assert g.axes_dict[name] is ax
g = ag.FacetGrid(self.df, col="c")
assert list(g.axes_dict.keys()) == g.col_names
for (name, ax) in zip(g.col_names, g.axes.flat):
assert g.axes_dict[name] is ax
g = ag.FacetGrid(self.df, col="a", col_wrap=2)
assert list(g.axes_dict.keys()) == g.col_names
for (name, ax) in zip(g.col_names, g.axes.flat):
assert g.axes_dict[name] is ax
g = ag.FacetGrid(self.df, row="a", col="c")
for (row_var, col_var), ax in g.axes_dict.items():
i = g.row_names.index(row_var)
j = g.col_names.index(col_var)
assert g.axes[i, j] is ax
def test_figure_size(self):
g = ag.FacetGrid(self.df, row="a", col="b")
npt.assert_array_equal(g.figure.get_size_inches(), (6, 9))
g = ag.FacetGrid(self.df, row="a", col="b", height=6)
npt.assert_array_equal(g.figure.get_size_inches(), (12, 18))
g = ag.FacetGrid(self.df, col="c", height=4, aspect=.5)
npt.assert_array_equal(g.figure.get_size_inches(), (6, 4))
def test_figure_size_with_legend(self):
g = ag.FacetGrid(self.df, col="a", hue="c", height=4, aspect=.5)
npt.assert_array_equal(g.figure.get_size_inches(), (6, 4))
g.add_legend()
assert g.figure.get_size_inches()[0] > 6
g = ag.FacetGrid(self.df, col="a", hue="c", height=4, aspect=.5,
legend_out=False)
npt.assert_array_equal(g.figure.get_size_inches(), (6, 4))
g.add_legend()
npt.assert_array_equal(g.figure.get_size_inches(), (6, 4))
def test_legend_data(self):
g = ag.FacetGrid(self.df, hue="a")
g.map(plt.plot, "x", "y")
g.add_legend()
palette = color_palette(n_colors=3)
assert g._legend.get_title().get_text() == "a"
a_levels = sorted(self.df.a.unique())
lines = g._legend.get_lines()
assert len(lines) == len(a_levels)
for line, hue in zip(lines, palette):
assert_colors_equal(line.get_color(), hue)
labels = g._legend.get_texts()
assert len(labels) == len(a_levels)
for label, level in zip(labels, a_levels):
assert label.get_text() == level
def test_legend_data_missing_level(self):
g = ag.FacetGrid(self.df, hue="a", hue_order=list("azbc"))
g.map(plt.plot, "x", "y")
g.add_legend()
c1, c2, c3, c4 = color_palette(n_colors=4)
palette = [c1, c3, c4]
assert g._legend.get_title().get_text() == "a"
a_levels = sorted(self.df.a.unique())
lines = g._legend.get_lines()
assert len(lines) == len(a_levels)
for line, hue in zip(lines, palette):
assert_colors_equal(line.get_color(), hue)
labels = g._legend.get_texts()
assert len(labels) == 4
for label, level in zip(labels, list("azbc")):
assert label.get_text() == level
def test_get_boolean_legend_data(self):
self.df["b_bool"] = self.df.b == "m"
g = ag.FacetGrid(self.df, hue="b_bool")
g.map(plt.plot, "x", "y")
g.add_legend()
palette = color_palette(n_colors=2)
assert g._legend.get_title().get_text() == "b_bool"
b_levels = list(map(str, categorical_order(self.df.b_bool)))
lines = g._legend.get_lines()
assert len(lines) == len(b_levels)
for line, hue in zip(lines, palette):
assert_colors_equal(line.get_color(), hue)
labels = g._legend.get_texts()
assert len(labels) == len(b_levels)
for label, level in zip(labels, b_levels):
assert label.get_text() == level
def test_legend_tuples(self):
g = ag.FacetGrid(self.df, hue="a")
g.map(plt.plot, "x", "y")
handles, labels = g.ax.get_legend_handles_labels()
label_tuples = [("", l) for l in labels]
legend_data = dict(zip(label_tuples, handles))
g.add_legend(legend_data, label_tuples)
for entry, label in zip(g._legend.get_texts(), labels):
assert entry.get_text() == label
def test_legend_options(self):
g = ag.FacetGrid(self.df, hue="b")
g.map(plt.plot, "x", "y")
g.add_legend()
g1 = ag.FacetGrid(self.df, hue="b", legend_out=False)
g1.add_legend(adjust_subtitles=True)
g1 = ag.FacetGrid(self.df, hue="b", legend_out=False)
g1.add_legend(adjust_subtitles=False)
def test_legendout_with_colwrap(self):
g = ag.FacetGrid(self.df, col="d", hue='b',
col_wrap=4, legend_out=False)
g.map(plt.plot, "x", "y", linewidth=3)
g.add_legend()
def test_legend_tight_layout(self):
g = ag.FacetGrid(self.df, hue='b')
g.map(plt.plot, "x", "y", linewidth=3)
g.add_legend()
g.tight_layout()
axes_right_edge = g.ax.get_window_extent().xmax
legend_left_edge = g._legend.get_window_extent().xmin
assert axes_right_edge < legend_left_edge
def test_subplot_kws(self):
g = ag.FacetGrid(self.df, despine=False,
subplot_kws=dict(projection="polar"))
for ax in g.axes.flat:
assert "PolarAxes" in ax.__class__.__name__
def test_gridspec_kws(self):
ratios = [3, 1, 2]
gskws = dict(width_ratios=ratios)
g = ag.FacetGrid(self.df, col='c', row='a', gridspec_kws=gskws)
for ax in g.axes.flat:
ax.set_xticks([])
ax.set_yticks([])
g.figure.tight_layout()
for (l, m, r) in g.axes:
assert l.get_position().width > m.get_position().width
assert r.get_position().width > m.get_position().width
def test_gridspec_kws_col_wrap(self):
ratios = [3, 1, 2, 1, 1]
gskws = dict(width_ratios=ratios)
with pytest.warns(UserWarning):
ag.FacetGrid(self.df, col='d', col_wrap=5, gridspec_kws=gskws)
def test_data_generator(self):
g = ag.FacetGrid(self.df, row="a")
d = list(g.facet_data())
assert len(d) == 3
tup, data = d[0]
assert tup == (0, 0, 0)
assert (data["a"] == "a").all()
tup, data = d[1]
assert tup == (1, 0, 0)
assert (data["a"] == "b").all()
g = ag.FacetGrid(self.df, row="a", col="b")
d = list(g.facet_data())
assert len(d) == 6
tup, data = d[0]
assert tup == (0, 0, 0)
assert (data["a"] == "a").all()
assert (data["b"] == "m").all()
tup, data = d[1]
assert tup == (0, 1, 0)
assert (data["a"] == "a").all()
assert (data["b"] == "n").all()
tup, data = d[2]
assert tup == (1, 0, 0)
assert (data["a"] == "b").all()
assert (data["b"] == "m").all()
g = ag.FacetGrid(self.df, hue="c")
d = list(g.facet_data())
assert len(d) == 3
tup, data = d[1]
assert tup == (0, 0, 1)
assert (data["c"] == "u").all()
def test_map(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
g.map(plt.plot, "x", "y", linewidth=3)
lines = g.axes[0, 0].lines
assert len(lines) == 3
line1, _, _ = lines
assert line1.get_linewidth() == 3
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_map_dataframe(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
def plot(x, y, data=None, **kws):
plt.plot(data[x], data[y], **kws)
# Modify __module__ so this doesn't look like a seaborn function
plot.__module__ = "test"
g.map_dataframe(plot, "x", "y", linestyle="--")
lines = g.axes[0, 0].lines
assert len(g.axes[0, 0].lines) == 3
line1, _, _ = lines
assert line1.get_linestyle() == "--"
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_set(self):
g = ag.FacetGrid(self.df, row="a", col="b")
xlim = (-2, 5)
ylim = (3, 6)
xticks = [-2, 0, 3, 5]
yticks = [3, 4.5, 6]
g.set(xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks)
for ax in g.axes.flat:
npt.assert_array_equal(ax.get_xlim(), xlim)
npt.assert_array_equal(ax.get_ylim(), ylim)
npt.assert_array_equal(ax.get_xticks(), xticks)
npt.assert_array_equal(ax.get_yticks(), yticks)
def test_set_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
assert g.axes[0, 0].get_title() == "a = a | b = m"
assert g.axes[0, 1].get_title() == "a = a | b = n"
assert g.axes[1, 0].get_title() == "a = b | b = m"
# Test a provided title
g.set_titles("{row_var} == {row_name} \\/ {col_var} == {col_name}")
assert g.axes[0, 0].get_title() == "a == a \\/ b == m"
assert g.axes[0, 1].get_title() == "a == a \\/ b == n"
assert g.axes[1, 0].get_title() == "a == b \\/ b == m"
# Test a single row
g = ag.FacetGrid(self.df, col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
assert g.axes[0, 0].get_title() == "b = m"
assert g.axes[0, 1].get_title() == "b = n"
# test with dropna=False
g = ag.FacetGrid(self.df, col="b", hue="b", dropna=False)
g.map(plt.plot, 'x', 'y')
def test_set_titles_margin_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b", margin_titles=True)
g.map(plt.plot, "x", "y")
# Test the default titles
assert g.axes[0, 0].get_title() == "b = m"
assert g.axes[0, 1].get_title() == "b = n"
assert g.axes[1, 0].get_title() == ""
# Test the row "titles"
assert g.axes[0, 1].texts[0].get_text() == "a = a"
assert g.axes[1, 1].texts[0].get_text() == "a = b"
assert g.axes[0, 1].texts[0] is g._margin_titles_texts[0]
# Test provided titles
g.set_titles(col_template="{col_name}", row_template="{row_name}")
assert g.axes[0, 0].get_title() == "m"
assert g.axes[0, 1].get_title() == "n"
assert g.axes[1, 0].get_title() == ""
assert len(g.axes[1, 1].texts) == 1
assert g.axes[1, 1].texts[0].get_text() == "b"
def test_set_ticklabels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
ax = g.axes[-1, 0]
xlab = [l.get_text() + "h" for l in ax.get_xticklabels()]
ylab = [l.get_text() + "i" for l in ax.get_yticklabels()]
g.set_xticklabels(xlab)
g.set_yticklabels(ylab)
got_x = [l.get_text() for l in g.axes[-1, 1].get_xticklabels()]
got_y = [l.get_text() for l in g.axes[0, 0].get_yticklabels()]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
x, y = np.arange(10), np.arange(10)
df = pd.DataFrame(np.c_[x, y], columns=["x", "y"])
g = ag.FacetGrid(df).map_dataframe(pointplot, x="x", y="y", order=x)
g.set_xticklabels(step=2)
got_x = [int(l.get_text()) for l in g.axes[0, 0].get_xticklabels()]
npt.assert_array_equal(x[::2], got_x)
g = ag.FacetGrid(self.df, col="d", col_wrap=5)
g.map(plt.plot, "x", "y")
g.set_xticklabels(rotation=45)
g.set_yticklabels(rotation=75)
for ax in g._bottom_axes:
for l in ax.get_xticklabels():
assert l.get_rotation() == 45
for ax in g._left_axes:
for l in ax.get_yticklabels():
assert l.get_rotation() == 75
def test_set_axis_labels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = 'xx'
ylab = 'yy'
g.set_axis_labels(xlab, ylab)
got_x = [ax.get_xlabel() for ax in g.axes[-1, :]]
got_y = [ax.get_ylabel() for ax in g.axes[:, 0]]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
for ax in g.axes.flat:
ax.set(xlabel="x", ylabel="y")
g.set_axis_labels(xlab, ylab)
for ax in g._not_bottom_axes:
assert not ax.get_xlabel()
for ax in g._not_left_axes:
assert not ax.get_ylabel()
def test_axis_lims(self):
g = ag.FacetGrid(self.df, row="a", col="b", xlim=(0, 4), ylim=(-2, 3))
assert g.axes[0, 0].get_xlim() == (0, 4)
assert g.axes[0, 0].get_ylim() == (-2, 3)
def test_data_orders(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
assert g.row_names == list("abc")
assert g.col_names == list("mn")
assert g.hue_names == list("tuv")
assert g.axes.shape == (3, 2)
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bca"),
col_order=list("nm"),
hue_order=list("vtu"))
assert g.row_names == list("bca")
assert g.col_names == list("nm")
assert g.hue_names == list("vtu")
assert g.axes.shape == (3, 2)
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bcda"),
col_order=list("nom"),
hue_order=list("qvtu"))
assert g.row_names == list("bcda")
assert g.col_names == list("nom")
assert g.hue_names == list("qvtu")
assert g.axes.shape == (4, 3)
def test_palette(self):
rcmod.set()
g = ag.FacetGrid(self.df, hue="c")
assert g._colors == color_palette(n_colors=len(self.df.c.unique()))
g = ag.FacetGrid(self.df, hue="d")
assert g._colors == color_palette("husl", len(self.df.d.unique()))
g = ag.FacetGrid(self.df, hue="c", palette="Set2")
assert g._colors == color_palette("Set2", len(self.df.c.unique()))
dict_pal = dict(t="red", u="green", v="blue")
list_pal = color_palette(["red", "green", "blue"], 3)
g = ag.FacetGrid(self.df, hue="c", palette=dict_pal)
assert g._colors == list_pal
list_pal = color_palette(["green", "blue", "red"], 3)
g = ag.FacetGrid(self.df, hue="c", hue_order=list("uvt"),
palette=dict_pal)
assert g._colors == list_pal
def test_hue_kws(self):
kws = dict(marker=["o", "s", "D"])
g = ag.FacetGrid(self.df, hue="c", hue_kws=kws)
g.map(plt.plot, "x", "y")
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
assert line.get_marker() == marker
def test_dropna(self):
df = self.df.copy()
hasna = pd.Series(np.tile(np.arange(6), 10), dtype=float)
hasna[hasna == 5] = np.nan
df["hasna"] = hasna
g = ag.FacetGrid(df, dropna=False, row="hasna")
assert g._not_na.sum() == 60
g = ag.FacetGrid(df, dropna=True, row="hasna")
assert g._not_na.sum() == 50
def test_categorical_column_missing_categories(self):
df = self.df.copy()
df['a'] = df['a'].astype('category')
g = ag.FacetGrid(df[df['a'] == 'a'], col="a", col_wrap=1)
assert g.axes.shape == (len(df['a'].cat.categories),)
def test_categorical_warning(self):
g = ag.FacetGrid(self.df, col="b")
with pytest.warns(UserWarning):
g.map(pointplot, "b", "x")
def test_refline(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.refline()
for ax in g.axes.flat:
assert not ax.lines
refx = refy = 0.5
hline = np.array([[0, refy], [1, refy]])
vline = np.array([[refx, 0], [refx, 1]])
g.refline(x=refx, y=refy)
for ax in g.axes.flat:
assert ax.lines[0].get_color() == '.5'
assert ax.lines[0].get_linestyle() == '--'
assert len(ax.lines) == 2
npt.assert_array_equal(ax.lines[0].get_xydata(), vline)
npt.assert_array_equal(ax.lines[1].get_xydata(), hline)
color, linestyle = 'red', '-'
g.refline(x=refx, color=color, linestyle=linestyle)
npt.assert_array_equal(g.axes[0, 0].lines[-1].get_xydata(), vline)
assert g.axes[0, 0].lines[-1].get_color() == color
assert g.axes[0, 0].lines[-1].get_linestyle() == linestyle
def test_apply(self, long_df):
def f(grid, color):
grid.figure.set_facecolor(color)
color = (.1, .6, .3, .9)
g = ag.FacetGrid(long_df)
res = g.apply(f, color)
assert res is g
assert g.figure.get_facecolor() == color
def test_pipe(self, long_df):
def f(grid, color):
grid.figure.set_facecolor(color)
return color
color = (.1, .6, .3, .9)
g = ag.FacetGrid(long_df)
res = g.pipe(f, color)
assert res == color
assert g.figure.get_facecolor() == color
def test_tick_params(self):
g = ag.FacetGrid(self.df, row="a", col="b")
color = "blue"
pad = 3
g.tick_params(pad=pad, color=color)
for ax in g.axes.flat:
for axis in ["xaxis", "yaxis"]:
for tick in getattr(ax, axis).get_major_ticks():
assert mpl.colors.same_color(tick.tick1line.get_color(), color)
assert mpl.colors.same_color(tick.tick2line.get_color(), color)
assert tick.get_pad() == pad
@pytest.mark.skipif(
condition=not hasattr(pd.api, "interchange"),
reason="Tests behavior assuming support for dataframe interchange"
)
def test_data_interchange(self, mock_long_df, long_df):
g = ag.FacetGrid(mock_long_df, col="a", row="b")
g.map(scatterplot, "x", "y")
assert g.axes.shape == (long_df["b"].nunique(), long_df["a"].nunique())
for ax in g.axes.flat:
assert len(ax.collections) == 1
| TestFacetGrid |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/source.py | {
"start": 4604,
"end": 6673
} | class ____(OutbrainAmplifyStream, HttpSubStream):
primary_key = None
def __init__(self, authenticator, config, parent: Marketers, **kwargs):
super().__init__(parent=parent, **kwargs)
self.config = config
self._authenticator = authenticator
self._session = requests.sessions.Session()
@property
def use_cache(self) -> bool:
return True
@property
def cache_filename(self):
return "campaigns.yml"
@property
def name(self) -> str:
return "campaigns"
def stream_slices(
self, sync_mode: SyncMode.full_refresh, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"marketer_id": record.get("id")}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
if response.json():
for x in response.json().get("campaigns"):
yield x
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return f"marketers/{stream_slice['marketer_id']}/campaigns?limit={DEFAULT_LIMIT}"
# Retrieve Campaign GeoLocations.
# A new endpoint has been added which returns all targeted and excluded locations of a given campaign. It can be called in order to retrieve a campaign's geotargeting.
| CampaignsByMarketers |
python | keras-team__keras | keras/src/layers/core/wrapper_test.py | {
"start": 102,
"end": 290
} | class ____(layers.Wrapper):
"""Simple Wrapper subclass."""
def call(self, inputs, **kwargs):
return ops.cast(self.layer(inputs, **kwargs), self.compute_dtype)
| ExampleWrapper |
python | PyCQA__pycodestyle | testing/data/E30not.py | {
"start": 2452,
"end": 2752
} | class ____(Protocol):
# This emits the (ignored-by-default) E704, but here we're testing
# for no E30x being emitted.
@property
def f(self) -> int: ...
@property
def g(self) -> str: ...
#: Okay
#!python
# -*- coding: utf-8 -*-
def a():
pass
#: Okay
def f(
a,
):
pass
| C |
python | getsentry__sentry | src/sentry/db/models/fields/citext.py | {
"start": 219,
"end": 798
} | class ____(models.EmailField[str, str]):
def db_type(self, connection: BaseDatabaseWrapper) -> str:
return "citext"
def create_citext_extension(using: str, **kwargs: object) -> None:
# We always need the citext extension installed for Postgres,
# and for tests, it's not always guaranteed that we will have
# run full migrations which installed it.
cursor = connections[using].cursor()
try:
cursor.execute("CREATE EXTENSION IF NOT EXISTS citext")
except Exception:
pass
pre_migrate.connect(create_citext_extension)
| CIEmailField |
python | modin-project__modin | modin/config/envvars.py | {
"start": 23690,
"end": 24168
} | class ____(EnvironmentVariable, type=dict):
"""
Ray node's custom resources to initialize with.
Visit Ray documentation for more details:
https://docs.ray.io/en/latest/ray-core/scheduling/resources.html#custom-resources
Notes
-----
Relying on Modin to initialize Ray, you should set this config
for the proper initialization with custom resources.
"""
varname = "MODIN_RAY_INIT_CUSTOM_RESOURCES"
default = None
| RayInitCustomResources |
python | sympy__sympy | sympy/polys/polyoptions.py | {
"start": 19099,
"end": 19473
} | class ____(Flag, metaclass=OptionType):
"""``gen`` flag to polynomial manipulation functions. """
option = 'gen'
@classmethod
def default(cls):
return 0
@classmethod
def preprocess(cls, gen):
if isinstance(gen, (Basic, int)):
return gen
else:
raise OptionError("invalid argument for 'gen' option")
| Gen |
python | google__pytype | pytype/blocks/process_blocks.py | {
"start": 3517,
"end": 5600
} | class ____(pyc.CodeVisitor):
"""Add metadata to function definition opcodes."""
def __init__(self, param_annotations):
super().__init__()
self.annots = param_annotations
def visit_code(self, code):
for op in code.code_iter:
if isinstance(op, opcodes.MAKE_FUNCTION):
if op.line in self.annots:
op.metadata.signature_annotations = self.annots[op.line]
return code
def merge_annotations(code, annotations, param_annotations):
"""Merges type comments into their associated opcodes.
Modifies code in place.
Args:
code: An OrderedCode object.
annotations: A map of lines to annotations.
param_annotations: A list of _ParamAnnotations from the director
Returns:
The code with annotations added to the relevant opcodes.
"""
if param_annotations:
visitor = FunctionDefVisitor(param_annotations)
pyc.visit(code, visitor)
visitor = CollectAnnotationTargetsVisitor()
code = pyc.visit(code, visitor)
# Apply type comments to the STORE_* opcodes
for line, op in visitor.store_ops.items():
if line in annotations:
annot = annotations[line]
if annot.name in (None, op.argval):
op.annotation = annot.annotation
# Apply type comments to the MAKE_FUNCTION opcodes
for start, (end, op) in sorted(
visitor.make_function_ops.items(), reverse=True
):
for i in range(start, end):
# Take the first comment we find as the function typecomment.
if i in annotations:
# Record the line number of the comment for error messages.
op.annotation = (annotations[i].annotation, i)
break
return code
def adjust_returns(code, block_returns):
"""Adjust line numbers for return statements in with blocks."""
rets = {k: iter(v) for k, v in block_returns}
for block in code.order:
for op in block:
if op.__class__.__name__ in ("RETURN_VALUE", "RETURN_CONST"):
if op.line in rets:
lines = rets[op.line]
new_line = next(lines, None)
if new_line:
op.line = new_line
| FunctionDefVisitor |
python | tensorflow__tensorflow | tensorflow/python/training/saver_test.py | {
"start": 132131,
"end": 132980
} | class ____(trackable_base.Trackable):
"""A Trackable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {trackable_base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
| _OwnsMirroredVariables |
python | sanic-org__sanic | sanic/cli/executor.py | {
"start": 429,
"end": 683
} | class ____(ArgumentParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.description:
self.description = ""
self.description = get_logo(True) + self.description
| ExecutorSubParser |
python | neetcode-gh__leetcode | python/1345-jump-game-iv.py | {
"start": 70,
"end": 1796
} | class ____:
# Time O(n) - Space O(n)
def minJumps(self, arr: List[int]) -> int:
n = len(arr)
# Base case.
if n < 2:
return 0
# A dictionary of vertices indexed by values.
d = defaultdict(list)
for i in reversed(range(n)):
d[arr[i]].append(i)
# A function that gets all neighbors of a node that we have not
# queued yet.
def getUnqueuedNeighbors(i: int) -> List[int]:
adj = []
# We can reach the element before.
if 0 < i and not seen[i - 1]:
seen[i - 1] = True
adj.append(i - 1)
# We can reach the element after.
if i < n - 1 and not seen[i + 1]:
seen[i + 1] = True
adj.append(i + 1)
# We can also reach any element with the same value.
if arr[i] in d:
for node in d[arr[i]]:
if node != i:
adj.append(node)
seen[node] = True
d.pop(arr[i])
return adj
# A list of nodes that we have visited already.
seen = [False] * n
seen[0] = True
# BFS starting at 0 and counting the steps until we reach n-1.
steps, level = 0, deque([0])
while level:
steps += 1
# Process an entire level.
for _ in range(len(level)):
current = level.popleft()
for nei in getUnqueuedNeighbors(current):
# If this is the target node, return.
if nei == n - 1:
return steps
level.append(nei)
| Solution |
python | facelessuser__soupsieve | tests/test_level3/test_namespace.py | {
"start": 54,
"end": 9762
} | class ____(util.TestCase):
"""Test namespace selectors."""
MARKUP = """
<?xml version="1.0" encoding="UTF-8"?>
<tag id="root">
<head id="0"></head>
<foo:other id="1" xmlns:foo="http://me.com/namespaces/foofoo"
xmlns:bar="http://me.com/namespaces/foobar">
<foo:head id="2">
<foo:title id="3"></foo:title>
<bar:title id="4"></bar:title>
</foo:head>
<body id="5">
<foo:e1 id="6"></foo:e1>
<bar:e1 id="7"></bar:e1>
<e1 id="8"></e1>
<foo:e2 id="9"></foo:e2>
<bar:e2 id="10"></bar:e2>
<e2 id="11"></e2>
<foo:e3 id="12"></foo:e3>
<bar:e3 id="13"></bar:e3>
<e3 id="14"></e3>
</body>
</foo:other>
<other id="15" xmlns="http://me.com/namespaces/other">
<e4 id="16">Inherit</er>
</other>
</tag>
"""
MARKUP_ATTR = """
<div>
<h1>A contrived example</h1>
<svg viewBox="0 0 20 32" class="icon icon-1">
<use id="0" xlink:href="images/sprites.svg#icon-undo"></use>
</svg>
<svg viewBox="0 0 30 32" class="icon icon-2">
<use id="1" xlink:href="images/sprites.svg#icon-redo"></use>
</svg>
<svg viewBox="0 0 40 32" class="icon icon-3">
<use id="2" xlink:href="images/sprites.svg#icon-forward"></use>
</svg>
<svg viewBox="0 0 50 32" class="icon icon-4">
<use id="3" xlink:href="other/sprites.svg#icon-reply"></use>
</svg>
<svg viewBox="0 0 50 32" class="icon icon-4">
<use id="4" :href="other/sprites.svg#icon-reply"></use>
</svg>
<svg viewBox="0 0 50 32" class="icon icon-4">
<use id="5" other:href="other/sprites.svg#icon-reply" xlink:other="value doesn't match"></use>
</svg>
</div>
"""
def wrap_xlink(self, content, xhtml=False):
"""Wrap with `xlink`."""
xhtml_ns = 'xmlns="http://www.w3.org/1999/xhtml"' if xhtml else ''
return f"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html {xhtml_ns} xmlns:xlink="http://www.w3.org/1999/xlink">
<head>
</head>
<body>
{content}
</body>
</html>
"""
def test_namespace(self):
"""Test namespace."""
self.assert_selector(
self.MARKUP,
"foo|title",
["3"],
namespaces={
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
def test_namespace_case(self):
"""Test that namespaces are always case sensitive."""
# These won't match
self.assert_selector(
self.MARKUP,
"FOO|title",
[],
namespaces={
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
self.assert_selector(
self.MARKUP,
"foo|title",
[],
namespaces={
"FOO": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
def test_namespace_with_universal_tag(self):
"""Test namespace with universal selector for the tag."""
self.assert_selector(
self.MARKUP,
"bar|*",
["4", "7", "10", "13"],
namespaces={
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
def test_no_namespace(self):
"""Test for tags with no namespace."""
self.assert_selector(
self.MARKUP,
"|head",
["0"],
namespaces={
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
def test_universal_namespace(self):
"""Test for a tag with a universal namespace selector."""
self.assert_selector(
self.MARKUP,
"*|e2",
["9", "10", "11"],
namespaces={
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
def test_namespace_no_default(self):
"""
Test for a tag with without specifying a default namespace.
Because we employ level 4 selectors
E, when no default namespace is defined, will be read as *|E.
"""
self.assert_selector(
self.MARKUP,
"e3",
["12", "13", "14"],
namespaces={
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
def test_namespace_with_default(self):
"""Test for a tag with a default namespace."""
# Now that we apply a default namespace. Null space.
self.assert_selector(
self.MARKUP,
"e3",
["14"],
namespaces={
"": "",
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
self.assert_selector(
self.MARKUP,
"head",
["0"],
namespaces={
"": "",
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar"
},
flags=util.XML
)
def test_namespace_inherit(self):
"""Test for a tag namespace inheritance."""
# Because no prefix is specified for "other" in the above document,
# `e4` inherits the other namespace. The prefix in this case doesn't matter.
# We specify `other` as prefix in our CSS just so we can use it to target the element.
self.assert_selector(
self.MARKUP,
"e4",
[],
namespaces={
"": "",
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar",
"other": "http://me.com/namespaces/other"
},
flags=util.XML
)
self.assert_selector(
self.MARKUP,
"other|e4",
['16'],
namespaces={
"": "",
"foo": "http://me.com/namespaces/foofoo",
"bar": "http://me.com/namespaces/foobar",
"other": "http://me.com/namespaces/other"
},
flags=util.XML
)
def test_undefined_namespace(self):
"""Test undefined namespace."""
# Namespaces are defined wrong
markup = """
<tag id="1" xmlns:ns1=http://namespace1/ xmlns:ns2=http://namespace2/>
<ns1:el id="2">...</ns1:el>
<ns2:el id="3">...</ns2:el>
</tag>
"""
# We are not feeding in the namespaces so they shouldn't be found.
self.assert_selector(
markup,
"ns1|el, ns2|el",
[],
flags=util.XML
)
def test_attribute_namespace(self):
"""Test attribute namespace."""
self.assert_selector(
self.MARKUP_ATTR,
'[xlink|href*=forw],[xlink|href="images/sprites.svg#icon-redo"]',
['1', '2'],
namespaces={"xlink": "http://www.w3.org/1999/xlink"},
flags=util.HTML5
)
def test_attribute_namespace_escapes(self):
"""Test attribute namespace escapes."""
self.assert_selector(
self.MARKUP_ATTR,
'[xlink\\:href*=forw]',
['2'],
namespaces={"xlink": "http://www.w3.org/1999/xlink"},
flags=util.HTML5
)
self.assert_selector(
self.MARKUP_ATTR,
'[\\:href]',
['4'],
namespaces={"xlink": "http://www.w3.org/1999/xlink"},
flags=util.HTML5
)
def test_invalid_namespace_attribute(self):
"""Test invalid namespace attributes."""
self.assert_selector(
self.MARKUP_ATTR,
'[xlink\\:nomatch*=forw]',
[],
namespaces={"xlink": "http://www.w3.org/1999/xlink"},
flags=util.HTML5
)
self.assert_selector(
self.MARKUP_ATTR,
'[bad|href*=forw]',
[],
namespaces={"xlink": "http://www.w3.org/1999/xlink"},
flags=util.HTML5
)
def test_attribute_namespace_xhtml(self):
"""Test attribute namespace in XHTML."""
self.assert_selector(
self.wrap_xlink(self.MARKUP_ATTR, True),
'[xlink|href*=forw],[xlink|href="images/sprites.svg#icon-redo"]',
['1', '2'],
namespaces={"xlink": "http://www.w3.org/1999/xlink"},
flags=util.XHTML
)
def test_attribute_namespace_xml(self):
"""Test attribute namespace in XML."""
self.assert_selector(
self.wrap_xlink(self.MARKUP_ATTR),
'[xlink|href*=forw],[xlink|href="images/sprites.svg#icon-redo"]',
['1', '2'],
namespaces={"xlink": "http://www.w3.org/1999/xlink"},
flags=util.XHTML
)
| TestNamespace |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 9716,
"end": 9797
} | class ____(Node):
"""Base node for all statements."""
abstract = True
| Stmt |
python | apache__airflow | airflow-core/tests/unit/utils/test_helpers.py | {
"start": 9652,
"end": 9723
} | class ____(MockJobRunner):
job_type = "TriggererJob"
| TriggererJobRunner |
python | django__django | django/http/response.py | {
"start": 24864,
"end": 24933
} | class ____(HttpResponse):
status_code = 500
| HttpResponseServerError |
python | mlflow__mlflow | mlflow/gateway/uc_function_utils.py | {
"start": 3867,
"end": 3980
} | class ____:
statement: str
parameters: list["StatementParameterListItem"]
@dataclass
| ParameterizedStatement |
python | django__django | tests/model_forms/models.py | {
"start": 12047,
"end": 12468
} | class ____(models.Model):
most_recently_fooled = models.ForeignKey(
Character,
models.CASCADE,
limit_choices_to=today_callable_dict,
related_name="jokes",
)
has_fooled_today = models.ManyToManyField(
Character,
limit_choices_to=today_callable_q,
related_name="jokes_today",
)
funny = models.BooleanField(default=False)
# Model for #13776
| StumpJoke |
python | protocolbuffers__protobuf | python/google/protobuf/internal/text_format_test.py | {
"start": 112676,
"end": 113595
} | class ____(unittest.TestCase):
def testForcePrintOptionalColon(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
output = text_format.MessageToString(
message,
force_colon=True)
expected = ('any_value: {\n'
' [type.googleapis.com/proto2_unittest.OneString]: {\n'
' data: "string"\n'
' }\n'
'}\n')
self.assertEqual(expected, output)
def testPrintShortFormatRepeatedFields(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(1)
output = text_format.MessageToString(
message, use_short_repeated_primitives=True, force_colon=True)
self.assertEqual('repeated_int32: [1]\n', output)
if __name__ == '__main__':
unittest.main()
| OptionalColonMessageToStringTest |
python | celery__celery | t/unit/backends/test_redis.py | {
"start": 12760,
"end": 33403
} | class ____(basetest_RedisBackend):
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self):
pytest.importorskip('redis')
from celery.backends.redis import RedisBackend
x = RedisBackend(app=self.app)
assert loads(dumps(x))
def test_no_redis(self):
self.Backend.redis = None
with pytest.raises(ImproperlyConfigured):
self.Backend(app=self.app)
def test_username_password_from_redis_conf(self):
self.app.conf.redis_password = 'password'
x = self.Backend(app=self.app)
assert x.connparams
assert 'username' not in x.connparams
assert x.connparams['password'] == 'password'
self.app.conf.redis_username = 'username'
x = self.Backend(app=self.app)
assert x.connparams
assert x.connparams['username'] == 'username'
assert x.connparams['password'] == 'password'
def test_credential_provider_from_redis_conf(self):
self.app.conf.redis_backend_credential_provider = "redis.CredentialProvider"
x = self.Backend(app=self.app)
assert x.connparams
assert 'credential_provider' in x.connparams
assert 'username' not in x.connparams
assert 'password' not in x.connparams
# with local credential provider
self.app.conf.redis_backend_credential_provider = MyCredentialProvider()
x = self.Backend(app=self.app)
assert x.connparams
assert 'credential_provider' in x.connparams
assert 'username' not in x.connparams
assert 'password' not in x.connparams
# raise ImportError
self.app.conf.redis_backend_credential_provider = "not_exist.CredentialProvider"
with pytest.raises(ImportError):
self.Backend(app=self.app)
# raise value Error
self.app.conf.redis_backend_credential_provider = NonCredentialProvider()
with pytest.raises(ValueError):
self.Backend(app=self.app)
def test_url(self):
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'redis://:bosco@vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
assert 'username' not in x.connparams
x = self.Backend(
'redis://username:bosco@vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['username'] == 'username'
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
def test_url_with_credential_provider(self):
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'redis://:bosco@vandelay.com:123/1?credential_provider=redis.CredentialProvider', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
assert isinstance(x.connparams['credential_provider'], CredentialProvider)
assert "username" not in x.connparams
assert "password" not in x.connparams
# without username and password
x = self.Backend(
'redis://@vandelay.com:123/1?credential_provider=redis.UsernamePasswordCredentialProvider', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert isinstance(x.connparams['credential_provider'], CredentialProvider)
# raise importError
with pytest.raises(ImportError):
self.Backend(
'redis://@vandelay.com:123/1?credential_provider=not_exist.CredentialProvider', app=self.app,
)
# raise valueError
with pytest.raises(ValueError):
# some non-credential provider class
# not ideal but serve purpose
self.Backend(
'redis://@vandelay.com:123/1?credential_provider=abc.ABC', app=self.app,
)
def test_timeouts_in_url_coerced(self):
pytest.importorskip('redis')
x = self.Backend(
('redis://:bosco@vandelay.com:123//1?'
'socket_timeout=30&socket_connect_timeout=100'),
app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30
assert x.connparams['socket_connect_timeout'] == 100
def test_socket_url(self):
pytest.importorskip('redis')
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'socket:///tmp/redis.sock?virtual_host=/3', app=self.app,
)
assert x.connparams
assert x.connparams['path'] == '/tmp/redis.sock'
assert (x.connparams['connection_class'] is
redis.UnixDomainSocketConnection)
assert 'host' not in x.connparams
assert 'port' not in x.connparams
assert x.connparams['socket_timeout'] == 30.0
assert 'socket_connect_timeout' not in x.connparams
assert 'socket_keepalive' not in x.connparams
assert x.connparams['db'] == 3
def test_backend_ssl(self):
pytest.importorskip('redis')
self.app.conf.redis_backend_use_ssl = {
'ssl_cert_reqs': ssl.CERT_REQUIRED,
'ssl_ca_certs': '/path/to/ca.crt',
'ssl_certfile': '/path/to/client.crt',
'ssl_keyfile': '/path/to/client.key',
}
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'rediss://:bosco@vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt'
assert x.connparams['ssl_certfile'] == '/path/to/client.crt'
assert x.connparams['ssl_keyfile'] == '/path/to/client.key'
from redis.connection import SSLConnection
assert x.connparams['connection_class'] is SSLConnection
def test_backend_health_check_interval_ssl(self):
pytest.importorskip('redis')
self.app.conf.redis_backend_use_ssl = {
'ssl_cert_reqs': ssl.CERT_REQUIRED,
'ssl_ca_certs': '/path/to/ca.crt',
'ssl_certfile': '/path/to/client.crt',
'ssl_keyfile': '/path/to/client.key',
}
self.app.conf.redis_backend_health_check_interval = 10
x = self.Backend(
'rediss://:bosco@vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['health_check_interval'] == 10
from redis.connection import SSLConnection
assert x.connparams['connection_class'] is SSLConnection
def test_backend_health_check_interval(self):
pytest.importorskip('redis')
self.app.conf.redis_backend_health_check_interval = 10
x = self.Backend(
'redis://vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['health_check_interval'] == 10
def test_backend_health_check_interval_not_set(self):
pytest.importorskip('redis')
x = self.Backend(
'redis://vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert "health_check_interval" not in x.connparams
def test_backend_redis_client_name(self):
pytest.importorskip('redis')
self.app.conf.redis_client_name = 'celery-worker'
x = self.Backend(
'redis://vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['client_name'] == 'celery-worker'
def test_backend_redis_client_name_not_set(self):
pytest.importorskip('redis')
x = self.Backend(
'redis://vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['client_name'] is None
@pytest.mark.parametrize('cert_str', [
"required",
"CERT_REQUIRED",
])
def test_backend_ssl_certreq_str(self, cert_str):
pytest.importorskip('redis')
self.app.conf.redis_backend_use_ssl = {
'ssl_cert_reqs': cert_str,
'ssl_ca_certs': '/path/to/ca.crt',
'ssl_certfile': '/path/to/client.crt',
'ssl_keyfile': '/path/to/client.key',
}
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'rediss://:bosco@vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt'
assert x.connparams['ssl_certfile'] == '/path/to/client.crt'
assert x.connparams['ssl_keyfile'] == '/path/to/client.key'
from redis.connection import SSLConnection
assert x.connparams['connection_class'] is SSLConnection
@pytest.mark.parametrize('cert_str', [
"required",
"CERT_REQUIRED",
])
def test_backend_ssl_url(self, cert_str):
pytest.importorskip('redis')
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'rediss://:bosco@vandelay.com:123//1?ssl_cert_reqs=%s' % cert_str,
app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
from redis.connection import SSLConnection
assert x.connparams['connection_class'] is SSLConnection
@pytest.mark.parametrize('cert_str', [
"none",
"CERT_NONE",
])
def test_backend_ssl_url_options(self, cert_str):
pytest.importorskip('redis')
x = self.Backend(
(
'rediss://:bosco@vandelay.com:123//1'
'?ssl_cert_reqs={cert_str}'
'&ssl_ca_certs=%2Fvar%2Fssl%2Fmyca.pem'
'&ssl_certfile=%2Fvar%2Fssl%2Fredis-server-cert.pem'
'&ssl_keyfile=%2Fvar%2Fssl%2Fprivate%2Fworker-key.pem'
).format(cert_str=cert_str),
app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['ssl_cert_reqs'] == ssl.CERT_NONE
assert x.connparams['ssl_ca_certs'] == '/var/ssl/myca.pem'
assert x.connparams['ssl_certfile'] == '/var/ssl/redis-server-cert.pem'
assert x.connparams['ssl_keyfile'] == '/var/ssl/private/worker-key.pem'
@pytest.mark.parametrize('cert_str', [
"optional",
"CERT_OPTIONAL",
])
def test_backend_ssl_url_cert_none(self, cert_str):
pytest.importorskip('redis')
x = self.Backend(
'rediss://:bosco@vandelay.com:123//1?ssl_cert_reqs=%s' % cert_str,
app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['ssl_cert_reqs'] == ssl.CERT_OPTIONAL
from redis.connection import SSLConnection
assert x.connparams['connection_class'] is SSLConnection
@pytest.mark.parametrize("uri", [
'rediss://:bosco@vandelay.com:123//1?ssl_cert_reqs=CERT_KITTY_CATS',
'rediss://:bosco@vandelay.com:123//1'
])
def test_backend_ssl_url_invalid(self, uri):
pytest.importorskip('redis')
with pytest.raises(ValueError):
self.Backend(
uri,
app=self.app,
)
def test_conf_raises_KeyError(self):
self.app.conf = AttributeDict({
'result_serializer': 'json',
'result_cache_max': 1,
'result_expires': None,
'accept_content': ['json'],
'result_accept_content': ['json'],
})
self.Backend(app=self.app)
@patch('celery.backends.redis.logger')
def test_on_connection_error(self, logger):
intervals = iter([10, 20, 30])
exc = KeyError()
assert self.b.on_connection_error(None, exc, intervals, 1) == 10
logger.error.assert_called_with(
self.E_LOST, 1, 'Inf', 'in 10.00 seconds')
assert self.b.on_connection_error(10, exc, intervals, 2) == 20
logger.error.assert_called_with(self.E_LOST, 2, 10, 'in 20.00 seconds')
assert self.b.on_connection_error(10, exc, intervals, 3) == 30
logger.error.assert_called_with(self.E_LOST, 3, 10, 'in 30.00 seconds')
@patch('celery.backends.redis.retry_over_time')
def test_retry_policy_conf(self, retry_over_time):
self.app.conf.result_backend_transport_options = dict(
retry_policy=dict(
max_retries=2,
interval_start=0,
interval_step=0.01,
),
)
b = self.Backend(app=self.app)
def fn():
return 1
# We don't want to re-test retry_over_time, just check we called it
# with the expected args
b.ensure(fn, (),)
retry_over_time.assert_called_with(
fn, b.connection_errors, (), {}, ANY,
max_retries=2, interval_start=0, interval_step=0.01, interval_max=1
)
def test_exception_safe_to_retry(self):
b = self.Backend(app=self.app)
assert not b.exception_safe_to_retry(Exception("failed"))
assert not b.exception_safe_to_retry(BaseException("failed"))
assert not b.exception_safe_to_retry(exceptions.RedisError("redis error"))
assert b.exception_safe_to_retry(exceptions.ConnectionError("service unavailable"))
assert b.exception_safe_to_retry(exceptions.TimeoutError("timeout"))
def test_incr(self):
self.b.client = Mock(name='client')
self.b.incr('foo')
self.b.client.incr.assert_called_with('foo')
def test_expire(self):
self.b.client = Mock(name='client')
self.b.expire('foo', 300)
self.b.client.expire.assert_called_with('foo', 300)
def test_apply_chord(self, unlock='celery.chord_unlock'):
self.app.tasks[unlock] = Mock()
header_result_args = (
uuid(),
[self.app.AsyncResult(x) for x in range(3)],
)
self.b.apply_chord(header_result_args, None)
assert self.app.tasks[unlock].apply_async.call_count == 0
def test_unpack_chord_result(self):
self.b.exception_to_python = Mock(name='etp')
decode = Mock(name='decode')
exc = KeyError()
tup = decode.return_value = (1, 'id1', states.FAILURE, exc)
with pytest.raises(ChordError):
self.b._unpack_chord_result(tup, decode)
decode.assert_called_with(tup)
self.b.exception_to_python.assert_called_with(exc)
exc = ValueError()
tup = decode.return_value = (2, 'id2', states.RETRY, exc)
ret = self.b._unpack_chord_result(tup, decode)
self.b.exception_to_python.assert_called_with(exc)
assert ret is self.b.exception_to_python()
def test_on_chord_part_return_no_gid_or_tid(self):
request = Mock(name='request')
request.id = request.group = request.group_index = None
assert self.b.on_chord_part_return(request, 'SUCCESS', 10) is None
def test_ConnectionPool(self):
self.b.redis = Mock(name='redis')
assert self.b._ConnectionPool is None
assert self.b.ConnectionPool is self.b.redis.ConnectionPool
assert self.b.ConnectionPool is self.b.redis.ConnectionPool
def test_expires_defaults_to_config(self):
self.app.conf.result_expires = 10
b = self.Backend(expires=None, app=self.app)
assert b.expires == 10
def test_expires_is_int(self):
b = self.Backend(expires=48, app=self.app)
assert b.expires == 48
def test_add_to_chord(self):
b = self.Backend('redis://', app=self.app)
gid = uuid()
b.add_to_chord(gid, 'sig')
b.client.incr.assert_called_with(b.get_key_for_group(gid, '.t'), 1)
def test_set_chord_size(self):
b = self.Backend('redis://', app=self.app)
gid = uuid()
b.set_chord_size(gid, 10)
b.client.set.assert_called_with(b.get_key_for_group(gid, '.s'), 10)
def test_expires_is_None(self):
b = self.Backend(expires=None, app=self.app)
assert b.expires == self.app.conf.result_expires.total_seconds()
def test_expires_is_timedelta(self):
b = self.Backend(expires=timedelta(minutes=1), app=self.app)
assert b.expires == 60
def test_mget(self):
assert self.b.mget(['a', 'b', 'c'])
self.b.client.mget.assert_called_with(['a', 'b', 'c'])
def test_set_no_expire(self):
self.b.expires = None
self.b._set_with_state('foo', 'bar', states.SUCCESS)
def test_process_cleanup(self):
self.b.process_cleanup()
def test_get_set_forget(self):
tid = uuid()
self.b.store_result(tid, 42, states.SUCCESS)
assert self.b.get_state(tid) == states.SUCCESS
assert self.b.get_result(tid) == 42
self.b.forget(tid)
assert self.b.get_state(tid) == states.PENDING
def test_set_expires(self):
self.b = self.Backend(expires=512, app=self.app)
tid = uuid()
key = self.b.get_key_for_task(tid)
self.b.store_result(tid, 42, states.SUCCESS)
self.b.client.expire.assert_called_with(
key, 512,
)
def test_set_raises_error_on_large_value(self):
with pytest.raises(BackendStoreError):
self.b.set('key', 'x' * (self.b._MAX_STR_VALUE_SIZE + 1))
| test_RedisBackend |
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 9420,
"end": 9919
} | class ____(TypedDict, total=False):
diff: Required[str]
"""Unified diff content to apply to the existing file."""
path: Required[str]
"""Path of the file to update relative to the workspace root."""
type: Required[Literal["update_file"]]
"""The operation type. Always `update_file`."""
ApplyPatchCallOperation: TypeAlias = Union[
ApplyPatchCallOperationCreateFile, ApplyPatchCallOperationDeleteFile, ApplyPatchCallOperationUpdateFile
]
| ApplyPatchCallOperationUpdateFile |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 121637,
"end": 123247
} | class ____:
def __init__(self, data):
transposed_data = list(zip(*data))
self.inp = torch.stack(transposed_data[0], 0)
self.tgt = torch.stack(transposed_data[1], 0)
def pin_memory(self):
self.inp = self.inp.pin_memory()
self.tgt = self.tgt.pin_memory()
return self
def is_pinned(self):
return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` can not be correctly unpickled from spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
data = torch.stack([sample[0] for sample in batch], 1)
t, b = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
data = torch.stack([sample[0] for sample in batch], 0)
b, t = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(
data, lengths, batch_first=True, enforce_sorted=False
)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)",
)
| SimpleCustomBatch |
python | django__django | django/contrib/gis/db/models/fields.py | {
"start": 12037,
"end": 12248
} | class ____(GeometryField):
geom_type = "GEOMETRYCOLLECTION"
geom_class = GeometryCollection
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
| GeometryCollectionField |
python | django-import-export__django-import-export | tests/core/migrations/0011_uuidcategory_legacybook_alter_uuidbook_id_and_more.py | {
"start": 105,
"end": 1383
} | class ____(migrations.Migration):
dependencies = [
("core", "0010_uuidbook"),
]
operations = [
migrations.CreateModel(
name="UUIDCategory",
fields=[
(
"catid",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.CharField(max_length=32)),
],
),
migrations.CreateModel(
name="LegacyBook",
fields=[],
options={
"proxy": True,
"indexes": [],
"constraints": [],
},
bases=("core.book",),
),
migrations.AlterField(
model_name="uuidbook",
name="id",
field=models.UUIDField(
default=uuid.uuid4, editable=False, primary_key=True, serialize=False
),
),
migrations.AddField(
model_name="uuidbook",
name="categories",
field=models.ManyToManyField(blank=True, to="core.uuidcategory"),
),
]
| Migration |
python | getsentry__sentry | src/sentry_plugins/github/client.py | {
"start": 2564,
"end": 4706
} | class ____(GithubPluginClientMixin, ApiClient):
def __init__(self, integration: RpcIntegration):
self.integration = integration
self.token: str | None = None
self.expires_at: datetime.datetime | None = None
super().__init__()
def get_token(self):
if not self.token or (
self.expires_at is not None and self.expires_at < datetime.datetime.utcnow()
):
res = self.create_token()
self.token = res["token"]
self.expires_at = datetime.datetime.strptime(res["expires_at"], "%Y-%m-%dT%H:%M:%SZ")
return self.token
def get_jwt(self) -> str:
exp_dt = datetime.datetime.utcnow() + datetime.timedelta(minutes=10)
exp = calendar.timegm(exp_dt.timetuple())
# Generate the JWT
payload = {
# issued at time
"iat": int(time.time()),
# JWT expiration time (10 minute maximum)
"exp": exp,
# Integration's GitHub identifier
"iss": options.get("github.integration-app-id"),
}
return jwt.encode(payload, options.get("github.integration-private-key"), algorithm="RS256")
def request(self, method, path, headers=None, data=None, params=None):
if headers is None:
headers = {
"Authorization": "token %s" % self.get_token(),
# TODO(jess): remove this whenever it's out of preview
"Accept": "application/vnd.github.machine-man-preview+json",
}
return self._request(method, path, headers=headers, data=data, params=params)
def create_token(self):
headers = {
# TODO(jess): remove this whenever it's out of preview
"Accept": "application/vnd.github.machine-man-preview+json",
}
headers.update(jwt.authorization_header(self.get_jwt()))
return self.post(
f"/app/installations/{self.integration.external_id}/access_tokens",
headers=headers,
)
def get_repositories(self):
return self.get("/installation/repositories")
| GithubPluginAppsClient |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels42.py | {
"start": 315,
"end": 1856
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels42.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [74893568, 80048128]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
[10, 20, 30, 40, 50],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": 1,
"custom": [
{
"value": "=Sheet1!$D$1",
"font": {"color": "red", "baseline": -1},
"border": {"color": "red"},
}
],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | django/utils/text.py | {
"start": 5510,
"end": 11070
} | class ____(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
When truncating HTML text (either chars or words), input will be limited to
at most `MAX_LENGTH_HTML` characters.
"""
# 5 million characters are approximately 4000 text pages or 3 web pages.
MAX_LENGTH_HTML = 5_000_000
def __init__(self, text):
super().__init__(lambda: str(text))
def chars(self, num, truncate=None, html=False):
"""
Return the text truncated to be no longer than the specified number
of characters.
`truncate` specifies what should be used to notify that the string has
been truncated, defaulting to a translatable string of an ellipsis.
"""
self._setup()
length = int(num)
if length <= 0:
return ""
text = unicodedata.normalize("NFC", self._wrapped)
if html:
parser = TruncateCharsHTMLParser(length=length, replacement=truncate)
parser.feed(text)
parser.close()
return "".join(parser.output)
return self._text_chars(length, truncate, text)
def _text_chars(self, length, truncate, text):
"""Truncate a string after a certain number of chars."""
truncate_len = calculate_truncate_chars_length(length, truncate)
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return add_truncation_text(text[: end_index or 0], truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncate a string after a certain number of words. `truncate` specifies
what should be used to notify that the string has been truncated,
defaulting to ellipsis.
"""
self._setup()
length = int(num)
if length <= 0:
return ""
if html:
parser = TruncateWordsHTMLParser(length=length, replacement=truncate)
parser.feed(self._wrapped)
parser.close()
return "".join(parser.output)
return self._text_words(length, truncate)
def _text_words(self, length, truncate):
"""
Truncate a string after a certain number of words.
Strip newlines in the string.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return add_truncation_text(" ".join(words), truncate)
return " ".join(words)
@keep_lazy_text
def get_valid_filename(name):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = str(name).strip().replace(" ", "_")
s = re.sub(r"(?u)[^-\w.]", "", s)
if s in {"", ".", ".."}:
raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
return s
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy("or")):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if not list_:
return ""
if len(list_) == 1:
return str(list_[0])
return "%s %s %s" % (
# Translators: This string is used as a separator between list elements
_(", ").join(str(i) for i in list_[:-1]),
str(last_word),
str(list_[-1]),
)
@keep_lazy_text
def normalize_newlines(text):
"""Normalize CRLF and CR newlines to just LF."""
return re_newlines.sub("\n", str(text))
@keep_lazy_text
def phone2numeric(phone):
"""Convert a phone number with letters into its numeric equivalent."""
char2number = {
"a": "2",
"b": "2",
"c": "2",
"d": "3",
"e": "3",
"f": "3",
"g": "4",
"h": "4",
"i": "4",
"j": "5",
"k": "5",
"l": "5",
"m": "6",
"n": "6",
"o": "6",
"p": "7",
"q": "7",
"r": "7",
"s": "7",
"t": "8",
"u": "8",
"v": "8",
"w": "9",
"x": "9",
"y": "9",
"z": "9",
}
return "".join(char2number.get(c, c) for c in phone.lower())
def _get_random_filename(max_random_bytes):
return b"a" * secrets.randbelow(max_random_bytes)
def compress_string(s, *, max_random_bytes=None):
compressed_data = gzip_compress(s, compresslevel=6, mtime=0)
if not max_random_bytes:
return compressed_data
compressed_view = memoryview(compressed_data)
header = bytearray(compressed_view[:10])
header[3] = gzip.FNAME
filename = _get_random_filename(max_random_bytes) + b"\x00"
return bytes(header) + filename + compressed_view[10:]
| Truncator |
python | gevent__gevent | src/greentest/3.14/test__interpreters.py | {
"start": 6163,
"end": 7475
} | class ____(TestBase):
def test_main(self):
main, *_ = _interpreters.get_main()
self.assertTrue(_interpreters.is_running(main))
@unittest.skip('Fails on FreeBSD')
def test_subinterpreter(self):
interp = _interpreters.create()
self.assertFalse(_interpreters.is_running(interp))
with _running(interp):
self.assertTrue(_interpreters.is_running(interp))
self.assertFalse(_interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = _interpreters.create()
out = _run_output(interp, dedent(f"""
import _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = _interpreters.create()
_interpreters.destroy(interp)
with self.assertRaises(InterpreterNotFoundError):
_interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(InterpreterNotFoundError):
_interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(ValueError):
_interpreters.is_running(-1)
| IsRunningTests |
python | google__pytype | pytype/matcher_test.py | {
"start": 1115,
"end": 19290
} | class ____(MatcherTestBase):
"""Test matcher.AbstractMatcher."""
def _make_class(self, name):
return abstract.InterpreterClass(name, [], {}, None, None, (), self.ctx)
def _parse_and_lookup(self, src, objname, filename=None):
if filename is None:
filename = str(hash(src))
with test_utils.Tempdir() as d:
d.create_file(filename + ".pyi", src)
self.ctx.options.tweak(pythonpath=[d.path]) # monkeypatch
return self.ctx.loader.lookup_pytd(filename, objname)
def _convert(self, x, name, as_instance=False):
pyval = self._parse_and_lookup(x, name)
if as_instance:
pyval = abstract_utils.AsInstance(pyval)
return self.ctx.convert.constant_to_value(pyval, {}, self.ctx.root_node)
def _convert_type(self, t, as_instance=False):
"""Convenience function for turning a string into an abstract value.
Note that this function cannot be called more than once per test with
the same arguments, since we hash the arguments to get a filename for
the temporary pyi.
Args:
t: The string representation of a type.
as_instance: Whether to convert as an instance.
Returns:
A BaseValue.
"""
src = textwrap.dedent(f"""
from typing import Any, Callable, Iterator, Tuple, Type, Union
from protocols import Sequence, SupportsLower
x = ... # type: {t}
""")
filename = str(hash((t, as_instance)))
x = self._parse_and_lookup(src, "x", filename).type
if as_instance:
x = abstract_utils.AsInstance(x)
return self.ctx.convert.constant_to_value(x, {}, self.ctx.root_node)
def test_basic(self):
self.assertMatch(abstract.Empty(self.ctx), abstract.Empty(self.ctx))
def test_type(self):
left = self._make_class("dummy")
type_parameters = {
abstract_utils.T: abstract.TypeParameter(abstract_utils.T, self.ctx)
}
other_type = abstract.ParameterizedClass(
self.ctx.convert.type_type, type_parameters, self.ctx
)
for result in self._match_var(left, other_type):
(instance_binding,) = result[abstract_utils.T].bindings
self.assertEqual(instance_binding.data.cls, left)
def test_union(self):
left_option1 = self._make_class("o1")
left_option2 = self._make_class("o2")
left = abstract.Union([left_option1, left_option2], self.ctx)
self.assertMatch(left, self.ctx.convert.type_type)
def test_metaclass(self):
left = self._make_class("left")
meta1 = self._make_class("m1")
meta2 = self._make_class("m2")
left.set_class(
self.ctx.root_node,
self.ctx.program.NewVariable([meta1, meta2], [], self.ctx.root_node),
)
self.assertMatch(left, meta1)
self.assertMatch(left, meta2)
def test_empty_against_class(self):
var = self.ctx.program.NewVariable()
right = self._make_class("bar")
result = self.matcher.match_var_against_type(var, right, {}, {})
self.assertEqual(result, {})
def test_empty_var_against_empty(self):
var = self.ctx.program.NewVariable()
right = abstract.Empty(self.ctx)
result = self.matcher.match_var_against_type(var, right, {}, {})
self.assertEqual(result, {})
def test_empty_against_type_parameter(self):
var = self.ctx.program.NewVariable()
right = abstract.TypeParameter("T", self.ctx)
result = self.matcher.match_var_against_type(var, right, {}, {})
self.assertCountEqual(result.keys(), ["T"])
self.assertFalse(result["T"].bindings)
def test_empty_against_unsolvable(self):
var = self.ctx.program.NewVariable()
right = abstract.Empty(self.ctx)
result = self.matcher.match_var_against_type(var, right, {}, {})
self.assertEqual(result, {})
def test_class_against_type_union(self):
left = self._make_class("foo")
union = abstract.Union((left,), self.ctx)
right = abstract.ParameterizedClass(
self.ctx.convert.type_type, {abstract_utils.T: union}, self.ctx
)
self.assertMatch(left, right)
def test_none_against_bool(self):
left = self._convert_type("None", as_instance=True)
right = self._convert_type("bool")
self.assertNoMatch(left, right)
def test_homogeneous_tuple(self):
left = self._convert_type("Tuple[int, ...]", as_instance=True)
right1 = self._convert_type("Tuple[int, ...]")
right2 = self._convert_type("Tuple[str, ...]")
self.assertMatch(left, right1)
self.assertNoMatch(left, right2)
def test_heterogeneous_tuple(self):
left1 = self._convert_type("Tuple[Union[int, str]]", as_instance=True)
left2 = self._convert_type("Tuple[int, str]", as_instance=True)
left3 = self._convert_type("Tuple[str, int]", as_instance=True)
right = self._convert_type("Tuple[int, str]")
self.assertNoMatch(left1, right)
self.assertMatch(left2, right)
self.assertNoMatch(left3, right)
def test_heterogeneous_tuple_against_homogeneous_tuple(self):
left = self._convert_type("Tuple[bool, int]", as_instance=True)
right1 = self._convert_type("Tuple[bool, ...]")
right2 = self._convert_type("Tuple[int, ...]")
right3 = self._convert_type("tuple")
self.assertNoMatch(left, right1)
self.assertMatch(left, right2)
self.assertMatch(left, right3)
def test_homogeneous_tuple_against_heterogeneous_tuple(self):
left1 = self._convert_type("Tuple[bool, ...]", as_instance=True)
left2 = self._convert_type("Tuple[int, ...]", as_instance=True)
left3 = self._convert_type("tuple", as_instance=True)
right = self._convert_type("Tuple[bool, int]")
self.assertMatch(left1, right)
self.assertNoMatch(left2, right)
self.assertMatch(left3, right)
def test_tuple_type(self):
# homogeneous against homogeneous
left = self._convert_type("Type[Tuple[float, ...]]", as_instance=True)
right1 = self._convert_type("Type[Tuple[float, ...]]")
right2 = self._convert_type("Type[Tuple[str, ...]]")
self.assertMatch(left, right1)
self.assertNoMatch(left, right2)
# heterogeneous against heterogeneous
left1 = self._convert_type("Type[Tuple[Union[int, str]]]", as_instance=True)
left2 = self._convert_type("Type[Tuple[int, str]]", as_instance=True)
left3 = self._convert_type("Type[Tuple[str, int]]", as_instance=True)
right = self._convert_type("Type[Tuple[int, str]]")
self.assertNoMatch(left1, right)
self.assertMatch(left2, right)
self.assertNoMatch(left3, right)
# heterogeneous against homogeneous
left = self._convert_type("Type[Tuple[bool, int]]", as_instance=True)
right1 = self._convert_type("Type[Tuple[bool, ...]]")
right2 = self._convert_type("Type[Tuple[int, ...]]")
right3 = self._convert_type("Type[tuple]")
self.assertNoMatch(left, right1)
self.assertMatch(left, right2)
self.assertMatch(left, right3)
# homogeneous against heterogeneous
left1 = self._convert_type("Type[Tuple[bool, ...]]", as_instance=True)
left2 = self._convert_type("Type[Tuple[int, ...]]", as_instance=True)
left3 = self._convert_type("Type[tuple]", as_instance=True)
right = self._convert_type("Type[Tuple[bool, int]]")
self.assertMatch(left1, right)
self.assertNoMatch(left2, right)
self.assertMatch(left3, right)
def test_tuple_subclass(self):
left = self._convert(
"""
from typing import Tuple
class A(Tuple[bool, int]): ...""",
"A",
as_instance=True,
)
right1 = self._convert_type("Tuple[bool, int]")
right2 = self._convert_type("Tuple[int, bool]")
right3 = self._convert_type("Tuple[int, int]")
right4 = self._convert_type("Tuple[int]")
right5 = self._convert_type("tuple")
right6 = self._convert_type("Tuple[bool, ...]")
right7 = self._convert_type("Tuple[int, ...]")
self.assertMatch(left, right1)
self.assertNoMatch(left, right2)
self.assertMatch(left, right3)
self.assertNoMatch(left, right4)
self.assertMatch(left, right5)
self.assertNoMatch(left, right6)
self.assertMatch(left, right7)
def test_annotation_class(self):
left = abstract.AnnotationClass("Dict", self.ctx)
right = self.ctx.convert.object_type
self.assertMatch(left, right)
def test_empty_tuple_class(self):
var = self.ctx.program.NewVariable()
params = {
0: abstract.TypeParameter(abstract_utils.K, self.ctx),
1: abstract.TypeParameter(abstract_utils.V, self.ctx),
}
params[abstract_utils.T] = abstract.Union((params[0], params[1]), self.ctx)
right = abstract.TupleClass(self.ctx.convert.tuple_type, params, self.ctx)
match = self.matcher.match_var_against_type(var, right, {}, {})
self.assertSetEqual(set(match), {abstract_utils.K, abstract_utils.V})
def test_unsolvable_against_tuple_class(self):
left = self.ctx.convert.unsolvable
params = {
0: abstract.TypeParameter(abstract_utils.K, self.ctx),
1: abstract.TypeParameter(abstract_utils.V, self.ctx),
}
params[abstract_utils.T] = abstract.Union((params[0], params[1]), self.ctx)
right = abstract.TupleClass(self.ctx.convert.tuple_type, params, self.ctx)
for match in self._match_var(left, right):
self.assertSetEqual(set(match), {abstract_utils.K, abstract_utils.V})
self.assertEqual(
match[abstract_utils.K].data, [self.ctx.convert.unsolvable]
)
self.assertEqual(
match[abstract_utils.V].data, [self.ctx.convert.unsolvable]
)
def test_bool_against_float(self):
left = self.ctx.convert.true
right = self.ctx.convert.primitive_classes[float]
self.assertMatch(left, right)
def test_pytd_function_against_callable(self):
f = self._convert("def f(x: int) -> bool: ...", "f")
plain_callable = self._convert_type("Callable")
good_callable1 = self._convert_type("Callable[[bool], int]")
good_callable2 = self._convert_type("Callable[..., int]")
self.assertMatch(f, plain_callable)
self.assertMatch(f, good_callable1)
self.assertMatch(f, good_callable2)
def test_pytd_function_against_callable_bad_return(self):
f = self._convert("def f(x: int) -> bool: ...", "f")
callable_bad_ret = self._convert_type("Callable[[int], str]")
self.assertNoMatch(f, callable_bad_ret)
def test_pytd_function_against_callable_bad_arg_count(self):
f = self._convert("def f(x: int) -> bool: ...", "f")
callable_bad_count1 = self._convert_type("Callable[[], bool]")
callable_bad_count2 = self._convert_type("Callable[[int, str], bool]")
self.assertNoMatch(f, callable_bad_count1)
self.assertNoMatch(f, callable_bad_count2)
def test_pytd_function_against_callable_bad_arg_type(self):
f = self._convert("def f(x: bool) -> bool: ...", "f")
callable_bad_arg1 = self._convert_type("Callable[[int], bool]")
callable_bad_arg2 = self._convert_type("Callable[[str], bool]")
self.assertNoMatch(f, callable_bad_arg1)
self.assertNoMatch(f, callable_bad_arg2)
def test_bound_pytd_function_against_callable(self):
instance = self._convert(
"""
class A:
def f(self, x: int) -> bool: ...
""",
"A",
as_instance=True,
)
binding = instance.to_binding(self.ctx.root_node)
_, var = self.ctx.attribute_handler.get_attribute(
self.ctx.root_node, instance, "f", binding
)
bound = var.data[0]
_, var = self.ctx.attribute_handler.get_attribute(
self.ctx.root_node, instance.cls, "f"
)
unbound = var.data[0]
callable_no_self = self._convert_type("Callable[[int], Any]")
callable_self = self._convert_type("Callable[[Any, int], Any]")
self.assertMatch(bound, callable_no_self)
self.assertNoMatch(unbound, callable_no_self)
self.assertNoMatch(bound, callable_self)
self.assertMatch(unbound, callable_self)
def test_native_function_against_callable(self):
# Matching a native function against a callable always succeeds, regardless
# of argument and return types.
f = abstract.NativeFunction("f", lambda x: x, self.ctx)
callable_type = self._convert_type("Callable[[int], int]")
self.assertMatch(f, callable_type)
def test_callable_instance(self):
left1 = self._convert_type("Callable[[int], bool]", as_instance=True)
left2 = self._convert_type("Callable", as_instance=True)
left3 = self._convert_type("Callable[..., int]", as_instance=True)
right1 = self._convert_type("Callable[[bool], int]")
right2 = self._convert_type("Callable[..., int]")
right3 = self._convert_type("Callable")
self.assertMatch(left1, right1)
self.assertMatch(left2, right1)
self.assertMatch(left3, right1)
self.assertMatch(left1, right2)
self.assertMatch(left2, right2)
self.assertMatch(left3, right2)
self.assertMatch(left1, right3)
self.assertMatch(left2, right3)
self.assertMatch(left3, right3)
def test_callable_instance_bad_return(self):
left1 = self._convert_type("Callable[[int], float]", as_instance=True)
left2 = self._convert_type("Callable[..., float]", as_instance=True)
right1 = self._convert_type("Callable[[bool], int]")
right2 = self._convert_type("Callable[..., int]")
self.assertNoMatch(left1, right1)
self.assertNoMatch(left2, right1)
self.assertNoMatch(left1, right2)
self.assertNoMatch(left2, right2)
def test_callable_instance_bad_arg_count(self):
left1 = self._convert_type("Callable[[], int]", as_instance=True)
left2 = self._convert_type("Callable[[str, str], int]", as_instance=True)
right = self._convert_type("Callable[[str], int]")
self.assertNoMatch(left1, right)
self.assertNoMatch(left2, right)
def test_callable_instance_bad_arg_type(self):
left1 = self._convert_type("Callable[[bool], Any]", as_instance=True)
left2 = self._convert_type("Callable[[str], Any]", as_instance=True)
right = self._convert_type("Callable[[int], Any]")
self.assertNoMatch(left1, right)
self.assertNoMatch(left2, right)
def test_type_against_callable(self):
left1 = self._convert_type("Type[int]", as_instance=True)
left2 = self._convert_type("Type[str]", as_instance=True)
right1 = self._convert_type("Callable[..., float]")
right2 = self._convert_type("Callable[[], float]")
self.assertMatch(left1, right1)
self.assertMatch(left1, right2)
self.assertNoMatch(left2, right1)
self.assertNoMatch(left2, right2)
def test_anystr_instance_against_anystr(self):
right = self.ctx.convert.lookup_value("typing", "AnyStr")
dummy_instance = abstract.Instance(self.ctx.convert.tuple_type, self.ctx)
left = abstract.TypeParameterInstance(right, dummy_instance, self.ctx)
for result in self._match_var(left, right):
self.assertCountEqual(
[(name, var.data) for name, var in result.items()],
[("typing.AnyStr", [left])],
)
def test_protocol(self):
left1 = self._convert_type("str", as_instance=True)
left2 = self._convert(
"""
class A:
def lower(self) : ...
""",
"A",
as_instance=True,
)
left3 = self._convert_type("int", as_instance=True)
right = self._convert_type("SupportsLower")
self.assertMatch(left1, right)
self.assertMatch(left2, right)
self.assertNoMatch(left3, right)
def test_protocol_iterator(self):
left1 = self._convert_type("Iterator", as_instance=True)
left2 = self._convert(
"""
class A:
def __next__(self): ...
def __iter__(self): ...
""",
"A",
as_instance=True,
)
left3 = self._convert_type("int", as_instance=True)
right = self._convert_type("Iterator")
self.assertMatch(left1, right)
self.assertMatch(left2, right)
self.assertNoMatch(left3, right)
def test_protocol_sequence(self):
left1 = self._convert_type("list", as_instance=True)
left2 = self._convert(
"""
class A:
def __getitem__(self, i) : ...
def __len__(self): ...
""",
"A",
as_instance=True,
)
left3 = self._convert_type("int", as_instance=True)
right = self._convert_type("Sequence")
self.assertMatch(left1, right)
self.assertMatch(left2, right)
self.assertNoMatch(left3, right)
@unittest.skip("Needs to be fixed, tries to match protocol against A")
def test_parameterized_protocol(self):
left1 = self._convert(
"""
from typing import Iterator
class A:
def __iter__(self) -> Iterator[int] : ...
""",
"A",
as_instance=True,
)
left2 = self._convert_type("int", as_instance=True)
right = self._convert_type("Iterable[int]")
self.assertMatch(left1, right)
self.assertNoMatch(left2, right)
def test_never(self):
self.assertMatch(self.ctx.convert.never, self.ctx.convert.never)
def test_empty_against_never(self):
self.assertMatch(self.ctx.convert.empty, self.ctx.convert.never)
def test_never_against_class(self):
right = self._convert_type("int")
self.assertNoMatch(self.ctx.convert.never, right)
def test_empty_against_parameterized_iterable(self):
left = self.ctx.convert.empty
right = abstract.ParameterizedClass(
self.ctx.convert.list_type,
{abstract_utils.T: abstract.TypeParameter(abstract_utils.T, self.ctx)},
self.ctx,
)
for subst in self._match_var(left, right):
self.assertSetEqual(set(subst), {abstract_utils.T})
self.assertListEqual(
subst[abstract_utils.T].data, [self.ctx.convert.empty]
)
def test_list_against_mapping(self):
left = self._convert_type("list", as_instance=True)
right = self.ctx.convert.lookup_value("typing", "Mapping")
self.assertNoMatch(left, right)
def test_list_against_parameterized_mapping(self):
left = self._convert_type("list", as_instance=True)
right = abstract.ParameterizedClass(
self.ctx.convert.lookup_value("typing", "Mapping"),
{
abstract_utils.K: abstract.TypeParameter(
abstract_utils.K, self.ctx
),
abstract_utils.V: abstract.TypeParameter(
abstract_utils.V, self.ctx
),
},
self.ctx,
)
self.assertNoMatch(left, right)
| MatcherTest |
python | doocs__leetcode | solution/1800-1899/1824.Minimum Sideway Jumps/Solution.py | {
"start": 0,
"end": 397
} | class ____:
def minSideJumps(self, obstacles: List[int]) -> int:
f = [1, 0, 1]
for v in obstacles[1:]:
for j in range(3):
if v == j + 1:
f[j] = inf
break
x = min(f) + 1
for j in range(3):
if v != j + 1:
f[j] = min(f[j], x)
return min(f)
| Solution |
python | uqfoundation__dill | dill/tests/test_source.py | {
"start": 790,
"end": 7311
} | class ____:
pass
_bar = Bar()
# inspect.getsourcelines # dill.source.getblocks
def test_getsource():
assert getsource(f) == 'f = lambda x: x**2\n'
assert getsource(g) == 'def g(x): return f(x) - x\n'
assert getsource(h) == 'def h(x):\n def g(x): return x\n return g(x) - x\n'
assert getname(f) == 'f'
assert getname(g) == 'g'
assert getname(h) == 'h'
assert _wrap(f)(4) == 16
assert _wrap(g)(4) == 12
assert _wrap(h)(4) == 0
assert getname(Foo) == 'Foo'
assert getname(Bar) == 'Bar'
assert getsource(Bar) == 'class Bar:\n pass\n'
assert getsource(Foo) == 'class Foo(object):\n def bar(self, x):\n return x*x+x\n'
#XXX: add getsource for _foo, _bar
# test itself
def test_itself():
assert getimport(getimport)=='from dill.source import getimport\n'
# builtin functions and objects
def test_builtin():
assert getimport(pow) == 'pow\n'
assert getimport(100) == '100\n'
assert getimport(True) == 'True\n'
assert getimport(pow, builtin=True) == 'from builtins import pow\n'
assert getimport(100, builtin=True) == '100\n'
assert getimport(True, builtin=True) == 'True\n'
# this is kinda BS... you can't import a None
assert getimport(None) == 'None\n'
assert getimport(None, builtin=True) == 'None\n'
# other imported functions
def test_imported():
from math import sin
assert getimport(sin) == 'from math import sin\n'
# interactively defined functions
def test_dynamic():
assert getimport(add) == 'from %s import add\n' % __name__
# interactive lambdas
assert getimport(squared) == 'from %s import squared\n' % __name__
# classes and class instances
def test_classes():
from io import BytesIO as StringIO
y = "from _io import BytesIO\n"
x = y if (IS_PYPY or sys.hexversion >= PY310b) else "from io import BytesIO\n"
s = StringIO()
assert getimport(StringIO) == x
assert getimport(s) == y
# interactively defined classes and class instances
assert getimport(Foo) == 'from %s import Foo\n' % __name__
assert getimport(_foo) == 'from %s import Foo\n' % __name__
# test importable
def test_importable():
assert importable(add, source=False) == 'from %s import add\n' % __name__
assert importable(squared, source=False) == 'from %s import squared\n' % __name__
assert importable(Foo, source=False) == 'from %s import Foo\n' % __name__
assert importable(Foo.bar, source=False) == 'from %s import bar\n' % __name__
assert importable(_foo.bar, source=False) == 'from %s import bar\n' % __name__
assert importable(None, source=False) == 'None\n'
assert importable(100, source=False) == '100\n'
assert importable(add, source=True) == 'def add(x,y):\n return x+y\n'
assert importable(squared, source=True) == 'squared = lambda x:x**2\n'
assert importable(None, source=True) == 'None\n'
assert importable(Bar, source=True) == 'class Bar:\n pass\n'
assert importable(Foo, source=True) == 'class Foo(object):\n def bar(self, x):\n return x*x+x\n'
assert importable(Foo.bar, source=True) == 'def bar(self, x):\n return x*x+x\n'
assert importable(Foo.bar, source=False) == 'from %s import bar\n' % __name__
assert importable(Foo.bar, alias='memo', source=False) == 'from %s import bar as memo\n' % __name__
assert importable(Foo, alias='memo', source=False) == 'from %s import Foo as memo\n' % __name__
assert importable(squared, alias='memo', source=False) == 'from %s import squared as memo\n' % __name__
assert importable(squared, alias='memo', source=True) == 'memo = squared = lambda x:x**2\n'
assert importable(add, alias='memo', source=True) == 'def add(x,y):\n return x+y\n\nmemo = add\n'
assert importable(None, alias='memo', source=True) == 'memo = None\n'
assert importable(100, alias='memo', source=True) == 'memo = 100\n'
assert importable(add, builtin=True, source=False) == 'from %s import add\n' % __name__
assert importable(squared, builtin=True, source=False) == 'from %s import squared\n' % __name__
assert importable(Foo, builtin=True, source=False) == 'from %s import Foo\n' % __name__
assert importable(Foo.bar, builtin=True, source=False) == 'from %s import bar\n' % __name__
assert importable(_foo.bar, builtin=True, source=False) == 'from %s import bar\n' % __name__
assert importable(None, builtin=True, source=False) == 'None\n'
assert importable(100, builtin=True, source=False) == '100\n'
def test_numpy():
    """Exercise importable() against numpy objects (array, int64, bool_).

    The whole body is wrapped in try/except ImportError so the test is a
    silent no-op when numpy is not installed.
    """
    try:
        import numpy as np
        y = np.array
        x = y([1,2,3])
        # instances render as an import line plus their repr; the expected
        # text is identical for source=False and source=True below, so the
        # flag makes no difference for these C-implemented numpy objects
        assert importable(x, source=False) == 'from numpy import array\narray([1, 2, 3])\n'
        assert importable(y, source=False) == 'from %s import array\n' % y.__module__
        assert importable(x, source=True) == 'from numpy import array\narray([1, 2, 3])\n'
        assert importable(y, source=True) == 'from %s import array\n' % y.__module__
        y = np.int64
        x = y(0)
        assert importable(x, source=False) == 'from numpy import int64\nint64(0)\n'
        assert importable(y, source=False) == 'from %s import int64\n' % y.__module__
        assert importable(x, source=True) == 'from numpy import int64\nint64(0)\n'
        assert importable(y, source=True) == 'from %s import int64\n' % y.__module__
        y = np.bool_
        x = y(0)
        import warnings
        # accessing np.bool can emit FutureWarning/DeprecationWarning on
        # some numpy versions (hence the filters while we probe for it)
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=FutureWarning)
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            # choose the symbol name importable() is expected to emit,
            # depending on whether np.bool exists and what it aliases
            if hasattr(np, 'bool'): b = 'bool_' if np.bool is bool else 'bool'
            else: b = 'bool_'
            assert importable(x, source=False) == 'from numpy import %s\n%s(False)\n' % (b,b)
            assert importable(y, source=False) == 'from %s import %s\n' % (y.__module__,b)
            assert importable(x, source=True) == 'from numpy import %s\n%s(False)\n' % (b,b)
            assert importable(y, source=True) == 'from %s import %s\n' % (y.__module__,b)
    except ImportError: pass
#NOTE: if before getimport(pow), will cause pow to throw AssertionError
def test_foo():
    """A dynamically built instance should serialize through a dill payload."""
    expected_prefix = (
        "import dill\n"
        "class Foo(object):\n"
        "  def bar(self, x):\n"
        "    return x*x+x\n"
        "dill.loads("
    )
    assert importable(_foo, source=True).startswith(expected_prefix)
def test_safe():
    """A maliciously renamed class must raise SyntaxError, never TypeError."""
    import abc
    victim = abc.ABC()
    # poison the class name with text that cannot parse as an identifier
    victim.__class__.__name__ = "(abc' + print('foo')]) # "
    try:
        getsource(victim, force=True)
    except TypeError:
        assert False  # wrong failure mode: must not surface as TypeError
    except SyntaxError:
        pass  # expected: the forged name is rejected at parse time
    else:
        assert False  # getsource must not succeed on the poisoned name
if __name__ == '__main__':
    # Run the suite; invocation order is preserved from the original
    # (the NOTE above indicates ordering matters for getimport(pow)).
    for case in (
        test_getsource,
        test_itself,
        test_builtin,
        test_imported,
        test_dynamic,
        test_classes,
        test_importable,
        test_numpy,
        test_foo,
        test_safe,
    ):
        case()
| Bar |
python | ansible__ansible | lib/ansible/executor/play_iterator.py | {
"start": 1430,
"end": 5093
} | class ____:
def __init__(self, blocks):
self._blocks = blocks[:]
self.handlers = []
self.handler_notifications = []
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_handlers_task = 0
self.run_state = IteratingStates.SETUP
self.fail_state = FailedStates.NONE
self.pre_flushing_run_state = None
self.update_handlers = True
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, handlers=%d, run_state=%s, fail_state=%s, "
"pre_flushing_run_state=%s, update_handlers=%s, pending_setup=%s, "
"tasks child state? (%s), rescue child state? (%s), always child state? (%s), "
"did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.cur_handlers_task,
self.run_state,
self.fail_state,
self.pre_flushing_run_state,
self.update_handlers,
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks',
'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_handlers_task',
'run_state', 'fail_state', 'pre_flushing_run_state', 'update_handlers', 'pending_setup',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.handlers = self.handlers[:]
new_state.handler_notifications = self.handler_notifications[:]
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.cur_handlers_task = self.cur_handlers_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pre_flushing_run_state = self.pre_flushing_run_state
new_state.update_handlers = self.update_handlers
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
| HostState |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofworkv2.py | {
"start": 45270,
"end": 50747
} | class ____(
fixtures.MappedTest, testing.AssertsExecutionResults, AssertsUOW
):
@classmethod
def define_tables(cls, metadata):
Table(
"nodes",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("favorite_node_id", Integer, ForeignKey("nodes.id")),
)
Table(
"node_to_nodes",
metadata,
Column(
"left_node_id",
Integer,
ForeignKey("nodes.id"),
primary_key=True,
),
Column(
"right_node_id",
Integer,
ForeignKey("nodes.id"),
primary_key=True,
),
)
def test_many_to_many_one(self):
nodes, node_to_nodes = self.tables.nodes, self.tables.node_to_nodes
class Node(ComparableEntity):
pass
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node,
secondary=node_to_nodes,
primaryjoin=nodes.c.id == node_to_nodes.c.left_node_id,
secondaryjoin=nodes.c.id == node_to_nodes.c.right_node_id,
backref="parents",
),
"favorite": relationship(Node, remote_side=nodes.c.id),
},
)
with fixture_session(autoflush=False) as sess:
n1 = Node(data="n1")
n2 = Node(data="n2")
n3 = Node(data="n3")
n4 = Node(data="n4")
n5 = Node(data="n5")
n4.favorite = n3
n1.favorite = n5
n5.favorite = n2
n1.children = [n2, n3, n4]
n2.children = [n3, n5]
n3.children = [n5, n4]
sess.add_all([n1, n2, n3, n4, n5])
# can't really assert the SQL on this easily
# since there's too many ways to insert the rows.
# so check the end result
sess.flush()
eq_(
sess.query(
node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id
)
.order_by(
node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id
)
.all(),
sorted(
[
(n1.id, n2.id),
(n1.id, n3.id),
(n1.id, n4.id),
(n2.id, n3.id),
(n2.id, n5.id),
(n3.id, n5.id),
(n3.id, n4.id),
]
),
)
sess.delete(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
# this is n1.parents firing off, as it should, since
# passive_deletes is False for n1.parents
CompiledSQL(
"SELECT nodes.id, nodes.data, "
"nodes.favorite_node_id FROM "
"nodes, node_to_nodes WHERE :param_1 = "
"node_to_nodes.right_node_id AND nodes.id = "
"node_to_nodes.left_node_id",
lambda ctx: {"param_1": n1.id},
),
CompiledSQL(
"DELETE FROM node_to_nodes WHERE "
"node_to_nodes.left_node_id = :left_node_id AND "
"node_to_nodes.right_node_id = :right_node_id",
lambda ctx: [
{"right_node_id": n2.id, "left_node_id": n1.id},
{"right_node_id": n3.id, "left_node_id": n1.id},
{"right_node_id": n4.id, "left_node_id": n1.id},
],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
for n in [n2, n3, n4, n5]:
sess.delete(n)
# load these collections
# outside of the flush() below
n4.children
n5.children
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM node_to_nodes "
"WHERE node_to_nodes.left_node_id "
"= :left_node_id AND node_to_nodes.right_node_id = "
":right_node_id",
lambda ctx: [
{"right_node_id": n5.id, "left_node_id": n3.id},
{"right_node_id": n4.id, "left_node_id": n3.id},
{"right_node_id": n3.id, "left_node_id": n2.id},
{"right_node_id": n5.id, "left_node_id": n2.id},
],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n4.id}, {"id": n5.id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n2.id}, {"id": n3.id}],
),
)
| SingleCycleM2MTest |
python | getsentry__sentry | tests/sentry/workflow_engine/migration_helpers/test_migrate_alert_rule.py | {
"start": 40623,
"end": 47124
} | class ____(BaseMetricAlertMigrationTest):
def setUp(self) -> None:
# set up legacy objects
METADATA = {
"api_key": "1234-ABCD",
"base_url": "https://api.opsgenie.com/",
"domain_name": "test-app.app.opsgenie.com",
}
self.rpc_user = user_service.get_user(user_id=self.user.id)
self.og_team = {"id": "123-id", "team": "cool-team", "integration_key": "1234-5678"}
self.integration = self.create_provider_integration(
provider="opsgenie", name="hello-world", external_id="hello-world", metadata=METADATA
)
self.sentry_app = self.create_sentry_app(
name="foo",
organization=self.organization,
is_alertable=True,
verify_install=False,
)
self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.organization, user=self.rpc_user
)
with assume_test_silo_mode_of(Integration, OrganizationIntegration):
self.integration.add_organization(self.organization, self.user)
self.org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=self.integration.id
)
self.org_integration.config = {"team_table": [self.og_team]}
self.org_integration.save()
self.metric_alert = self.create_alert_rule()
self.critical_trigger = self.create_alert_rule_trigger(
alert_rule=self.metric_alert, label="critical", alert_threshold=200
)
self.warning_trigger = self.create_alert_rule_trigger(
alert_rule=self.metric_alert, label="warning", alert_threshold=100
)
self.alert_rule_trigger_action_email = self.create_alert_rule_trigger_action(
alert_rule_trigger=self.warning_trigger,
type=AlertRuleTriggerAction.Type.EMAIL,
target_identifier=self.user.id,
target_type=AlertRuleTriggerAction.TargetType.USER,
# This shouldn't be migrated
sentry_app_config={
"priority": "p2",
},
)
self.alert_rule_trigger_action_integration = self.create_alert_rule_trigger_action(
target_identifier=self.og_team["id"],
type=AlertRuleTriggerAction.Type.OPSGENIE,
target_type=AlertRuleTriggerAction.TargetType.SPECIFIC,
integration=self.integration,
alert_rule_trigger=self.critical_trigger,
)
self.alert_rule_trigger_action_sentry_app = self.create_alert_rule_trigger_action(
type=AlertRuleTriggerAction.Type.SENTRY_APP,
target_type=AlertRuleTriggerAction.TargetType.SENTRY_APP,
sentry_app=self.sentry_app,
alert_rule_trigger=self.critical_trigger,
)
# set up ACI objects
self.create_migrated_metric_alert_objects(self.metric_alert)
self.create_migrated_metric_alert_rule_trigger_objects(
self.critical_trigger, DetectorPriorityLevel.HIGH, Condition.GREATER
)
self.create_migrated_metric_alert_rule_trigger_objects(
self.warning_trigger, DetectorPriorityLevel.MEDIUM, Condition.GREATER
)
def test_dual_write_metric_alert_trigger_action(self) -> None:
migrate_metric_action(self.alert_rule_trigger_action_email)
migrate_metric_action(self.alert_rule_trigger_action_integration)
migrate_metric_action(self.alert_rule_trigger_action_sentry_app)
assert_alert_rule_trigger_action_migrated(
self.alert_rule_trigger_action_email, Action.Type.EMAIL
)
assert_alert_rule_trigger_action_migrated(
self.alert_rule_trigger_action_integration, Action.Type.OPSGENIE
)
assert_alert_rule_trigger_action_migrated(
self.alert_rule_trigger_action_sentry_app, Action.Type.SENTRY_APP
)
# add some additional checks for sentry app and opsgenie actions to test with config
aarta_sentry_app_with_config = self.create_alert_rule_trigger_action(
type=AlertRuleTriggerAction.Type.SENTRY_APP,
target_type=AlertRuleTriggerAction.TargetType.SENTRY_APP,
sentry_app=self.sentry_app,
alert_rule_trigger=self.critical_trigger,
sentry_app_config=[
{
"name": "foo",
"value": "bar",
},
{
"name": "bufo",
"value": "bot",
},
],
)
aarta_opsgenie_with_config = self.create_alert_rule_trigger_action(
target_identifier=self.og_team["id"],
type=AlertRuleTriggerAction.Type.OPSGENIE,
target_type=AlertRuleTriggerAction.TargetType.SPECIFIC,
integration=self.integration,
alert_rule_trigger=self.critical_trigger,
sentry_app_config={
"priority": "P2",
},
)
migrate_metric_action(aarta_sentry_app_with_config)
migrate_metric_action(aarta_opsgenie_with_config)
assert_alert_rule_trigger_action_migrated(
aarta_sentry_app_with_config, Action.Type.SENTRY_APP
)
assert_alert_rule_trigger_action_migrated(aarta_opsgenie_with_config, Action.Type.OPSGENIE)
# broken config, should raise an error
aarta_sentry_app_with_config.sentry_app_config = {
"priority": "p2",
}
with pytest.raises(ValueError):
migrate_metric_action(aarta_sentry_app_with_config)
@mock.patch("sentry.workflow_engine.migration_helpers.alert_rule.logger")
def test_dual_write_metric_alert_trigger_action_no_type(
self, mock_logger: mock.MagicMock
) -> None:
"""
Test that if for some reason we don't find a match for Action.Type for the integration provider we return None and log.
"""
self.alert_rule_trigger_action_integration.type = 8
with pytest.raises(ValidationError):
migrate_metric_action(self.alert_rule_trigger_action_integration)
mock_logger.warning.assert_called_with(
"Could not find a matching Action.Type for the trigger action",
extra={
"alert_rule_trigger_action_id": self.alert_rule_trigger_action_integration.id,
},
)
| DualWriteAlertRuleTriggerActionTest |
python | encode__django-rest-framework | rest_framework/relations.py | {
"start": 1194,
"end": 1394
} | class ____(TypeError):
"""
Raised when `queryset.get()` failed due to an underlying `TypeError`.
Wrapping prevents calling code conflating this with unrelated errors.
"""
| ObjectTypeError |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 219534,
"end": 219784
} | class ____(VegaLiteSchema):
"""ConditionalAxisLabelAlign schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalAxisLabelAlign"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalAxisLabelAlign |
python | huggingface__transformers | src/transformers/pipelines/base.py | {
"start": 28294,
"end": 55630
} | class ____(_ScikitCompat, PushToHubMixin):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
operations:
Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output
Pipeline supports running on CPU or GPU through the device argument (see below).
Some pipeline, like for instance [`FeatureExtractionPipeline`] (`'feature-extraction'`) output large tensor object
as nested-lists. In order to avoid dumping such large structure as textual data we provide the `binary_output`
constructor argument. If set to `True`, the output will be stored in the pickle format.
"""
# These flags should be overridden for downstream pipelines. They indicate which preprocessing classes are
# used by each pipeline. The possible values are:
# - True (the class is mandatory, raise an error if it's not present in the repo)
# - None (the class is optional; it should be loaded if present in the repo but the pipeline can work without it)
# - False (the class is never used by the pipeline and should not be loaded even if present)
_load_processor = None
_load_image_processor = None
_load_feature_extractor = None
_load_tokenizer = None
# Pipelines that call `generate` have shared logic, e.g. preparing the generation config.
_pipeline_calls_generate = False
default_input_names = None
def __init__(
self,
model: "PreTrainedModel",
tokenizer: PreTrainedTokenizer | None = None,
feature_extractor: Optional[PreTrainedFeatureExtractor] = None,
image_processor: BaseImageProcessor | None = None,
processor: ProcessorMixin | None = None,
modelcard: ModelCard | None = None,
task: str = "",
device: Union[int, "torch.device"] | None = None,
binary_output: bool = False,
**kwargs,
):
# We need to pop them for _sanitize_parameters call later
_, _, _ = kwargs.pop("args_parser", None), kwargs.pop("torch_dtype", None), kwargs.pop("dtype", None)
self.task = task
self.model = model
self.tokenizer = tokenizer
self.feature_extractor = feature_extractor
self.image_processor = image_processor
self.processor = processor
self.modelcard = modelcard
# `accelerate` device map
hf_device_map = getattr(self.model, "hf_device_map", None)
if hf_device_map is not None and device is not None:
raise ValueError(
"The model has been loaded with `accelerate` and therefore cannot be moved to a specific device. Please "
"discard the `device` argument when creating your pipeline object."
)
if device is None:
if hf_device_map is not None:
# Take the first device used by `accelerate`.
device = next(iter(hf_device_map.values()))
else:
device = 0
if device == -1 and self.model.device is not None:
device = self.model.device
if isinstance(device, torch.device):
if (device.type == "xpu" and not is_torch_xpu_available(check_device=True)) or (
device.type == "hpu" and not is_torch_hpu_available()
):
raise ValueError(f'{device} is not available, you should use device="cpu" instead')
self.device = device
elif isinstance(device, str):
if ("xpu" in device and not is_torch_xpu_available(check_device=True)) or (
"hpu" in device and not is_torch_hpu_available()
):
raise ValueError(f'{device} is not available, you should use device="cpu" instead')
self.device = torch.device(device)
elif device < 0:
self.device = torch.device("cpu")
elif is_torch_mlu_available():
self.device = torch.device(f"mlu:{device}")
elif is_torch_musa_available():
self.device = torch.device(f"musa:{device}")
elif is_torch_cuda_available():
self.device = torch.device(f"cuda:{device}")
elif is_torch_npu_available():
self.device = torch.device(f"npu:{device}")
elif is_torch_hpu_available():
self.device = torch.device(f"hpu:{device}")
elif is_torch_xpu_available(check_device=True):
self.device = torch.device(f"xpu:{device}")
elif is_torch_mps_available():
self.device = torch.device(f"mps:{device}")
else:
self.device = torch.device("cpu")
if torch.distributed.is_available() and torch.distributed.is_initialized():
self.device = self.model.device
logger.debug(f"Device set to use {self.device}")
self.binary_output = binary_output
# We shouldn't call `model.to()` for models loaded with accelerate as well as the case that model is already on device
if (
self.model.device != self.device
and not (isinstance(self.device, int) and self.device < 0)
and hf_device_map is None
):
self.model.to(self.device)
# If it's a generation pipeline and the model can generate:
# 1 - create a local generation config. This is done to avoid side-effects on the model as we apply local
# tweaks to the generation config.
# 2 - load the assistant model if it is passed.
if self._pipeline_calls_generate and self.model.can_generate():
self.assistant_model, self.assistant_tokenizer = load_assistant_model(
self.model, kwargs.pop("assistant_model", None), kwargs.pop("assistant_tokenizer", None)
)
self.prefix = self.model.config.prefix if hasattr(self.model.config, "prefix") else None
# each pipeline with text generation capabilities should define its own default generation in a
# `_default_generation_config` class attribute
default_pipeline_generation_config = getattr(self, "_default_generation_config", GenerationConfig())
if hasattr(self.model, "_prepare_generation_config"):
# Uses `generate`'s logic to enforce the following priority of arguments:
# 1. user-defined config options in `**kwargs`
# 2. model's generation config values
# 3. pipeline's default generation config values
# NOTE: _prepare_generation_config creates a deep copy of the generation config before updating it,
# and returns all kwargs that were not used to update the generation config
prepared_generation_config, kwargs = self.model._prepare_generation_config(
generation_config=default_pipeline_generation_config, use_model_defaults=True, **kwargs
)
self.generation_config = prepared_generation_config
# if the `max_new_tokens` is set to the pipeline default, but `max_length` is set to a non-default
# value: let's honor `max_length`. E.g. we want Whisper's default `max_length=448` take precedence
# over over the pipeline's length default.
if (
default_pipeline_generation_config.max_new_tokens is not None # there's a pipeline default
and self.generation_config.max_new_tokens == default_pipeline_generation_config.max_new_tokens
and self.generation_config.max_length is not None
and self.generation_config.max_length != 20 # global default
):
self.generation_config.max_new_tokens = None
else:
# TODO (joao): no PT model should reach this line. However, some audio models with complex
# inheritance patterns do. Streamline those models such that this line is no longer needed.
# In those models, the default generation config is not (yet) used.
self.generation_config = copy.deepcopy(self.model.generation_config)
# Update the generation config with task specific params if they exist.
# NOTE: 1. `prefix` is pipeline-specific and doesn't exist in the generation config.
# 2. `task_specific_params` is a legacy feature and should be removed in a future version.
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
this_task_params = task_specific_params.get(task)
if "prefix" in this_task_params:
self.prefix = this_task_params.pop("prefix")
self.generation_config.update(**this_task_params)
# If the tokenizer has a pad token but the model doesn't, set it so that `generate` is aware of it.
if (
self.tokenizer is not None
and self.tokenizer.pad_token_id is not None
and self.generation_config.pad_token_id is None
):
self.generation_config.pad_token_id = self.tokenizer.pad_token_id
self.call_count = 0
self._batch_size = kwargs.pop("batch_size", None)
self._num_workers = kwargs.pop("num_workers", None)
self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs)
# In processor only mode, we can get the modality processors from the processor
if self.processor is not None and all(
[self.tokenizer is None, self.feature_extractor is None, self.image_processor is None]
):
self.tokenizer = getattr(self.processor, "tokenizer", None)
self.feature_extractor = getattr(self.processor, "feature_extractor", None)
self.image_processor = getattr(self.processor, "image_processor", None)
if self.image_processor is None and self.feature_extractor is not None:
if isinstance(self.feature_extractor, BaseImageProcessor):
# Backward compatible change, if users called
# ImageSegmentationPipeline(.., feature_extractor=MyFeatureExtractor())
# then we should keep working
self.image_processor = self.feature_extractor
def __repr__(self):
pipe_information = {
"model": self.model.__class__.__name__,
"dtype": str(self.dtype).split(".")[-1],
"device": self.device.type,
"input_modalities": self.model.input_modalities,
}
if self.model.can_generate():
pipe_information["output_modalities"] = self.model.output_modalities
return f"{self.__class__.__name__}: {pipe_information}"
def save_pretrained(
self,
save_directory: str | os.PathLike,
safe_serialization: bool = True,
**kwargs: Any,
):
"""
Save the pipeline's model and tokenizer.
Args:
save_directory (`str` or `os.PathLike`):
A path to the directory where to saved. It will be created if it doesn't exist.
safe_serialization (`str`):
Whether to save the model using `safetensors` or PyTorch serialization.
kwargs (`dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
if hasattr(self, "_registered_impl"):
# Add info to the config
pipeline_info = self._registered_impl.copy()
custom_pipelines = {}
for task, info in pipeline_info.items():
if info["impl"] != self.__class__:
continue
info = info.copy()
module_name = info["impl"].__module__
last_module = module_name.split(".")[-1]
# Change classes into their names/full names
info["impl"] = f"{last_module}.{info['impl'].__name__}"
info["pt"] = tuple(c.__name__ for c in info["pt"])
custom_pipelines[task] = info
self.model.config.custom_pipelines = custom_pipelines
# Save the pipeline custom code
custom_object_save(self, save_directory)
kwargs["safe_serialization"] = safe_serialization
self.model.save_pretrained(save_directory, **kwargs)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(save_directory, **kwargs)
if self.feature_extractor is not None:
self.feature_extractor.save_pretrained(save_directory, **kwargs)
if self.image_processor is not None:
self.image_processor.save_pretrained(save_directory, **kwargs)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X)
@property
def dtype(self) -> Optional["torch.dtype"]:
"""
Dtype of the model (if it's Pytorch model), `None` otherwise.
"""
return getattr(self.model, "dtype", None)
@property
def torch_dtype(self) -> Optional["torch.dtype"]:
"""
Torch dtype of the model (if it's Pytorch model), `None` otherwise.
"""
logger.warning_once("`torch_dtype` attribute is deprecated. Use `dtype` instead!")
return getattr(self.model, "dtype", None)
@contextmanager
def device_placement(self):
"""
Context Manager allowing tensor allocation on the user-specified device.
Returns:
Context manager
Examples:
```python
# Explicitly ask for tensor allocation on CUDA device :0
pipe = pipeline(..., device=0)
with pipe.device_placement():
# Every tensor allocation will be done on the request device
output = pipe(...)
```"""
if self.device.type == "cuda":
with torch.cuda.device(self.device):
yield
elif self.device.type == "mlu":
with torch.mlu.device(self.device):
yield
elif self.device.type == "musa":
with torch.musa.device(self.device):
yield
elif self.device.type == "xpu":
with torch.xpu.device(self.device):
yield
else:
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
Args:
inputs (keyword arguments that should be `torch.Tensor`, the rest is ignored):
The tensors to place on `self.device`.
Recursive on lists **only**.
Return:
`dict[str, torch.Tensor]`: The same as `inputs` but on the proper device.
"""
return self._ensure_tensor_on_device(inputs, self.device)
def _ensure_tensor_on_device(self, inputs, device):
if isinstance(inputs, ModelOutput):
return ModelOutput(
{name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
)
elif isinstance(inputs, dict):
return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
elif isinstance(inputs, UserDict):
return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()})
elif isinstance(inputs, list):
return [self._ensure_tensor_on_device(item, device) for item in inputs]
elif isinstance(inputs, tuple):
return tuple(self._ensure_tensor_on_device(item, device) for item in inputs)
elif isinstance(inputs, torch.Tensor):
return inputs.to(device)
else:
return inputs
def check_model_type(self, supported_models: list[str] | dict):
"""
Check if the model class is in supported by the pipeline.
Args:
supported_models (`list[str]` or `dict`):
The list of models supported by the pipeline, or a dictionary with model class values.
"""
if not isinstance(supported_models, list): # Create from a model mapping
supported_models_names = []
if self.task in SUPPORTED_PEFT_TASKS:
supported_models_names.extend(SUPPORTED_PEFT_TASKS[self.task])
model_name = None
for model_name in supported_models.values():
# Mapping can now contain tuples of models for the same configuration.
if isinstance(model_name, tuple):
supported_models_names.extend(list(model_name))
else:
supported_models_names.append(model_name)
if hasattr(supported_models, "_model_mapping"):
for model in supported_models._model_mapping._extra_content.values():
if isinstance(model_name, tuple):
supported_models_names.extend([m.__name__ for m in model])
else:
supported_models_names.append(model.__name__)
supported_models = supported_models_names
if self.model.__class__.__name__ not in supported_models:
logger.error(
f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are"
f" {supported_models}."
)
@abstractmethod
def _sanitize_parameters(self, **pipeline_parameters):
"""
_sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__`
methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`,
`forward` and `postprocess` methods. Do not fill dictionaries if the caller didn't specify a kwargs. This
lets you keep defaults in function signatures, which is more "natural".
It is not meant to be called directly, it will be automatically called and the final parameters resolved by
`__init__` and `__call__`
"""
raise NotImplementedError("_sanitize_parameters not implemented")
@abstractmethod
def preprocess(self, input_: Any, **preprocess_parameters: dict) -> dict[str, GenericTensor]:
"""
Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for
`_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items.
"""
raise NotImplementedError("preprocess not implemented")
@abstractmethod
def _forward(self, input_tensors: dict[str, GenericTensor], **forward_parameters: dict) -> ModelOutput:
"""
_forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might
involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess`
and `postprocess` to exist, so that the hot path, this method generally can run as fast as possible.
It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional
code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part
of the code (leading to faster inference).
"""
raise NotImplementedError("_forward not implemented")
@abstractmethod
def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: dict) -> Any:
"""
Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into
something more friendly. Generally it will output a list or a dict or results (containing just strings and
numbers).
"""
raise NotImplementedError("postprocess not implemented")
def get_inference_context(self):
return torch.no_grad
def forward(self, model_inputs, **forward_params):
with self.device_placement():
inference_context = self.get_inference_context()
with inference_context():
model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
model_outputs = self._forward(model_inputs, **forward_params)
model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu"))
return model_outputs
def get_iterator(
self, inputs, num_workers: int, batch_size: int, preprocess_params, forward_params, postprocess_params
):
if isinstance(inputs, collections.abc.Sized):
dataset = PipelineDataset(inputs, self.preprocess, preprocess_params)
else:
if num_workers > 1:
logger.warning(
"For iterable dataset using num_workers>1 is likely to result"
" in errors since everything is iterable, setting `num_workers=1`"
" to guarantee correctness."
)
num_workers = 1
dataset = PipelineIterator(inputs, self.preprocess, preprocess_params)
if "TOKENIZERS_PARALLELISM" not in os.environ:
logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# TODO hack by collating feature_extractor and image_processor
feature_extractor = self.feature_extractor if self.feature_extractor is not None else self.image_processor
collate_fn = no_collate_fn if batch_size == 1 else pad_collate_fn(self.tokenizer, feature_extractor)
dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=batch_size, collate_fn=collate_fn)
model_iterator = PipelineIterator(dataloader, self.forward, forward_params, loader_batch_size=batch_size)
final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
return final_iterator
def __call__(self, inputs, *args, num_workers=None, batch_size=None, **kwargs):
if args:
logger.warning(f"Ignoring args : {args}")
# Detect if inputs are a chat-style input(s) and cast as `Chat` or list of `Chat`
container_types = (list, tuple, types.GeneratorType)
if is_torch_available():
container_types = (*container_types, KeyDataset)
if isinstance(inputs, container_types):
if isinstance(inputs, types.GeneratorType):
inputs = list(inputs)
if is_valid_message(inputs[0]):
inputs = Chat(inputs)
elif isinstance(inputs[0], (list, tuple)) and all(chat and is_valid_message(chat[0]) for chat in inputs):
inputs = [Chat(chat) for chat in inputs]
if num_workers is None:
if self._num_workers is None:
num_workers = 0
else:
num_workers = self._num_workers
if batch_size is None:
if self._batch_size is None:
batch_size = 1
else:
batch_size = self._batch_size
preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs)
# Fuse __init__ params and __call__ params without modifying the __init__ ones.
preprocess_params = {**self._preprocess_params, **preprocess_params}
forward_params = {**self._forward_params, **forward_params}
postprocess_params = {**self._postprocess_params, **postprocess_params}
self.call_count += 1
if self.call_count > 10 and self.device.type == "cuda":
logger.warning_once(
"You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a"
" dataset",
)
is_dataset = Dataset is not None and isinstance(inputs, Dataset)
is_generator = isinstance(inputs, types.GeneratorType)
is_list = isinstance(inputs, list)
is_iterable = is_dataset or is_generator or is_list
can_use_iterator = is_dataset or is_generator or is_list
if is_list:
if can_use_iterator:
final_iterator = self.get_iterator(
inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params
)
outputs = list(final_iterator)
return outputs
else:
return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params)
elif can_use_iterator:
return self.get_iterator(
inputs, num_workers, batch_size, preprocess_params, forward_params, postprocess_params
)
elif is_iterable:
return self.iterate(inputs, preprocess_params, forward_params, postprocess_params)
elif isinstance(self, ChunkPipeline):
return next(
iter(
self.get_iterator(
[inputs], num_workers, batch_size, preprocess_params, forward_params, postprocess_params
)
)
)
else:
return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params):
return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs]
def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
model_inputs = self.preprocess(inputs, **preprocess_params)
model_outputs = self.forward(model_inputs, **forward_params)
outputs = self.postprocess(model_outputs, **postprocess_params)
return outputs
def iterate(self, inputs, preprocess_params, forward_params, postprocess_params):
# This function should become `get_iterator` again, this is a temporary
# easy solution.
for input_ in inputs:
yield self.run_single(input_, preprocess_params, forward_params, postprocess_params)
Pipeline.push_to_hub = copy_func(Pipeline.push_to_hub)
if Pipeline.push_to_hub.__doc__ is not None:
Pipeline.push_to_hub.__doc__ = Pipeline.push_to_hub.__doc__.format(
object="pipe", object_class="pipeline", object_files="pipeline file"
).replace(".from_pretrained", "")
| Pipeline |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 69947,
"end": 70473
} | class ____(PrefectFilterBaseModel):
"""Filter by `Worker.worker_config_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of work pool ids to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Worker.work_pool_id.in_(self.any_))
return filters
| WorkerFilterWorkPoolId |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 366743,
"end": 377645
} | class ____:
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
assert_raises(TypeError, _distr3_gen, name='dummy')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete | stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
assert_almost_equal(p, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
"""regression test for gh-6219"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
stats.genextreme.cdf(.5, 0)
stats.genextreme.pdf(.5, 0)
stats.genextreme.ppf(.5, 0)
stats.genextreme.logpdf(-np.inf, 0.0)
number_of_warnings_thrown = len(w)
assert_equal(number_of_warnings_thrown, 0)
def test_moments_gh22400():
# Regression test for gh-22400
# Check for correct results at c=0 with no warnings. While we're at it,
# check that NaN and sufficiently negative input produce NaNs, and output
# with `c=1` also agrees with reference values.
res = np.asarray(stats.genextreme.stats([0.0, np.nan, 1, -1.5], moments='mvsk'))
# Reference values for c=0 (Wikipedia)
mean = np.euler_gamma
var = np.pi**2 / 6
skew = 12 * np.sqrt(6) * special.zeta(3) / np.pi**3
kurt = 12 / 5
ref_0 = [mean, var, skew, kurt]
ref_1 = ref_3 = [np.nan]*4
ref_2 = [0, 1, -2, 6] # Wolfram Alpha, MaxStableDistribution[0, 1, -1]
assert_allclose(res[:, 0], ref_0, rtol=1e-14)
assert_equal(res[:, 1], ref_1)
assert_allclose(res[:, 2], ref_2, rtol=1e-14)
assert_equal(res[:, 3], ref_3)
def test_genextreme_entropy():
# regression test for gh-5181
euler_gamma = 0.5772156649015329
h = stats.genextreme.entropy(-1.0)
assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(0)
assert_allclose(h, euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(1.0)
assert_equal(h, 1)
h = stats.genextreme.entropy(-2.0, scale=10)
assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
h = stats.genextreme.entropy(10)
assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(-10)
assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
def test_genextreme_sf_isf():
# Expected values were computed using mpmath:
#
# import mpmath
#
# def mp_genextreme_sf(x, xi, mu=0, sigma=1):
# # Formula from wikipedia, which has a sign convention for xi that
# # is the opposite of scipy's shape parameter.
# if xi != 0:
# t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
# else:
# t = mpmath.exp(-(x - mu)/sigma)
# return 1 - mpmath.exp(-t)
#
# >>> mpmath.mp.dps = 1000
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
# >>> float(s)
# 1.6777205262585625e-57
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
# >>> float(s)
# 1.52587890625e-21
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
# >>> float(s)
# 0.00034218086528426593
x = 1e8
s = stats.genextreme.sf(x, -0.125)
assert_allclose(s, 1.6777205262585625e-57)
x2 = stats.genextreme.isf(s, -0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0.125)
assert_allclose(s, 1.52587890625e-21)
x2 = stats.genextreme.isf(s, 0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0)
assert_allclose(s, 0.00034218086528426593)
x2 = stats.genextreme.isf(s, 0)
assert_allclose(x2, x)
def test_burr12_ppf_small_arg():
prob = 1e-16
quantile = stats.burr12.ppf(prob, 2, 3)
# The expected quantile was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 100
# >>> prob = mpmath.mpf('1e-16')
# >>> c = mpmath.mpf(2)
# >>> d = mpmath.mpf(3)
# >>> float(((1-prob)**(-1/d) - 1)**(1/c))
# 5.7735026918962575e-09
assert_allclose(quantile, 5.7735026918962575e-09)
def test_invweibull_fit():
"""
Test fitting invweibull to data.
Here is a the same calculation in R:
> library(evd)
> library(fitdistrplus)
> x = c(1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99)
> result = fitdist(x, 'frechet', control=list(reltol=1e-13),
+ fix.arg=list(loc=0), start=list(shape=2, scale=3))
> result
Fitting of the distribution ' frechet ' by maximum likelihood
Parameters:
estimate Std. Error
shape 1.048482 0.2261815
scale 3.099456 0.8292887
Fixed parameters:
value
loc 0
"""
def optimizer(func, x0, args=(), disp=0):
return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12)
x = np.array([1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99])
c, loc, scale = stats.invweibull.fit(x, floc=0, optimizer=optimizer)
assert_allclose(c, 1.048482, rtol=5e-6)
assert loc == 0
assert_allclose(scale, 3.099456, rtol=5e-6)
# Expected values were computed with mpmath.
@pytest.mark.parametrize('x, c, expected',
[(3, 1.5, 0.175064510070713299327),
(2000, 1.5, 1.11802773877318715787e-5),
(2000, 9.25, 2.92060308832269637092e-31),
(1e15, 1.5, 3.16227766016837933199884e-23)])
def test_invweibull_sf(x, c, expected):
computed = stats.invweibull.sf(x, c)
assert_allclose(computed, expected, rtol=1e-15)
# Expected values were computed with mpmath.
@pytest.mark.parametrize('p, c, expected',
[(0.5, 2.5, 1.15789669836468183976),
(3e-18, 5, 3195.77171838060906447)])
def test_invweibull_isf(p, c, expected):
computed = stats.invweibull.isf(p, c)
assert_allclose(computed, expected, rtol=1e-15)
@pytest.mark.parametrize(
'df1,df2,x',
[(2, 2, [-0.5, 0.2, 1.0, 2.3]),
(4, 11, [-0.5, 0.2, 1.0, 2.3]),
(7, 17, [1, 2, 3, 4, 5])]
)
def test_ncf_edge_case(df1, df2, x):
# Test for edge case described in gh-11660.
# Non-central Fisher distribution when nc = 0
# should be the same as Fisher distribution.
nc = 0
expected_cdf = stats.f.cdf(x, df1, df2)
calculated_cdf = stats.ncf.cdf(x, df1, df2, nc)
assert_allclose(expected_cdf, calculated_cdf, rtol=1e-14)
# when ncf_gen._skip_pdf will be used instead of generic pdf,
# this additional test will be useful.
expected_pdf = stats.f.pdf(x, df1, df2)
calculated_pdf = stats.ncf.pdf(x, df1, df2, nc)
assert_allclose(expected_pdf, calculated_pdf, rtol=1e-6)
def test_ncf_variance():
# Regression test for gh-10658 (incorrect variance formula for ncf).
# The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for
# example, Wolfram Alpha with the expression
# Variance[NoncentralFRatioDistribution[2, 6, 4]]
# or with the implementation of the noncentral F distribution in the C++
# library Boost.
v = stats.ncf.var(2, 6, 4)
assert_allclose(v, 42.75, rtol=1e-14)
def test_ncf_cdf_spotcheck():
# Regression test for gh-15582 testing against values from R/MATLAB
# Generate check_val from R or MATLAB as follows:
# R: pf(20, df1 = 6, df2 = 33, ncp = 30.4) = 0.998921
# MATLAB: ncfcdf(20, 6, 33, 30.4) = 0.998921
scipy_val = stats.ncf.cdf(20, 6, 33, 30.4)
check_val = 0.998921
assert_allclose(check_val, np.round(scipy_val, decimals=6))
def test_ncf_ppf_issue_17026():
# Regression test for gh-17026
x = np.linspace(0, 1, 600)
x[0] = 1e-16
par = (0.1, 2, 5, 0, 1)
q = stats.ncf.ppf(x, *par)
q0 = [stats.ncf.ppf(xi, *par) for xi in x]
assert_allclose(q, q0)
| TestSubclassingNoShapes |
python | has2k1__plotnine | plotnine/positions/position_dodge.py | {
"start": 326,
"end": 3941
} | class ____(position):
"""
Dodge overlaps and place objects side-by-side
Parameters
----------
width :
Dodging width, when different to the width of the
individual elements. This is useful when you want
to align narrow geoms with wider geoms
preserve :
Should dodging preserve the total width of all elements
at a position, or the width of a single element?
"""
REQUIRED_AES = {"x"}
def __init__(
self,
width: Optional[float] = None,
preserve: Literal["total", "single"] = "total",
):
self.params = {
"width": width,
"preserve": preserve,
}
def setup_data(self, data, params):
# # e.g. geom_segment should be dodgeable
if "x" in data and "xend" in data:
if "xmin" not in data:
data["xmin"] = data.pop("x")
if "xmax" not in data:
data["xmax"] = data["xend"]
if "x" not in data and "xmin" in data and "xmax" in data:
data["x"] = (data["xmin"] + data["xmax"]) / 2
return super().setup_data(data, params)
def setup_params(self, data):
if (
("xmin" not in data)
and ("xmax" not in data)
and (self.params["width"] is None)
):
msg = "Width not defined. Set with `position_dodge(width = ?)`"
raise PlotnineError(msg)
params = copy(self.params)
if params["preserve"] == "total":
params["n"] = None
else:
# Count at the xmin values per panel and find the highest
# overall count
def max_xmin_values(gdf):
try:
n = gdf["xmin"].value_counts().max()
except KeyError:
n = gdf["x"].value_counts().max()
return pd.DataFrame({"n": [n]})
res = groupby_apply(data, "PANEL", max_xmin_values)
params["n"] = res["n"].max()
return params
@classmethod
def compute_panel(cls, data, scales, params):
return cls.collide(data, params=params)
@staticmethod
def strategy(data, params):
"""
Dodge overlapping interval
Assumes that each set has the same horizontal position.
"""
width = params["width"]
with suppress(TypeError):
iter(width)
width = np.asarray(width)
width = width[data.index]
udata_group = data["group"].drop_duplicates()
n = params.get("n", None)
if n is None:
n = len(udata_group)
if n == 1:
return data
if not all(col in data.columns for col in ["xmin", "xmax"]):
data["xmin"] = data["x"]
data["xmax"] = data["x"]
d_width = np.max(data["xmax"] - data["xmin"])
# Have a new group index from 1 to number of groups.
# This might be needed if the group numbers in this set don't
# include all of 1:n
udata_group = udata_group.sort_values()
groupidx = match(data["group"], udata_group)
groupidx = np.asarray(groupidx) + 1
# Find the center for each group, then use that to
# calculate xmin and xmax
data["x"] = data["x"] + width * ((groupidx - 0.5) / n - 0.5)
data["xmin"] = data["x"] - (d_width / n) / 2
data["xmax"] = data["x"] + (d_width / n) / 2
if "x" in data and "xend" in data:
data["x"] = data["xmin"]
data["xend"] = data["xmax"]
return data
| position_dodge |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 40919,
"end": 42032
} | class ____(Operator):
""" Decrement an upper b index. """
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bm.pop(i) - 1
m = Poly(1, _x) * prod(Poly(b - _x, _x) for b in bm) * prod(Poly(_x - b, _x) for b in bq)
A = Dummy('A')
D = Poly(bi - A, A)
n = Poly(z, A) * prod((D + 1 - a) for a in an) * prod((-D + a - 1) for a in ap)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement upper b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], A).as_expr().subs(A, bi - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement upper b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
| MeijerUnShiftA |
python | sqlalchemy__sqlalchemy | test/orm/test_attributes.py | {
"start": 37417,
"end": 40549
} | class ____(fixtures.TestBase):
"""test that infinite recursion due to incorrect backref assignments
is blocked.
"""
def test_scalar_set_type_assertion(self):
A, B, C = self._scalar_fixture()
c1 = C()
b1 = B()
assert_raises_message(
ValueError,
"Bidirectional attribute conflict detected: "
'Passing object <B at .*> to attribute "C.a" '
'triggers a modify event on attribute "C.b" '
'via the backref "B.c".',
setattr,
c1,
"a",
b1,
)
def test_collection_append_type_assertion(self):
A, B, C = self._collection_fixture()
c1 = C()
b1 = B()
assert_raises_message(
ValueError,
"Bidirectional attribute conflict detected: "
'Passing object <B at .*> to attribute "C.a" '
'triggers a modify event on attribute "C.b" '
'via the backref "B.c".',
c1.a.append,
b1,
)
def _scalar_fixture(self):
class A:
pass
class B:
pass
class C:
pass
instrumentation.register_class(A)
instrumentation.register_class(B)
instrumentation.register_class(C)
_register_attribute(C, "a", backref="c", useobject=True)
_register_attribute(C, "b", backref="c", useobject=True)
_register_attribute(A, "c", backref="a", useobject=True, uselist=True)
_register_attribute(B, "c", backref="b", useobject=True, uselist=True)
return A, B, C
def _collection_fixture(self):
class A:
pass
class B:
pass
class C:
pass
instrumentation.register_class(A)
instrumentation.register_class(B)
instrumentation.register_class(C)
_register_attribute(C, "a", backref="c", useobject=True, uselist=True)
_register_attribute(C, "b", backref="c", useobject=True, uselist=True)
_register_attribute(A, "c", backref="a", useobject=True)
_register_attribute(B, "c", backref="b", useobject=True)
return A, B, C
def _broken_collection_fixture(self):
class A:
pass
class B:
pass
instrumentation.register_class(A)
instrumentation.register_class(B)
_register_attribute(A, "b", backref="a1", useobject=True)
_register_attribute(B, "a1", backref="b", useobject=True, uselist=True)
_register_attribute(B, "a2", backref="b", useobject=True, uselist=True)
return A, B
def test_broken_collection_assertion(self):
A, B = self._broken_collection_fixture()
b1 = B()
a1 = A()
assert_raises_message(
ValueError,
"Bidirectional attribute conflict detected: "
'Passing object <A at .*> to attribute "B.a2" '
'triggers a modify event on attribute "B.a1" '
'via the backref "A.b".',
b1.a2.append,
a1,
)
| CyclicBackrefAssertionTest |
python | apache__airflow | providers/tableau/src/airflow/providers/tableau/sensors/tableau.py | {
"start": 1170,
"end": 2791
} | class ____(BaseSensorOperator):
"""
Watches the status of a Tableau Server Job.
.. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#jobs
:param job_id: Id of the job to watch.
:param site_id: The id of the site where the workbook belongs to.
:param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
containing the credentials to authenticate to the Tableau Server.
"""
template_fields: Sequence[str] = ("job_id",)
def __init__(
self,
*,
job_id: str,
site_id: str | None = None,
tableau_conn_id: str = "tableau_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.tableau_conn_id = tableau_conn_id
self.job_id = job_id
self.site_id = site_id
def poke(self, context: Context) -> bool:
"""
Pokes until the job has successfully finished.
:param context: The task context during execution.
:return: True if it succeeded and False if not.
"""
with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
finish_code = tableau_hook.get_job_status(job_id=self.job_id)
self.log.info("Current finishCode is %s (%s)", finish_code.name, finish_code.value)
if finish_code in (TableauJobFinishCode.ERROR, TableauJobFinishCode.CANCELED):
message = "The Tableau Refresh Workbook Job failed!"
raise TableauJobFailedException(message)
return finish_code == TableauJobFinishCode.SUCCESS
| TableauJobStatusSensor |
python | getsentry__sentry | src/sentry/apidocs/schema.py | {
"start": 94,
"end": 916
} | class ____(AutoSchema):
"""DRF Documentation Schema for sentry endpoints"""
@property
def view_func(self):
return getattr(self.view, self.method.lower())
def get_operation_id(self) -> str:
"""
First line of an endpoint's docstring is the operation IDZ
"""
docstring = get_doc(self.view_func).splitlines()
if len(docstring) > 1:
return docstring[0]
return super().get_operation_id()
def get_description(self) -> str: # type: ignore[override]
"""
Docstring is used as a description for the endpoint. The operation ID is included in this.
"""
docstring = get_doc(self.view_func)
if len(docstring.splitlines()) > 1:
return docstring
return super().get_description()
| SentrySchema |
python | py-pdf__pypdf | pypdf/generic/_data_structures.py | {
"start": 8414,
"end": 24765
} | class ____(dict[Any, Any], PdfObject):
def replicate(
self,
pdf_dest: PdfWriterProtocol,
) -> "DictionaryObject":
d__ = cast(
"DictionaryObject",
self._reference_clone(self.__class__(), pdf_dest, False),
)
for k, v in self.items():
d__[k.replicate(pdf_dest)] = (
v.replicate(pdf_dest) if hasattr(v, "replicate") else v
)
return d__
def clone(
self,
pdf_dest: PdfWriterProtocol,
force_duplicate: bool = False,
ignore_fields: Optional[Sequence[Union[str, int]]] = (),
) -> "DictionaryObject":
"""Clone object into pdf_dest."""
try:
if self.indirect_reference.pdf == pdf_dest and not force_duplicate: # type: ignore
return self
except Exception:
pass
visited: set[tuple[int, int]] = set() # (idnum, generation)
d__ = cast(
"DictionaryObject",
self._reference_clone(self.__class__(), pdf_dest, force_duplicate),
)
if ignore_fields is None:
ignore_fields = []
if len(d__.keys()) == 0:
d__._clone(self, pdf_dest, force_duplicate, ignore_fields, visited)
return d__
def _clone(
self,
src: "DictionaryObject",
pdf_dest: PdfWriterProtocol,
force_duplicate: bool,
ignore_fields: Optional[Sequence[Union[str, int]]],
visited: set[tuple[int, int]], # (idnum, generation)
) -> None:
"""
Update the object from src.
Args:
src: "DictionaryObject":
pdf_dest:
force_duplicate:
ignore_fields:
"""
# First we remove the ignore_fields
# that are for a limited number of levels
assert ignore_fields is not None
ignore_fields = list(ignore_fields)
x = 0
while x < len(ignore_fields):
if isinstance(ignore_fields[x], int):
if cast(int, ignore_fields[x]) <= 0:
del ignore_fields[x]
del ignore_fields[x]
continue
ignore_fields[x] -= 1 # type:ignore
x += 1
# Check if this is a chain list, we need to loop to prevent recur
if any(
field not in ignore_fields
and field in src
and isinstance(src.raw_get(field), IndirectObject)
and isinstance(src[field], DictionaryObject)
and (
src.get("/Type", None) is None
or cast(DictionaryObject, src[field]).get("/Type", None) is None
or src.get("/Type", None)
== cast(DictionaryObject, src[field]).get("/Type", None)
)
for field in ["/Next", "/Prev", "/N", "/V"]
):
ignore_fields = list(ignore_fields)
for lst in (("/Next", "/Prev"), ("/N", "/V")):
for k in lst:
objs = []
if (
k in src
and k not in self
and isinstance(src.raw_get(k), IndirectObject)
and isinstance(src[k], DictionaryObject)
# If need to go further the idea is to check
# that the types are the same
and (
src.get("/Type", None) is None
or cast(DictionaryObject, src[k]).get("/Type", None) is None
or src.get("/Type", None)
== cast(DictionaryObject, src[k]).get("/Type", None)
)
):
cur_obj: Optional[DictionaryObject] = cast(
"DictionaryObject", src[k]
)
prev_obj: Optional[DictionaryObject] = self
while cur_obj is not None:
clon = cast(
"DictionaryObject",
cur_obj._reference_clone(
cur_obj.__class__(), pdf_dest, force_duplicate
),
)
# Check to see if we've previously processed our item
if clon.indirect_reference is not None:
idnum = clon.indirect_reference.idnum
generation = clon.indirect_reference.generation
if (idnum, generation) in visited:
cur_obj = None
break
visited.add((idnum, generation))
objs.append((cur_obj, clon))
assert prev_obj is not None
prev_obj[NameObject(k)] = clon.indirect_reference
prev_obj = clon
try:
if cur_obj == src:
cur_obj = None
else:
cur_obj = cast("DictionaryObject", cur_obj[k])
except Exception:
cur_obj = None
for s, c in objs:
c._clone(
s, pdf_dest, force_duplicate, ignore_fields, visited
)
for k, v in src.items():
if k not in ignore_fields:
if isinstance(v, StreamObject):
if not hasattr(v, "indirect_reference"):
v.indirect_reference = None
vv = v.clone(pdf_dest, force_duplicate, ignore_fields)
assert vv.indirect_reference is not None
self[k.clone(pdf_dest)] = vv.indirect_reference
elif k not in self:
self[NameObject(k)] = (
v.clone(pdf_dest, force_duplicate, ignore_fields)
if hasattr(v, "clone")
else v
)
def hash_bin(self) -> int:
"""
Used to detect modified object.
Returns:
Hash considering type and value.
"""
return hash(
(self.__class__, tuple(((k, v.hash_bin()) for k, v in self.items())))
)
def raw_get(self, key: Any) -> Any:
return dict.__getitem__(self, key)
def get_inherited(self, key: str, default: Any = None) -> Any:
"""
Returns the value of a key or from the parent if not found.
If not found returns default.
Args:
key: string identifying the field to return
default: default value to return
Returns:
Current key or inherited one, otherwise default value.
"""
if key in self:
return self[key]
try:
if "/Parent" not in self:
return default
raise KeyError("Not present")
except KeyError:
return cast("DictionaryObject", self["/Parent"].get_object()).get_inherited(
key, default
)
def __setitem__(self, key: Any, value: Any) -> Any:
if not isinstance(key, PdfObject):
raise ValueError("Key must be a PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("Value must be a PdfObject")
return dict.__setitem__(self, key, value)
def setdefault(self, key: Any, value: Optional[Any] = None) -> Any:
if not isinstance(key, PdfObject):
raise ValueError("Key must be a PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("Value must be a PdfObject")
return dict.setdefault(self, key, value)
def __getitem__(self, key: Any) -> PdfObject:
return dict.__getitem__(self, key).get_object()
@property
def xmp_metadata(self) -> Optional[XmpInformationProtocol]:
"""
Retrieve XMP (Extensible Metadata Platform) data relevant to this
object, if available.
See Table 347 — Additional entries in a metadata stream dictionary.
Returns:
Returns a :class:`~pypdf.xmp.XmpInformation` instance
that can be used to access XMP metadata from the document. Can also
return None if no metadata was found on the document root.
"""
from ..xmp import XmpInformation # noqa: PLC0415
metadata = self.get("/Metadata", None)
if is_null_or_none(metadata):
return None
assert metadata is not None, "mypy"
metadata = metadata.get_object()
return XmpInformation(metadata)
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes] = None
) -> None:
if encryption_key is not None: # deprecated
deprecation_no_replacement(
"the encryption_key parameter of write_to_stream", "5.0.0"
)
stream.write(b"<<\n")
for key, value in self.items():
if len(key) > 2 and key[1] == "%" and key[-1] == "%":
continue
key.write_to_stream(stream, encryption_key)
stream.write(b" ")
value.write_to_stream(stream)
stream.write(b"\n")
stream.write(b">>")
@staticmethod
def read_from_stream(
stream: StreamType,
pdf: Optional[PdfReaderProtocol],
forced_encoding: Union[None, str, list[str], dict[int, str]] = None,
) -> "DictionaryObject":
def get_next_obj_pos(
p: int, p1: int, rem_gens: list[int], pdf: PdfReaderProtocol
) -> int:
out = p1
for gen in rem_gens:
loc = pdf.xref[gen]
try:
values = [x for x in loc.values() if p < x <= p1]
if values:
out = min(out, *values)
except ValueError:
pass
return out
def read_unsized_from_stream(
stream: StreamType, pdf: PdfReaderProtocol
) -> bytes:
# we are just pointing at beginning of the stream
eon = get_next_obj_pos(stream.tell(), 2**32, list(pdf.xref), pdf) - 1
curr = stream.tell()
rw = stream.read(eon - stream.tell())
p = rw.find(b"endstream")
if p < 0:
raise PdfReadError(
f"Unable to find 'endstream' marker for obj starting at {curr}."
)
stream.seek(curr + p + 9)
return rw[: p - 1]
tmp = stream.read(2)
if tmp != b"<<":
raise PdfReadError(
f"Dictionary read error at byte {hex(stream.tell())}: "
"stream must begin with '<<'"
)
data: dict[Any, Any] = {}
while True:
tok = read_non_whitespace(stream)
if tok == b"\x00":
continue
if tok == b"%":
stream.seek(-1, 1)
skip_over_comment(stream)
continue
if not tok:
raise PdfStreamError(STREAM_TRUNCATED_PREMATURELY)
if tok == b">":
stream.read(1)
break
stream.seek(-1, 1)
try:
try:
key = read_object(stream, pdf)
if isinstance(key, NullObject):
break
if not isinstance(key, NameObject):
raise PdfReadError(
f"Expecting a NameObject for key but found {key!r}"
)
except PdfReadError as exc:
if pdf is not None and pdf.strict:
raise
logger_warning(exc.__repr__(), __name__)
continue
tok = read_non_whitespace(stream)
stream.seek(-1, 1)
value = read_object(stream, pdf, forced_encoding)
except Exception as exc:
if pdf is not None and pdf.strict:
raise PdfReadError(exc.__repr__())
logger_warning(exc.__repr__(), __name__)
retval = DictionaryObject()
retval.update(data)
return retval # return partial data
if not data.get(key):
data[key] = value
else:
# multiple definitions of key not permitted
msg = (
f"Multiple definitions in dictionary at byte "
f"{hex(stream.tell())} for key {key}"
)
if pdf is not None and pdf.strict:
raise PdfReadError(msg)
logger_warning(msg, __name__)
pos = stream.tell()
s = read_non_whitespace(stream)
if s == b"s" and stream.read(5) == b"tream":
eol = stream.read(1)
# Occasional PDF file output has spaces after 'stream' keyword but before EOL.
# patch provided by Danial Sandler
while eol == b" ":
eol = stream.read(1)
if eol not in (b"\n", b"\r"):
raise PdfStreamError("Stream data must be followed by a newline")
if eol == b"\r" and stream.read(1) != b"\n":
stream.seek(-1, 1)
# this is a stream object, not a dictionary
if SA.LENGTH not in data:
if pdf is not None and pdf.strict:
raise PdfStreamError("Stream length not defined")
logger_warning(
f"Stream length not defined @pos={stream.tell()}", __name__
)
data[NameObject(SA.LENGTH)] = NumberObject(-1)
length = data[SA.LENGTH]
if isinstance(length, IndirectObject):
t = stream.tell()
assert pdf is not None, "mypy"
length = pdf.get_object(length)
stream.seek(t, 0)
if length is None: # if the PDF is damaged
length = -1
pstart = stream.tell()
if length >= 0:
data["__streamdata__"] = stream.read(length)
else:
data["__streamdata__"] = read_until_regex(
stream, re.compile(b"endstream")
)
e = read_non_whitespace(stream)
ndstream = stream.read(8)
if (e + ndstream) != b"endstream":
# the odd PDF file has a length that is too long, so
# we need to read backwards to find the "endstream" ending.
# ReportLab (unknown version) generates files with this bug,
# and Python users into PDF files tend to be our audience.
# we need to do this to correct the streamdata and chop off
# an extra character.
pos = stream.tell()
stream.seek(-10, 1)
end = stream.read(9)
if end == b"endstream":
# we found it by looking back one character further.
data["__streamdata__"] = data["__streamdata__"][:-1]
elif pdf is not None and not pdf.strict:
stream.seek(pstart, 0)
data["__streamdata__"] = read_unsized_from_stream(stream, pdf)
pos = stream.tell()
else:
stream.seek(pos, 0)
raise PdfReadError(
"Unable to find 'endstream' marker after stream at byte "
f"{hex(stream.tell())} (nd='{ndstream!r}', end='{end!r}')."
)
else:
stream.seek(pos, 0)
if "__streamdata__" in data:
return StreamObject.initialize_from_dictionary(data)
retval = DictionaryObject()
retval.update(data)
return retval
| DictionaryObject |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 199068,
"end": 202519
} | class ____(rv_continuous):
r"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is:
.. math::
f(x) = \frac{\exp(-x)}
{(1+\exp(-x))^2}
`logistic` is a special case of `genlogistic` with ``c=1``.
Remark that the survival function (``logistic.sf``) is equal to the
Fermi-Dirac distribution describing fermionic statistics.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _rvs(self, size=None, random_state=None):
return random_state.logistic(size=size)
def _pdf(self, x):
# logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
y = -np.abs(x)
return y - 2. * sc.log1p(np.exp(y))
def _cdf(self, x):
return sc.expit(x)
def _logcdf(self, x):
return sc.log_expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _logsf(self, x):
return sc.log_expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# https://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
n = len(data)
# rv_continuous provided guesses
loc, scale = self._fitstart(data)
# these are trumped by user-provided guesses
loc, scale = kwds.get('loc', loc), kwds.get('scale', scale)
# the maximum likelihood estimators `a` and `b` of the location and
# scale parameters are roots of the two equations described in `func`.
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings, and
# Peacock (2000), Page 130
def dl_dloc(loc, scale=fscale):
c = (data - loc) / scale
return np.sum(sc.expit(c)) - n/2
def dl_dscale(scale, loc=floc):
c = (data - loc) / scale
return np.sum(c*np.tanh(c/2)) - n
def func(params):
loc, scale = params
return dl_dloc(loc, scale), dl_dscale(scale, loc)
if fscale is not None and floc is None:
res = optimize.root(dl_dloc, (loc,))
loc = res.x[0]
scale = fscale
elif floc is not None and fscale is None:
res = optimize.root(dl_dscale, (scale,))
scale = res.x[0]
loc = floc
else:
res = optimize.root(func, (loc, scale))
loc, scale = res.x
# Note: gh-18176 reported data for which the reported MLE had
# `scale < 0`. To fix the bug, we return abs(scale). This is OK because
# `dl_dscale` and `dl_dloc` are even and odd functions of `scale`,
# respectively, so if `-scale` is a solution, so is `scale`.
scale = abs(scale)
return ((loc, scale) if res.success
else super().fit(data, *args, **kwds))
logistic = logistic_gen(name='logistic')
| logistic_gen |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_udp_port.py | {
"start": 700,
"end": 1692
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_udp_port"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_udp_port(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidUdpPort |
python | apache__airflow | airflow-core/src/airflow/models/deadline.py | {
"start": 2382,
"end": 7970
} | class ____(Base):
"""A Deadline is a 'need-by' date which triggers a callback if the provided time has passed."""
__tablename__ = "deadline"
id: Mapped[str] = mapped_column(UUIDType(binary=False), primary_key=True, default=uuid6.uuid7)
# If the Deadline Alert is for a DAG, store the DAG run ID from the dag_run.
dagrun_id: Mapped[int | None] = mapped_column(
Integer, ForeignKey("dag_run.id", ondelete="CASCADE"), nullable=True
)
dagrun = relationship("DagRun", back_populates="deadlines")
# The time after which the Deadline has passed and the callback should be triggered.
deadline_time: Mapped[datetime] = mapped_column(UtcDateTime, nullable=False)
# Whether the deadline has been marked as missed by the scheduler
missed: Mapped[bool] = mapped_column(Boolean, nullable=False)
# Callback that will run when this deadline is missed
callback_id: Mapped[str] = mapped_column(
UUIDType(binary=False), ForeignKey("callback.id", ondelete="CASCADE"), nullable=False
)
callback = relationship("Callback", uselist=False, cascade="all, delete-orphan", single_parent=True)
__table_args__ = (Index("deadline_missed_deadline_time_idx", missed, deadline_time, unique=False),)
def __init__(
self,
deadline_time: datetime,
callback: CallbackDefinitionProtocol,
dagrun_id: int,
dag_id: str | None = None,
):
super().__init__()
self.deadline_time = deadline_time
self.dagrun_id = dagrun_id
self.missed = False
self.callback = Callback.create_from_sdk_def(
callback_def=callback, prefix=CALLBACK_METRICS_PREFIX, dag_id=dag_id
)
def __repr__(self):
def _determine_resource() -> tuple[str, str]:
"""Determine the type of resource based on which values are present."""
if self.dagrun_id:
# The deadline is for a Dag run:
return "DagRun", f"Dag: {self.dagrun.dag_id} Run: {self.dagrun_id}"
return "Unknown", ""
resource_type, resource_details = _determine_resource()
return (
f"[{resource_type} Deadline] {resource_details} needed by "
f"{self.deadline_time} or run: {self.callback}"
)
@classmethod
def prune_deadlines(cls, *, session: Session, conditions: dict[Mapped, Any]) -> int:
"""
Remove deadlines from the table which match the provided conditions and return the number removed.
NOTE: This should only be used to remove deadlines which are associated with
successful events (DagRuns, etc). If the deadline was missed, it will be
handled by the scheduler.
:param conditions: Dictionary of conditions to evaluate against.
:param session: Session to use.
"""
from airflow.models import DagRun # Avoids circular import
# Assemble the filter conditions.
filter_conditions = [column == value for column, value in conditions.items()]
if not filter_conditions:
return 0
try:
# Get deadlines which match the provided conditions and their associated DagRuns.
deadline_dagrun_pairs = session.execute(
select(Deadline, DagRun).join(DagRun).where(and_(*filter_conditions))
).all()
except AttributeError as e:
logger.exception("Error resolving deadlines: %s", e)
raise
if not deadline_dagrun_pairs:
return 0
deleted_count = 0
dagruns_to_refresh = set()
for deadline, dagrun in deadline_dagrun_pairs:
if dagrun.end_date <= deadline.deadline_time:
# If the DagRun finished before the Deadline:
session.delete(deadline)
Stats.incr(
"deadline_alerts.deadline_not_missed",
tags={"dag_id": dagrun.dag_id, "dagrun_id": dagrun.run_id},
)
deleted_count += 1
dagruns_to_refresh.add(dagrun)
session.flush()
logger.debug("%d deadline records were deleted matching the conditions %s", deleted_count, conditions)
# Refresh any affected DAG runs.
for dagrun in dagruns_to_refresh:
session.refresh(dagrun)
return deleted_count
def handle_miss(self, session: Session):
"""Handle a missed deadline by queueing the callback."""
def get_simple_context():
from airflow.api_fastapi.core_api.datamodels.dag_run import DAGRunResponse
from airflow.models import DagRun
# TODO: Use the TaskAPI from within Triggerer to fetch full context instead of sending this context
# from the scheduler
# Fetch the DagRun from the database again to avoid errors when self.dagrun's relationship fields
# are not in the current session.
dagrun = session.get(DagRun, self.dagrun_id)
return {
"dag_run": DAGRunResponse.model_validate(dagrun).model_dump(mode="json"),
"deadline": {"id": self.id, "deadline_time": self.deadline_time},
}
self.callback.data["kwargs"] = self.callback.data["kwargs"] | {"context": get_simple_context()}
self.missed = True
self.callback.queue()
session.add(self)
Stats.incr(
"deadline_alerts.deadline_missed",
tags={"dag_id": self.dagrun.dag_id, "dagrun_id": self.dagrun.run_id},
)
| Deadline |
python | ray-project__ray | rllib/evaluation/collectors/simple_list_collector.py | {
"start": 1186,
"end": 3613
} | class ____:
"""Collects already postprocessed (single agent) samples for one policy.
Samples come in through already postprocessed SampleBatches, which
contain single episode/trajectory data for a single agent and are then
appended to this policy's buffers.
"""
def __init__(self, policy: Policy):
"""Initializes a _PolicyCollector instance.
Args:
policy: The policy object.
"""
self.batches = []
self.policy = policy
# The total timestep count for all agents that use this policy.
# NOTE: This is not an env-step count (across n agents). AgentA and
# agentB, both using this policy, acting in the same episode and both
# doing n steps would increase the count by 2*n.
self.agent_steps = 0
def add_postprocessed_batch_for_training(
self, batch: SampleBatch, view_requirements: ViewRequirementsDict
) -> None:
"""Adds a postprocessed SampleBatch (single agent) to our buffers.
Args:
batch: An individual agent's (one trajectory)
SampleBatch to be added to the Policy's buffers.
view_requirements: The view
requirements for the policy. This is so we know, whether a
view-column needs to be copied at all (not needed for
training).
"""
# Add the agent's trajectory length to our count.
self.agent_steps += batch.count
# And remove columns not needed for training.
for view_col, view_req in view_requirements.items():
if view_col in batch and not view_req.used_for_training:
del batch[view_col]
self.batches.append(batch)
def build(self):
"""Builds a SampleBatch for this policy from the collected data.
Also resets all buffers for further sample collection for this policy.
Returns:
SampleBatch: The SampleBatch with all thus-far collected data for
this policy.
"""
# Create batch from our buffers.
batch = concat_samples(self.batches)
# Clear batches for future samples.
self.batches = []
# Reset agent steps to 0.
self.agent_steps = 0
# Add num_grad_updates counter to the policy's batch.
batch.num_grad_updates = self.policy.num_grad_updates
return batch
| _PolicyCollector |
python | PrefectHQ__prefect | src/prefect/server/events/ordering/memory.py | {
"start": 851,
"end": 1128
} | class ____(Exception):
"""Indicates that an event is currently being processed and should not be processed
until it is finished. This may happen due to concurrent processing."""
def __init__(self, event: ReceivedEvent):
self.event = event
| EventBeingProcessed |
python | keras-team__keras | keras/src/utils/timeseries_dataset_utils_test.py | {
"start": 105,
"end": 7771
} | class ____(testing.TestCase):
def test_basics(self):
# Test ordering, targets, sequence length, batch size
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5
)
# Expect 19 batches
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 18:
self.assertEqual(inputs.shape, (5, 9))
if i == 18:
# Last batch: size 2
self.assertEqual(inputs.shape, (2, 9))
# Check target values
self.assertAllClose(targets, inputs[:, 0] * 2)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
self.assertAllClose(
inputs[j], np.arange(i * 5 + j, i * 5 + j + 9)
)
def test_timeseries_regression(self):
# Test simple timeseries regression use case
data = np.arange(10)
offset = 3
targets = data[offset:]
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=offset, batch_size=1
)
i = 0
for batch in dataset:
self.assertLen(batch, 2)
inputs, targets = batch
self.assertEqual(inputs.shape, (1, 3))
# Check values
self.assertAllClose(targets[0], data[offset + i])
self.assertAllClose(inputs[0], data[i : i + offset])
i += 1
self.assertEqual(i, 7) # Expect 7 batches
def test_no_targets(self):
data = np.arange(50)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=10, batch_size=5
)
# Expect 9 batches
i = None
for i, batch in enumerate(dataset):
if i < 8:
self.assertEqual(batch.shape, (5, 10))
elif i == 8:
self.assertEqual(batch.shape, (1, 10))
for j in range(min(5, len(batch))):
# Check each sample in the batch
self.assertAllClose(
batch[j], np.arange(i * 5 + j, i * 5 + j + 10)
)
self.assertEqual(i, 8)
def test_shuffle(self):
# Test cross-epoch random order and seed determinism
data = np.arange(10)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
first_seq = None
for x, y in dataset.take(1):
self.assertNotAllClose(x, np.arange(0, 5))
self.assertAllClose(x[:, 0] * 2, y)
first_seq = x
# Check that a new iteration with the same dataset yields different
# results
for x, _ in dataset.take(1):
self.assertNotAllClose(x, first_seq)
# Check determinism with same seed
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
for x, _ in dataset.take(1):
self.assertAllClose(x, first_seq)
def test_sampling_rate(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sampling_rate=2
)
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 16:
self.assertEqual(inputs.shape, (5, 9))
if i == 16:
# Last batch: size 4
self.assertEqual(inputs.shape, (4, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 + j
end_index = start_index + 9 * 2
self.assertAllClose(
inputs[j], np.arange(start_index, end_index, 2)
)
def test_sequence_stride(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sequence_stride=3
)
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 6:
self.assertEqual(inputs.shape, (5, 9))
if i == 6:
# Last batch: size 1
self.assertEqual(inputs.shape, (1, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 * 3 + j * 3
end_index = start_index + 9
self.assertAllClose(
inputs[j], np.arange(start_index, end_index)
)
def test_start_and_end_index(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
None,
sequence_length=9,
batch_size=5,
sequence_stride=3,
sampling_rate=2,
start_index=10,
end_index=90,
)
for batch in dataset:
self.assertLess(np.max(batch[0]), 90)
self.assertGreater(np.min(batch[0]), 9)
def test_errors(self):
# bad start index
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=-1
)
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=11
)
# bad end index
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=-1
)
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=11
)
# bad sampling_rate
with self.assertRaisesRegex(ValueError, "`sampling_rate` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sampling_rate=0
)
# bad sequence stride
with self.assertRaisesRegex(ValueError, "`sequence_stride` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sequence_stride=0
)
def test_not_batched(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=9, batch_size=None, shuffle=True
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 1)
| TimeseriesDatasetTest |
python | apache__airflow | airflow-ctl/tests/airflow_ctl/ctl/test_cli_config.py | {
"start": 4881,
"end": 9799
} | class ____:
@classmethod
def _save_temp_operations_py(cls, temp_file: str, file_content) -> None:
"""
Save a temporary operations.py file with a simple Command Class to test the command factory.
"""
with open(temp_file, "w") as f:
f.write(dedent(file_content))
def teardown_method(self):
"""
Remove the temporary file after the test.
"""
try:
import os
os.remove("test_command.py")
except FileNotFoundError:
pass
def test_command_factory(
self, no_op_method, test_args_create, test_args_list, test_args_get, test_args_delete
):
"""
Test the command factory.
"""
# Create temporary file with pytest and write simple Command Class(check airflow-ctl/src/airflowctl/api/operations.py) to file
# to test the command factory
# Create a temporary file
temp_file = "test_command.py"
self._save_temp_operations_py(
temp_file=temp_file,
file_content="""
class NotAnOperation:
def test_method(self):
'''I am not included in the command factory.'''
pass
class BackfillsOperations(BaseOperations):
def create(self, backfill: BackfillPostBody) -> BackfillResponse | ServerResponseError:
try:
self.response = self.client.post("backfills", json=backfill.model_dump(mode="json"))
return BackfillResponse.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def list(self) -> BackfillListResponse:
params = {"dag_id": dag_id} if dag_id else {}
self.response = self.client.get("backfills", params=params)
return BackfillListResponse.model_validate_json(self.response.content)
def get(self, backfill_id: str) -> BackfillResponse | ServerResponseError:
self.response = self.client.get(f"backfills/{backfill_id}")
return BackfillResponse.model_validate_json(self.response.content)
def delete(self, backfill_id: str) -> ServerResponseError | None:
self.response = self.client.delete(f"backfills/{backfill_id}")
return None
""",
)
command_factory = CommandFactory(file_path=temp_file)
generated_group_commands = command_factory.group_commands
for generated_group_command in generated_group_commands:
assert isinstance(generated_group_command, GroupCommand)
assert generated_group_command.name == "backfills"
assert generated_group_command.help == "Perform Backfills operations"
for sub_command in generated_group_command.subcommands:
if sub_command.name == "create":
for arg, test_arg in zip(sub_command.args, test_args_create):
assert arg.flags[0] == test_arg[0]
assert arg.kwargs["help"] == test_arg[1]["help"]
assert arg.kwargs["action"] == test_arg[1]["action"]
assert arg.kwargs["default"] == test_arg[1]["default"]
assert arg.kwargs["type"] == test_arg[1]["type"]
assert arg.kwargs["dest"] == test_arg[1]["dest"]
print(arg.flags)
elif sub_command.name == "list":
for arg, test_arg in zip(sub_command.args, test_args_list):
assert arg.flags[0] == test_arg[0]
assert arg.kwargs["help"] == test_arg[1]["help"]
assert arg.kwargs["default"] == test_arg[1]["default"]
assert arg.kwargs["type"] == test_arg[1]["type"]
elif sub_command.name == "get":
for arg, test_arg in zip(sub_command.args, test_args_get):
assert arg.flags[0] == test_arg[0]
assert arg.kwargs["help"] == test_arg[1]["help"]
assert arg.kwargs["default"] == test_arg[1]["default"]
assert arg.kwargs["type"] == test_arg[1]["type"]
elif sub_command.name == "delete":
for arg, test_arg in zip(sub_command.args, test_args_delete):
assert arg.flags[0] == test_arg[0]
assert arg.kwargs["help"] == test_arg[1]["help"]
assert arg.kwargs["default"] == test_arg[1]["default"]
assert arg.kwargs["type"] == test_arg[1]["type"]
| TestCommandFactory |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 79760,
"end": 80187
} | class ____(BiffRecord):
"""
This record is part of the Page Settings Block. It contains the right
page margin of the current worksheet.
Offset Size Contents
0 8 Right page margin in inches
(IEEE 754 floating-point value, 64?bit double precision)
"""
_REC_ID = 0x0027
def __init__(self, margin):
self._rec_data = pack('<d', margin)
| RightMarginRecord |
python | huggingface__transformers | tests/models/nanochat/test_modeling_nanochat.py | {
"start": 1283,
"end": 1408
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = NanoChatModelTester
@require_torch
| NanoChatModelTest |
python | mlflow__mlflow | mlflow/utils/env_pack.py | {
"start": 651,
"end": 6607
} | class ____:
name: EnvPackType
install_dependencies: bool = True
_ARTIFACT_PATH = "_databricks"
_MODEL_VERSION_TAR = "model_version.tar"
_MODEL_ENVIRONMENT_TAR = "model_environment.tar"
def _validate_env_pack(env_pack):
"""Checks if env_pack is a supported value
Supported values are:
- the string "databricks_model_serving"
- an ``EnvPackConfig`` with ``name == 'databricks_model_serving'`` and a boolean
``install_dependencies`` field.
- None
"""
if env_pack is None:
return None
if isinstance(env_pack, str):
if env_pack == "databricks_model_serving":
return EnvPackConfig(name="databricks_model_serving", install_dependencies=True)
raise MlflowException.invalid_parameter_value(
f"Invalid env_pack value: {env_pack!r}. Expected: 'databricks_model_serving'."
)
if isinstance(env_pack, EnvPackConfig):
if env_pack.name != "databricks_model_serving":
raise MlflowException.invalid_parameter_value(
f"Invalid EnvPackConfig.name: {env_pack.name!r}. "
"Expected 'databricks_model_serving'."
)
if not isinstance(env_pack.install_dependencies, bool):
raise MlflowException.invalid_parameter_value(
"EnvPackConfig.install_dependencies must be a bool."
)
return env_pack
# Anything else is invalid
raise MlflowException.invalid_parameter_value(
"env_pack must be either None, the string 'databricks_model_serving', or an EnvPackConfig "
"with a boolean 'install_dependencies' field."
)
def _tar(root_path: Path, tar_path: Path) -> tarfile.TarFile:
"""
Package all files under root_path into a tar at tar_path, excluding __pycache__, *.pyc, and
wheels_info.json.
"""
def exclude(tarinfo: tarfile.TarInfo):
name = tarinfo.name
base = Path(name).name
if "__pycache__" in name or base.endswith(".pyc") or base == "wheels_info.json":
return None
return tarinfo
# Pull in symlinks
with tarfile.open(tar_path, "w", dereference=True) as tar:
tar.add(root_path, arcname=".", filter=exclude)
return tar
# TODO: Check pip requirements using uv instead.
@contextmanager
def pack_env_for_databricks_model_serving(
model_uri: str,
*,
enforce_pip_requirements: bool = False,
) -> Generator[str, None, None]:
"""
Generate Databricks artifacts for fast deployment.
Args:
model_uri: The URI of the model to package.
enforce_pip_requirements: Whether to enforce pip requirements installation.
Yields:
str: The path to the local artifacts directory containing the model artifacts and
environment.
Example:
>>> with pack_env_for_databricks_model_serving("models:/my-model/1") as artifacts_dir:
... # Use artifacts_dir here
... pass
"""
dbr_version = DatabricksRuntimeVersion.parse()
if not dbr_version.is_client_image:
raise ValueError(
f"Serverless environment is required when packing environment for Databricks Model "
f"Serving. Current version: {dbr_version}"
)
with tempfile.TemporaryDirectory() as temp_dir:
# Download model artifacts. Keep this separate from temp_dir to avoid noise in packaged
# artifacts.
local_artifacts_dir = Path(download_artifacts(artifact_uri=model_uri))
# Check runtime version consistency
# We read the MLmodel file directly instead of using Model.to_dict() because to_dict() adds
# the current runtime version via get_databricks_runtime_version(), which would prevent us
# from detecting runtime version mismatches.
mlmodel_path = local_artifacts_dir / MLMODEL_FILE_NAME
with open(mlmodel_path) as f:
model_dict = yaml.safe_load(f)
if "databricks_runtime" not in model_dict:
raise ValueError(
"Model must have been created in a Databricks runtime environment. "
"Missing 'databricks_runtime' field in MLmodel file."
)
current_runtime = DatabricksRuntimeVersion.parse()
model_runtime = DatabricksRuntimeVersion.parse(model_dict["databricks_runtime"])
if current_runtime.major != model_runtime.major:
raise ValueError(
f"Runtime version mismatch. Model was created with runtime "
f"{model_dict['databricks_runtime']} (major version {model_runtime.major}), "
f"but current runtime is {get_databricks_runtime_version()} "
f"(major version {current_runtime.major})"
)
if enforce_pip_requirements:
eprint("Installing model requirements...")
try:
subprocess.run(
[
sys.executable,
"-m",
"pip",
"install",
"-r",
str(local_artifacts_dir / _REQUIREMENTS_FILE_NAME),
],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
except subprocess.CalledProcessError as e:
eprint("Error installing requirements:")
eprint(e.stdout)
raise
# Package model artifacts and env into temp_dir/_databricks
temp_artifacts_dir = Path(temp_dir) / _ARTIFACT_PATH
temp_artifacts_dir.mkdir(exist_ok=False)
_tar(local_artifacts_dir, temp_artifacts_dir / _MODEL_VERSION_TAR)
_tar(Path(sys.prefix), temp_artifacts_dir / _MODEL_ENVIRONMENT_TAR)
shutil.move(str(temp_artifacts_dir), local_artifacts_dir)
yield str(local_artifacts_dir)
| EnvPackConfig |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 92784,
"end": 106388
} | class ____(_Style):
"""
`ConnectionStyle` is a container class which defines
several connectionstyle classes, which is used to create a path
between two points. These are mainly used with `FancyArrowPatch`.
A connectionstyle object can be either created as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(ConnectionStyle:table)s
An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a `.Path` instance. *posA* and *posB* are
tuples of (x, y) coordinates of the two points to be
connected. *patchA* (or *patchB*) is given, the returned path is
clipped so that it start (or end) from the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
which is given in points.
"""
_style_list = {}
class _Base:
"""
A base class for connectionstyle classes. The subclass needs
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting two
points. This base class defines a __call__ method, and a few
helper methods.
"""
def _in_patch(self, patch):
"""
Return a predicate function testing whether a point *xy* is
contained in *patch*.
"""
return lambda xy: patch.contains(
SimpleNamespace(x=xy[0], y=xy[1]))[0]
def _clip(self, path, in_start, in_stop):
"""
Clip *path* at its start by the region where *in_start* returns
True, and at its stop by the region where *in_stop* returns True.
The original path is assumed to start in the *in_start* region and
to stop in the *in_stop* region.
"""
if in_start:
try:
_, path = split_path_inout(path, in_start)
except ValueError:
pass
if in_stop:
try:
path, _ = split_path_inout(path, in_stop)
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Call the *connect* method to create a path between *posA* and
*posB*; then clip and shrink the path.
"""
path = self.connect(posA, posB)
path = self._clip(
path,
self._in_patch(patchA) if patchA else None,
self._in_patch(patchB) if patchB else None,
)
path = self._clip(
path,
inside_circle(*path.vertices[0], shrinkA) if shrinkA else None,
inside_circle(*path.vertices[-1], shrinkB) if shrinkB else None
)
return path
@_register_style(_style_list)
class Arc3(_Base):
"""
Creates a simple quadratic Bézier curve between two
points. The curve is created so that the middle control point
(C1) is located at the same distance from the start (C0) and
end points(C2) and the distance of the C1 to the line
connecting C0-C2 is *rad* times the distance of C0-C2.
"""
def __init__(self, rad=0.):
"""
Parameters
----------
rad : float
Curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
@_register_style(_style_list)
class Angle3(_Base):
"""
Creates a simple quadratic Bézier curve between two points. The middle
control point is placed at the intersecting point of two lines which
cross the start and end point, and have a slope of *angleA* and
*angleB*, respectively.
"""
def __init__(self, angleA=90, angleB=0):
"""
Parameters
----------
angleA : float
Starting angle of the path.
angleB : float
Ending angle of the path.
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA = math.cos(math.radians(self.angleA))
sinA = math.sin(math.radians(self.angleA))
cosB = math.cos(math.radians(self.angleB))
sinB = math.sin(math.radians(self.angleB))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
@_register_style(_style_list)
class Angle(_Base):
"""
Creates a piecewise continuous quadratic Bézier path between two
points. The path has a one passing-through point placed at the
intersecting point of two lines which cross the start and end point,
and have a slope of *angleA* and *angleB*, respectively.
The connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
Parameters
----------
angleA : float
Starting angle of the path.
angleB : float
Ending angle of the path.
rad : float
Rounding radius of the edge.
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA = math.cos(math.radians(self.angleA))
sinA = math.sin(math.radians(self.angleA))
cosB = math.cos(math.radians(self.angleB))
sinB = math.sin(math.radians(self.angleB))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = np.hypot(dx1, dy1)
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = np.hypot(dx2, dy2)
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
@_register_style(_style_list)
class Arc(_Base):
    """
    Creates a piecewise continuous quadratic Bézier path between two
    points. The path can have two passing-through points, a
    point placed at the distance of *armA* and angle of *angleA* from
    point A, another point with respect to point B. The edges are
    rounded with *rad*.
    """

    def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
        """
        Parameters
        ----------
        angleA : float
            Starting angle of the path.
        angleB : float
            Ending angle of the path.
        armA : float or None
            Length of the starting arm.
        armB : float or None
            Length of the ending arm.
        rad : float
            Rounding radius of the edges.
        """
        self.angleA = angleA
        self.angleB = angleB
        self.armA = armA
        self.armB = armB
        self.rad = rad

    def connect(self, posA, posB):
        """
        Return the Path from *posA* to *posB*.

        Points not yet emitted are collected in *rounded*; each time a
        corner is flushed into *vertices* it is rounded with a quadratic
        Bézier of radius ``self.rad``.
        """
        x1, y1 = posA
        x2, y2 = posB

        vertices = [(x1, y1)]
        rounded = []
        codes = [Path.MOVETO]

        if self.armA:
            cosA = math.cos(math.radians(self.angleA))
            sinA = math.sin(math.radians(self.angleA))
            # Two points along arm A: one ``rad`` short of the arm's end
            # (start of the rounded corner) and the arm's end itself.
            d = self.armA - self.rad
            rounded.append((x1 + d * cosA, y1 + d * sinA))
            d = self.armA
            rounded.append((x1 + d * cosA, y1 + d * sinA))

        if self.armB:
            cosB = math.cos(math.radians(self.angleB))
            sinB = math.sin(math.radians(self.angleB))
            x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB

            if rounded:
                # Flush the arm-A chain, rounding its corner toward arm B.
                xp, yp = rounded[-1]
                dx, dy = x_armB - xp, y_armB - yp
                dd = (dx * dx + dy * dy) ** .5
                rounded.append((xp + self.rad * dx / dd,
                                yp + self.rad * dy / dd))
                vertices.extend(rounded)
                codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])

            # BUG FIX: rebuild *rounded* from the last emitted vertex.
            # Previously the flushed arm-A points were left in *rounded*,
            # so the final block below re-emitted them, producing more
            # vertices than codes (an invalid Path) and never reaching
            # arm B's end point.  Approach arm B's end, stopping ``rad``
            # short so its corner can be rounded.
            xp, yp = vertices[-1]
            dx, dy = x_armB - xp, y_armB - yp
            dd = (dx * dx + dy * dy) ** .5
            d = dd - self.rad
            rounded = [(xp + d * dx / dd, yp + d * dy / dd),
                       (x_armB, y_armB)]

        if rounded:
            # Connect the pending points to the end point, rounding the
            # last corner toward (x2, y2).
            xp, yp = rounded[-1]
            dx, dy = x2 - xp, y2 - yp
            dd = (dx * dx + dy * dy) ** .5
            rounded.append((xp + self.rad * dx / dd,
                            yp + self.rad * dy / dd))
            vertices.extend(rounded)
            codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])

        vertices.append((x2, y2))
        codes.append(Path.LINETO)

        return Path(vertices, codes)
@_register_style(_style_list)
class Bar(_Base):
    """
    A line with *angle* between A and B with *armA* and *armB*. One of the
    arms is extended so that they are connected in a right angle. The
    length of *armA* is determined by (*armA* + *fraction* x AB distance).
    Same for *armB*.
    """

    def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
        """
        Parameters
        ----------
        armA : float
            Minimum length of armA.
        armB : float
            Minimum length of armB.
        fraction : float
            A fraction of the distance between two points that will be
            added to armA and armB.
        angle : float or None
            Angle of the connecting line (if None, parallel to A and B).
        """
        self.armA = armA
        self.armB = armB
        self.fraction = fraction
        self.angle = angle

    def connect(self, posA, posB):
        """Return a 4-point polyline Path from *posA* to *posB*."""
        x1, y1 = posA
        # Keep the original end point; (x2, y2) may be re-projected below.
        x20, y20 = x2, y2 = posB

        # Direction of the straight segment A -> B.
        theta1 = math.atan2(y2 - y1, x2 - x1)
        dx, dy = x2 - x1, y2 - y1
        dd = (dx * dx + dy * dy) ** .5
        ddx, ddy = dx / dd, dy / dd

        armA, armB = self.armA, self.armB

        if self.angle is not None:
            # Project B onto the line through A at slope *angle* so the
            # connecting segment runs at the requested angle; the leftover
            # perpendicular offset *dl* is absorbed into arm B's length.
            theta0 = np.deg2rad(self.angle)
            dtheta = theta1 - theta0
            dl = dd * math.sin(dtheta)
            dL = dd * math.cos(dtheta)
            x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
            armB = armB - dl

            # update
            dx, dy = x2 - x1, y2 - y1
            dd2 = (dx * dx + dy * dy) ** .5
            ddx, ddy = dx / dd2, dy / dd2

        # Offset of the connecting bar, perpendicular to A -> B:
        # fraction of |AB| plus the longer of the two arms.
        arm = max(armA, armB)
        f = self.fraction * dd + arm

        # (cx1, cy1) / (cx2, cy2): the two right-angle corners of the bar.
        cx1, cy1 = x1 + f * ddy, y1 - f * ddx
        cx2, cy2 = x2 + f * ddy, y2 - f * ddx

        vertices = [(x1, y1),
                    (cx1, cy1),
                    (cx2, cy2),
                    (x20, y20)]
        codes = [Path.MOVETO,
                 Path.LINETO,
                 Path.LINETO,
                 Path.LINETO]

        return Path(vertices, codes)
def _point_along_a_line(x0, y0, x1, y1, d):
    """
    Return the point on the line connecting (*x0*, *y0*) -- (*x1*, *y1*) whose
    distance from (*x0*, *y0*) is *d*.

    The point lies on the (x0, y0) -> (x1, y1) side; *d* larger than the
    segment length extrapolates beyond (x1, y1).  Raises
    ``ZeroDivisionError`` if the two points coincide (unchanged behavior).
    """
    dx, dy = x0 - x1, y0 - y1
    # math.hypot is robust against overflow/underflow of dx*dx + dy*dy,
    # unlike the naive (dx * dx + dy * dy) ** .5.
    ff = d / math.hypot(dx, dy)
    x2, y2 = x0 - ff * dx, y0 - ff * dy
    return x2, y2
@_docstring.interpd
| ConnectionStyle |
python | huggingface__transformers | src/transformers/models/ibert/quant_modules.py | {
"start": 13255,
"end": 16071
class ____(nn.Module):
    """
    Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`.

    Args:
        output_bit (`int`):
            Bitwidth for the layer output activation.
        quant_mode (`bool`, *optional*, defaults to `False`):
            Whether or not the layer is quantized.
        force_dequant (`str`, *optional*, defaults to `"none"`):
            Force dequantize the layer if either "softmax" or "nonlinear" is given.
    """

    def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
        super().__init__()
        self.output_bit = output_bit
        # Bitwidth of the intermediate accumulator used when normalizing.
        self.max_bit = 32
        self.quant_mode = quant_mode

        if force_dequant in ["nonlinear", "softmax"]:
            logger.info("Force dequantize softmax")
            self.quant_mode = False

        # Requantizes the exponent values to 16 bits before summation.
        self.act = QuantAct(16, quant_mode=self.quant_mode)
        self.x0 = -0.6931  # -ln2
        self.const = 30  # dummy integer constant
        self.coef = [0.35815147, 0.96963238, 1.0]  # ax**2 + bx + c
        # Fold the leading coefficient a into b and c so int_polynomial()
        # can evaluate (x + b')x + c' and push a into the scaling factor.
        self.coef[1] /= self.coef[0]
        self.coef[2] /= self.coef[0]

    def int_polynomial(self, x_int, scaling_factor):
        # Integer-only evaluation of the quadratic approximation at
        # x = x_int * scaling_factor; returns (z, s) with z * s ~= a*x^2 + b*x + c.
        with torch.no_grad():
            # Constants are computed without tracking gradients.
            b_int = torch.floor(self.coef[1] / scaling_factor)
            c_int = torch.floor(self.coef[2] / scaling_factor**2)
        z = (x_int + b_int) * x_int + c_int
        scaling_factor = self.coef[0] * scaling_factor**2
        return z, scaling_factor

    def int_exp(self, x_int, scaling_factor):
        # Integer-only exp: decompose x = q * (-ln2) + r with r in (-ln2, 0],
        # then exp(x) = 2**(-q) * poly(r), using the quadratic above for
        # exp on the reduced range.
        with torch.no_grad():
            x0_int = torch.floor(self.x0 / scaling_factor)
        # Clamp x so q stays <= self.const (keeps 2**(const - q) >= 1).
        x_int = torch.max(x_int, self.const * x0_int)

        # floor_ste: floor with straight-through gradient estimation.
        q = floor_ste.apply(x_int / x0_int)
        r = x_int - x0_int * q
        exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
        exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
        scaling_factor = exp_scaling_factor / 2**self.const
        return exp_int, scaling_factor

    def forward(self, x, scaling_factor):
        # Fall back to floating-point softmax when quantization is off.
        if not self.quant_mode:
            return nn.functional.softmax(x, dim=-1), None

        x_int = x / scaling_factor

        # Subtract the per-row max (standard softmax stabilization).
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max
        exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)

        # Avoid overflow
        exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
        exp_int = exp / exp_scaling_factor
        exp_int_sum = exp_int.sum(dim=-1, keepdim=True)

        # Normalize in integer arithmetic, then shift down to output_bit.
        factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
        exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
        scaling_factor = 1 / 2**self.output_bit
        return exp_int * scaling_factor, scaling_factor
| IntSoftmax |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingTypedDict1.py | {
"start": 138,
"end": 184
class ____(TypedDict):
    # TypedDict with two required keys: "a" (str) and "b" (int).
    a: str
    b: int
| TD1 |
python | scrapy__scrapy | tests/CrawlerProcess/reactor_select_twisted_reactor_select.py | {
"start": 127,
"end": 422
class ____(scrapy.Spider):
    # Spider that starts successfully but schedules no requests.
    name = "no_request"

    async def start(self):
        # The unreachable `yield` after `return` makes this an async
        # generator that terminates immediately, so the crawl starts and
        # finishes without issuing a single request.
        return
        yield
# Run the spider with an explicitly selected select()-based Twisted reactor.
process = CrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor",
    }
)
process.crawl(NoRequestsSpider)
process.start()  # blocks until the crawl finishes
| NoRequestsSpider |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column04.py | {
"start": 315,
"end": 1318
class ____(ExcelComparisonTest):
    """
    Compare a workbook produced by XlsxWriter against the reference file
    saved by Excel.
    """

    def setUp(self):
        self.set_filename("chart_column04.xlsx")

        self.ignore_elements = {"xl/workbook.xml": ["<fileVersion", "<calcPr"]}

    def test_create_file(self):
        """Build a column chart with a secondary axis and compare files."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({"type": "column"})
        chart.axis_ids = [63591936, 63593856]
        chart.axis2_ids = [63613568, 63612032]

        columns = [[1, 2, 3, 4, 5], [6, 8, 6, 4, 2]]
        for cell, values in zip(("A1", "B1"), columns):
            worksheet.write_column(cell, values)

        chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
        chart.add_series({"values": "=Sheet1!$B$1:$B$5", "y2_axis": 1})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.