language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | fastapi__sqlmodel | docs_src/tutorial/many_to_many/tutorial003_py310.py | {
"start": 456,
"end": 681
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
hero_links: list[HeroTeamLink] = Relationship(back_populates="team")
| Team |
python | coleifer__peewee | tests/regressions.py | {
"start": 45207,
"end": 45319
} | class ____(TestModel):
name = TextField()
fkma = ForeignKeyField(FKMA, backref='fkmb_set', null=True)
| FKMB |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/schedules/schedules.py | {
"start": 1220,
"end": 9911
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.ID)
name = graphene.NonNull(graphene.String)
cron_schedule = graphene.NonNull(graphene.String)
pipeline_name = graphene.NonNull(graphene.String)
solid_selection = graphene.List(graphene.String)
mode = graphene.NonNull(graphene.String)
execution_timezone = graphene.Field(graphene.String)
description = graphene.String()
defaultStatus = graphene.NonNull(GrapheneInstigationStatus)
canReset = graphene.NonNull(graphene.Boolean)
scheduleState = graphene.NonNull(GrapheneInstigationState)
partition_set = graphene.Field("dagster_graphql.schema.partition_sets.GraphenePartitionSet")
futureTicks = graphene.NonNull(
GrapheneDryRunInstigationTicks,
cursor=graphene.Float(),
limit=graphene.Int(),
until=graphene.Float(),
)
futureTick = graphene.NonNull(
GrapheneDryRunInstigationTick, tick_timestamp=graphene.NonNull(graphene.Int)
)
potentialTickTimestamps = graphene.NonNull(
graphene.List(graphene.NonNull(graphene.Float)),
start_timestamp=graphene.Float(),
upper_limit=graphene.Int(),
lower_limit=graphene.Int(),
)
assetSelection = graphene.Field(GrapheneAssetSelection)
owners = non_null_list(GrapheneDefinitionOwner)
tags = non_null_list(GrapheneDefinitionTag)
metadataEntries = non_null_list(GrapheneMetadataEntry)
class Meta:
name = "Schedule"
def __init__(
self,
remote_schedule: RemoteSchedule,
schedule_state: Optional[InstigatorState],
batch_loader: Optional[RepositoryScopedBatchLoader] = None,
):
self._remote_schedule = check.inst_param(remote_schedule, "remote_schedule", RemoteSchedule)
# optional run loader, provided by a parent graphene object (e.g. GrapheneRepository)
# that instantiates multiple schedules
self._batch_loader = check.opt_inst_param(
batch_loader, "batch_loader", RepositoryScopedBatchLoader
)
self._stored_state = schedule_state
self._schedule_state = self._remote_schedule.get_current_instigator_state(schedule_state)
super().__init__(
name=remote_schedule.name,
cron_schedule=str(
remote_schedule.cron_schedule
), # can be sequence, coercing to str for now
pipeline_name=remote_schedule.job_name,
solid_selection=remote_schedule.op_selection,
mode=remote_schedule.mode,
execution_timezone=(
self._remote_schedule.execution_timezone
if self._remote_schedule.execution_timezone
else "UTC"
),
description=remote_schedule.description,
assetSelection=GrapheneAssetSelection(
asset_selection=remote_schedule.asset_selection,
repository_handle=remote_schedule.handle.repository_handle,
)
if remote_schedule.asset_selection
else None,
)
def resolve_id(self, _graphene_info: ResolveInfo) -> str:
return self._remote_schedule.get_compound_id().to_string()
def resolve_defaultStatus(self, _graphene_info: ResolveInfo):
default_schedule_status = self._remote_schedule.default_status
if default_schedule_status == DefaultScheduleStatus.RUNNING:
return GrapheneInstigationStatus.RUNNING
elif default_schedule_status == DefaultScheduleStatus.STOPPED:
return GrapheneInstigationStatus.STOPPED
def resolve_canReset(self, _graphene_info: ResolveInfo):
return bool(
self._stored_state and self._stored_state.status != InstigatorStatus.DECLARED_IN_CODE
)
def resolve_scheduleState(self, _graphene_info: ResolveInfo):
# forward the batch run loader to the instigation state, which provides the schedule runs
return GrapheneInstigationState(self._schedule_state, self._batch_loader)
def resolve_partition_set(self, graphene_info: ResolveInfo):
from dagster_graphql.schema.partition_sets import GraphenePartitionSet
if self._remote_schedule.partition_set_name is None:
return None
repository = graphene_info.context.get_code_location(
self._remote_schedule.handle.location_name
).get_repository(self._remote_schedule.handle.repository_name)
partition_set = repository.get_partition_set(self._remote_schedule.partition_set_name)
return GraphenePartitionSet(
remote_partition_set=partition_set,
)
def resolve_futureTicks(
self,
_graphene_info: ResolveInfo,
cursor: Optional[float] = None,
limit: Optional[int] = None,
until: Optional[float] = None,
):
cursor = cursor or time.time()
tick_times: list[float] = []
time_iter = self._remote_schedule.execution_time_iterator(cursor)
if until:
currentTime = None
while (not currentTime or currentTime < until) and (
limit is None or len(tick_times) < limit
):
try:
currentTime = next(time_iter).timestamp()
if currentTime < until:
tick_times.append(currentTime)
except StopIteration:
break
else:
limit = limit or 10
for _ in range(limit):
tick_times.append(next(time_iter).timestamp())
schedule_selector = self._remote_schedule.schedule_selector
future_ticks = [
GrapheneDryRunInstigationTick(schedule_selector, tick_time) for tick_time in tick_times
]
new_cursor = tick_times[-1] + 1 if tick_times else cursor
return GrapheneDryRunInstigationTicks(results=future_ticks, cursor=new_cursor)
def resolve_futureTick(self, _graphene_info: ResolveInfo, tick_timestamp: int):
return GrapheneDryRunInstigationTick(
self._remote_schedule.schedule_selector, float(tick_timestamp)
)
def resolve_potentialTickTimestamps(
self,
_graphene_info: ResolveInfo,
start_timestamp: Optional[float] = None,
upper_limit: Optional[int] = None,
lower_limit: Optional[int] = None,
):
"""Get timestamps when ticks will occur before and after a given timestamp.
upper_limit defines how many ticks will be retrieved after the current timestamp, and lower_limit defines how many ticks will be retrieved before the current timestamp.
"""
start_timestamp = start_timestamp or get_current_timestamp()
upper_limit = upper_limit or 10
lower_limit = lower_limit or 10
tick_times: list[float] = []
ascending_tick_iterator = self._remote_schedule.execution_time_iterator(start_timestamp)
descending_tick_iterator = self._remote_schedule.execution_time_iterator(
start_timestamp, ascending=False
)
tick_times_below_timestamp: list[float] = []
first_past_tick = next(descending_tick_iterator)
# execution_time_iterator starts at first tick <= timestamp (or >= timestamp in
# ascending case), so we need to make sure not to double count start_timestamp
# if it falls on a tick time.
if first_past_tick.timestamp() < start_timestamp:
tick_times_below_timestamp.append(first_past_tick.timestamp())
lower_limit -= 1
for _ in range(lower_limit):
tick_times_below_timestamp.append(next(descending_tick_iterator).timestamp())
# Combine tick times < start_timestamp to tick times >= timestamp to get full
# list. We reverse timestamp range because ticks should be in ascending order when we give the full list.
tick_times = tick_times_below_timestamp[::-1] + [
next(ascending_tick_iterator).timestamp() for _ in range(upper_limit)
]
return tick_times
def resolve_owners(self, _graphene_info: ResolveInfo):
return [
definition_owner_from_owner_str(owner) for owner in (self._remote_schedule.owners or [])
]
def resolve_tags(self, _graphene_info: ResolveInfo) -> Sequence[GrapheneDefinitionTag]:
return [
GrapheneDefinitionTag(key, value)
for key, value in (self._remote_schedule.tags or {}).items()
]
def resolve_metadataEntries(self, _graphene_info: ResolveInfo) -> list[GrapheneMetadataEntry]:
return list(iterate_metadata_entries(self._remote_schedule.metadata))
| GrapheneSchedule |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 14281,
"end": 15527
} | class ____(Benchmark):
params = (
[10, 20, 100],
['euclidean', 'minkowski', 'cityblock', 'sqeuclidean', 'cosine',
'correlation', 'hamming', 'jaccard', 'chebyshev', 'canberra',
'braycurtis', 'yule', 'dice', 'rogerstanimoto',
'russellrao', 'sokalsneath', 'minkowski-P3'])
param_names = ['num_points', 'metric']
def setup(self, num_points, metric):
rng = np.random.default_rng(123)
self.points = rng.random((num_points, 3))
self.metric = metric
if metric == 'minkowski-P3':
# p=2 is just the euclidean metric, try another p value as well
self.kwargs = {'p': 3.0}
self.metric = 'minkowski'
else:
self.kwargs = {}
self.weights = np.ones(3)
def time_cdist(self, num_points, metric):
"""Time scipy.spatial.distance.cdist for weighted distance metrics."""
distance.cdist(self.points, self.points, self.metric, w=self.weights,
**self.kwargs)
def time_pdist(self, num_points, metric):
"""Time scipy.spatial.distance.pdist for weighted distance metrics."""
distance.pdist(self.points, self.metric, w=self.weights, **self.kwargs)
| XdistWeighted |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 751764,
"end": 753373
} | class ____(VegaLiteSchema):
"""
NumberLocale schema wrapper.
Locale definition for formatting numbers.
Parameters
----------
currency : Sequence[str], :class:`Vector2string`
The currency prefix and suffix (e.g., ["$", ""]).
decimal : str
The decimal point (e.g., ".").
grouping : Sequence[float]
The array of group sizes (e.g., [3]), cycled as needed.
thousands : str
The group separator (e.g., ",").
minus : str
The minus sign (defaults to hyphen-minus, "-").
nan : str
The not-a-number value (defaults to "NaN").
numerals : Sequence[str], :class:`Vector10string`
An array of ten strings to replace the numerals 0-9.
percent : str
The percent sign (defaults to "%").
"""
_schema = {"$ref": "#/definitions/NumberLocale"}
def __init__(
self,
currency: Optional[SchemaBase | Sequence[str]] = Undefined,
decimal: Optional[str] = Undefined,
grouping: Optional[Sequence[float]] = Undefined,
thousands: Optional[str] = Undefined,
minus: Optional[str] = Undefined,
nan: Optional[str] = Undefined,
numerals: Optional[SchemaBase | Sequence[str]] = Undefined,
percent: Optional[str] = Undefined,
**kwds,
):
super().__init__(
currency=currency,
decimal=decimal,
grouping=grouping,
thousands=thousands,
minus=minus,
nan=nan,
numerals=numerals,
percent=percent,
**kwds,
)
| NumberLocale |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_reindex.py | {
"start": 3671,
"end": 47321
} | class ____:
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
@pytest.mark.xfail(
not IS64 or (is_platform_windows() and not np_version_gt2),
reason="Passes int32 values to DatetimeArray in make_na_array on "
"windows, 32bit linux builds",
)
def test_reindex_tzaware_fill_value(self):
# GH#52586
df = DataFrame([[1]])
ts = pd.Timestamp("2023-04-10 17:32", tz="US/Pacific").as_unit("s")
res = df.reindex([0, 1], axis=1, fill_value=ts)
assert res.dtypes[1] == pd.DatetimeTZDtype(unit="s", tz="US/Pacific")
expected = DataFrame({0: [1], 1: [ts]})
expected[1] = expected[1].astype(res.dtypes[1])
tm.assert_frame_equal(res, expected)
per = ts.tz_localize(None).to_period("s")
res = df.reindex([0, 1], axis=1, fill_value=per)
assert res.dtypes[1] == pd.PeriodDtype("s")
expected = DataFrame({0: [1], 1: [per]})
tm.assert_frame_equal(res, expected)
interval = pd.Interval(ts, ts + pd.Timedelta(seconds=1))
res = df.reindex([0, 1], axis=1, fill_value=interval)
assert res.dtypes[1] == pd.IntervalDtype("datetime64[s, US/Pacific]", "right")
expected = DataFrame({0: [1], 1: [interval]})
expected[1] = expected[1].astype(res.dtypes[1])
tm.assert_frame_equal(res, expected)
def test_reindex_date_fill_value(self):
# passing date to dt64 is deprecated; enforced in 2.0 to cast to object
arr = date_range("2016-01-01", periods=6, unit="ns").values.reshape(3, 2)
df = DataFrame(arr, columns=["A", "B"], index=range(3))
ts = df.iloc[0, 0]
fv = ts.date()
res = df.reindex(index=range(4), columns=["A", "B", "C"], fill_value=fv)
expected = DataFrame(
{"A": df["A"].tolist() + [fv], "B": df["B"].tolist() + [fv], "C": [fv] * 4},
dtype=object,
)
tm.assert_frame_equal(res, expected)
# only reindexing rows
res = df.reindex(index=range(4), fill_value=fv)
tm.assert_frame_equal(res, expected[["A", "B"]])
# same with a datetime-castable str
res = df.reindex(
index=range(4), columns=["A", "B", "C"], fill_value="2016-01-01"
)
expected = DataFrame(
{"A": df["A"].tolist() + [ts], "B": df["B"].tolist() + [ts], "C": [ts] * 4},
)
tm.assert_frame_equal(res, expected)
def test_reindex_with_multi_index(self):
# https://github.com/pandas-dev/pandas/issues/29896
# tests for reindexing a multi-indexed DataFrame with a new MultiIndex
#
# confirms that we can reindex a multi-indexed DataFrame with a new
# MultiIndex object correctly when using no filling, backfilling, and
# padding
#
# The DataFrame, `df`, used in this test is:
# c
# a b
# -1 0 A
# 1 B
# 2 C
# 3 D
# 4 E
# 5 F
# 6 G
# 0 0 A
# 1 B
# 2 C
# 3 D
# 4 E
# 5 F
# 6 G
# 1 0 A
# 1 B
# 2 C
# 3 D
# 4 E
# 5 F
# 6 G
#
# and the other MultiIndex, `new_multi_index`, is:
# 0: 0 0.5
# 1: 2.0
# 2: 5.0
# 3: 5.8
df = DataFrame(
{
"a": [-1] * 7 + [0] * 7 + [1] * 7,
"b": list(range(7)) * 3,
"c": ["A", "B", "C", "D", "E", "F", "G"] * 3,
}
).set_index(["a", "b"])
new_index = [0.5, 2.0, 5.0, 5.8]
new_multi_index = MultiIndex.from_product([[0], new_index], names=["a", "b"])
# reindexing w/o a `method` value
reindexed = df.reindex(new_multi_index)
expected = DataFrame(
{"a": [0] * 4, "b": new_index, "c": [np.nan, "C", "F", np.nan]}
).set_index(["a", "b"])
tm.assert_frame_equal(expected, reindexed)
# reindexing with backfilling
expected = DataFrame(
{"a": [0] * 4, "b": new_index, "c": ["B", "C", "F", "G"]}
).set_index(["a", "b"])
reindexed_with_backfilling = df.reindex(new_multi_index, method="bfill")
tm.assert_frame_equal(expected, reindexed_with_backfilling)
reindexed_with_backfilling = df.reindex(new_multi_index, method="backfill")
tm.assert_frame_equal(expected, reindexed_with_backfilling)
# reindexing with padding
expected = DataFrame(
{"a": [0] * 4, "b": new_index, "c": ["A", "C", "F", "F"]}
).set_index(["a", "b"])
reindexed_with_padding = df.reindex(new_multi_index, method="pad")
tm.assert_frame_equal(expected, reindexed_with_padding)
reindexed_with_padding = df.reindex(new_multi_index, method="ffill")
tm.assert_frame_equal(expected, reindexed_with_padding)
@pytest.mark.parametrize(
"method,expected_values",
[
("nearest", [0, 1, 1, 2]),
("pad", [np.nan, 0, 1, 1]),
("backfill", [0, 1, 2, 2]),
],
)
def test_reindex_methods(self, method, expected_values):
df = DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = DataFrame({"x": expected_values}, index=target)
actual = df.reindex(target, method=method)
tm.assert_frame_equal(expected, actual)
actual = df.reindex(target, method=method, tolerance=1)
tm.assert_frame_equal(expected, actual)
actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1])
tm.assert_frame_equal(expected, actual)
e2 = expected[::-1]
actual = df.reindex(target[::-1], method=method)
tm.assert_frame_equal(e2, actual)
new_order = [3, 0, 2, 1]
e2 = expected.iloc[new_order]
actual = df.reindex(target[new_order], method=method)
tm.assert_frame_equal(e2, actual)
switched_method = (
"pad" if method == "backfill" else "backfill" if method == "pad" else method
)
actual = df[::-1].reindex(target, method=switched_method)
tm.assert_frame_equal(expected, actual)
def test_reindex_methods_nearest_special(self):
df = DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = DataFrame({"x": [0, 1, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest", tolerance=0.2)
tm.assert_frame_equal(expected, actual)
expected = DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest", tolerance=[0.5, 0.01, 0.4, 0.1])
tm.assert_frame_equal(expected, actual)
def test_reindex_nearest_tz(self, tz_aware_fixture):
# GH26683
tz = tz_aware_fixture
idx = date_range("2019-01-01", periods=5, tz=tz)
df = DataFrame({"x": list(range(5))}, index=idx)
expected = df.head(3)
actual = df.reindex(idx[:3], method="nearest")
tm.assert_frame_equal(expected, actual)
def test_reindex_nearest_tz_empty_frame(self):
# https://github.com/pandas-dev/pandas/issues/31964
dti = pd.DatetimeIndex(["2016-06-26 14:27:26+00:00"])
df = DataFrame(index=pd.DatetimeIndex(["2016-07-04 14:00:59+00:00"]))
expected = DataFrame(index=dti)
result = df.reindex(dti, method="nearest")
tm.assert_frame_equal(result, expected)
def test_reindex_frame_add_nat(self):
rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s")
df = DataFrame(
{"A": np.random.default_rng(2).standard_normal(len(rng)), "B": rng}
)
result = df.reindex(range(15))
assert np.issubdtype(result["B"].dtype, np.dtype("M8[ns]"))
mask = isna(result)["B"]
assert mask[-5:].all()
assert not mask[:-5].any()
@pytest.mark.parametrize(
"method, exp_values",
[("ffill", [0, 1, 2, 3]), ("bfill", [1.0, 2.0, 3.0, np.nan])],
)
def test_reindex_frame_tz_ffill_bfill(self, frame_or_series, method, exp_values):
# GH#38566
obj = frame_or_series(
[0, 1, 2, 3],
index=date_range("2020-01-01 00:00:00", periods=4, freq="h", tz="UTC"),
)
new_index = date_range("2020-01-01 00:01:00", periods=4, freq="h", tz="UTC")
result = obj.reindex(new_index, method=method, tolerance=pd.Timedelta("1 hour"))
expected = frame_or_series(exp_values, index=new_index)
tm.assert_equal(result, expected)
def test_reindex_limit(self):
# GH 28631
data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]]
exp_data = [
["A", "A", "A"],
["B", "B", "B"],
["C", "C", "C"],
["D", "D", "D"],
["D", "D", "D"],
[np.nan, np.nan, np.nan],
]
df = DataFrame(data)
result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1)
expected = DataFrame(exp_data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"idx, check_index_type",
[
[["C", "B", "A"], True],
[["F", "C", "A", "D"], True],
[["A"], True],
[["A", "B", "C"], True],
[["C", "A", "B"], True],
[["C", "B"], True],
[["C", "A"], True],
[["A", "B"], True],
[["B", "A", "C"], True],
# reindex by these causes different MultiIndex levels
[["D", "F"], False],
[["A", "C", "B"], False],
],
)
def test_reindex_level_verify_first_level(self, idx, check_index_type):
df = DataFrame(
{
"jim": list("B" * 4 + "A" * 2 + "C" * 3),
"joe": list("abcdeabcd")[::-1],
"jolie": [10, 20, 30] * 3,
"joline": np.random.default_rng(2).integers(0, 1000, 9),
}
)
icol = ["jim", "joe", "jolie"]
def f(val):
return np.nonzero((df["jim"] == val).to_numpy())[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level="jim")
right = df.iloc[i].set_index(icol)
tm.assert_frame_equal(left, right, check_index_type=check_index_type)
@pytest.mark.parametrize(
"idx",
[
("mid",),
("mid", "btm"),
("mid", "btm", "top"),
("mid", "top"),
("mid", "top", "btm"),
("btm",),
("btm", "mid"),
("btm", "mid", "top"),
("btm", "top"),
("btm", "top", "mid"),
("top",),
("top", "mid"),
("top", "mid", "btm"),
("top", "btm"),
("top", "btm", "mid"),
],
)
def test_reindex_level_verify_first_level_repeats(self, idx):
df = DataFrame(
{
"jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7,
"joe": ["3rd"] * 2
+ ["1st"] * 3
+ ["2nd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["2nd"] * 2,
# this needs to be jointly unique with jim and joe or
# reindexing will fail ~1.5% of the time, this works
# out to needing unique groups of same size as joe
"jolie": np.concatenate(
[
np.random.default_rng(2).choice(1000, x, replace=False)
for x in [2, 3, 3, 2, 3, 2, 3, 2]
]
),
"joline": np.random.default_rng(2).standard_normal(20).round(3) * 10,
}
)
icol = ["jim", "joe", "jolie"]
def f(val):
return np.nonzero((df["jim"] == val).to_numpy())[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level="jim")
right = df.iloc[i].set_index(icol)
tm.assert_frame_equal(left, right)
@pytest.mark.parametrize(
"idx, indexer",
[
[
["1st", "2nd", "3rd"],
[2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10, 11, 12, 13, 14, 18, 19, 15, 16, 17],
],
[
["3rd", "2nd", "1st"],
[0, 1, 2, 3, 4, 10, 11, 12, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 13, 14],
],
[["2nd", "3rd"], [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]],
[["3rd", "1st"], [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]],
],
)
def test_reindex_level_verify_repeats(self, idx, indexer):
df = DataFrame(
{
"jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7,
"joe": ["3rd"] * 2
+ ["1st"] * 3
+ ["2nd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["1st"] * 2
+ ["3rd"] * 3
+ ["2nd"] * 2,
# this needs to be jointly unique with jim and joe or
# reindexing will fail ~1.5% of the time, this works
# out to needing unique groups of same size as joe
"jolie": np.concatenate(
[
np.random.default_rng(2).choice(1000, x, replace=False)
for x in [2, 3, 3, 2, 3, 2, 3, 2]
]
),
"joline": np.random.default_rng(2).standard_normal(20).round(3) * 10,
}
)
icol = ["jim", "joe", "jolie"]
left = df.set_index(icol).reindex(idx, level="joe")
right = df.iloc[indexer].set_index(icol)
tm.assert_frame_equal(left, right)
@pytest.mark.parametrize(
"idx, indexer, check_index_type",
[
[list("abcde"), [3, 2, 1, 0, 5, 4, 8, 7, 6], True],
[list("abcd"), [3, 2, 1, 0, 5, 8, 7, 6], True],
[list("abc"), [3, 2, 1, 8, 7, 6], True],
[list("eca"), [1, 3, 4, 6, 8], True],
[list("edc"), [0, 1, 4, 5, 6], True],
[list("eadbc"), [3, 0, 2, 1, 4, 5, 8, 7, 6], True],
[list("edwq"), [0, 4, 5], True],
[list("wq"), [], False],
],
)
def test_reindex_level_verify(self, idx, indexer, check_index_type):
df = DataFrame(
{
"jim": list("B" * 4 + "A" * 2 + "C" * 3),
"joe": list("abcdeabcd")[::-1],
"jolie": [10, 20, 30] * 3,
"joline": np.random.default_rng(2).integers(0, 1000, 9),
}
)
icol = ["jim", "joe", "jolie"]
left = df.set_index(icol).reindex(idx, level="joe")
right = df.iloc[indexer].set_index(icol)
tm.assert_frame_equal(left, right, check_index_type=check_index_type)
def test_non_monotonic_reindex_methods(self):
dr = date_range("2013-08-01", periods=6, freq="B")
data = np.random.default_rng(2).standard_normal((6, 1))
df = DataFrame(data, index=dr, columns=list("A"))
df_rev = DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A"))
# index is not monotonic increasing or decreasing
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="pad")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="ffill")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="bfill")
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method="nearest")
def test_reindex_sparse(self):
# https://github.com/pandas-dev/pandas/issues/35286
df = DataFrame(
{"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))}
)
result = df.reindex([0, 2])
expected = DataFrame(
{
"A": [0.0, np.nan],
"B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)),
},
index=[0, 2],
)
tm.assert_frame_equal(result, expected)
def test_reindex(self, float_frame):
datetime_series = Series(
np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30)
)
newFrame = float_frame.reindex(datetime_series.index)
for col in newFrame.columns:
for idx, val in newFrame[col].items():
if idx in float_frame.index:
if np.isnan(val):
assert np.isnan(float_frame[col][idx])
else:
assert val == float_frame[col][idx]
else:
assert np.isnan(val)
for col, series in newFrame.items():
tm.assert_index_equal(series.index, newFrame.index)
emptyFrame = float_frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
nonContigFrame = float_frame.reindex(datetime_series.index[::2])
for col in nonContigFrame.columns:
for idx, val in nonContigFrame[col].items():
if idx in float_frame.index:
if np.isnan(val):
assert np.isnan(float_frame[col][idx])
else:
assert val == float_frame[col][idx]
else:
assert np.isnan(val)
for col, series in nonContigFrame.items():
tm.assert_index_equal(series.index, nonContigFrame.index)
# corner cases
newFrame = float_frame.reindex(float_frame.index)
assert newFrame.index.is_(float_frame.index)
# length zero
newFrame = float_frame.reindex([])
assert newFrame.empty
assert len(newFrame.columns) == len(float_frame.columns)
# length zero with columns reindexed with non-empty index
newFrame = float_frame.reindex([])
newFrame = newFrame.reindex(float_frame.index)
assert len(newFrame.index) == len(float_frame.index)
assert len(newFrame.columns) == len(float_frame.columns)
# pass non-Index
newFrame = float_frame.reindex(list(datetime_series.index))
expected = datetime_series.index._with_freq(None)
tm.assert_index_equal(newFrame.index, expected)
# copy with no axes
result = float_frame.reindex()
tm.assert_frame_equal(result, float_frame)
assert result is not float_frame
def test_reindex_nan(self):
df = DataFrame(
[[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=["joe", "jim"],
)
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
tm.assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype("object")
tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = DataFrame(
{
"other": ["a", "b", np.nan, "c"],
"date": ["2015-03-22", np.nan, "2012-01-08", np.nan],
"amount": [2, 3, 4, 5],
}
)
df["date"] = pd.to_datetime(df.date)
df["delta"] = (pd.to_datetime("2015-06-18") - df["date"]).shift(1)
left = df.set_index(["delta", "other", "date"]).reset_index()
right = df.reindex(columns=["delta", "other", "date", "amount"])
tm.assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(np.random.default_rng(2).random(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name="iname")
df = df.reindex(i)
assert df.index.name == "iname"
df = df.reindex(Index(np.arange(10), name="tmpname"))
assert df.index.name == "tmpname"
s = Series(np.random.default_rng(2).random(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name="iname")
df = df.reindex(columns=i)
assert df.columns.name == "iname"
def test_reindex_int(self, int_frame):
smaller = int_frame.reindex(int_frame.index[::2])
assert smaller["A"].dtype == np.int64
bigger = smaller.reindex(int_frame.index)
assert bigger["A"].dtype == np.float64
smaller = int_frame.reindex(columns=["A", "B"])
assert smaller["A"].dtype == np.int64
def test_reindex_columns(self, float_frame):
new_frame = float_frame.reindex(columns=["A", "B", "E"])
tm.assert_series_equal(new_frame["B"], float_frame["B"])
assert np.isnan(new_frame["E"]).all()
assert "C" not in new_frame
# Length zero
new_frame = float_frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
# GH 14992, reindexing over columns ignored method
df = DataFrame(
data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float,
)
# default method
result = df.reindex(columns=range(6))
expected = DataFrame(
data=[
[np.nan, 11, 12, np.nan, 13, np.nan],
[np.nan, 21, 22, np.nan, 23, np.nan],
[np.nan, 31, 32, np.nan, 33, np.nan],
],
index=[1, 2, 4],
columns=range(6),
dtype=float,
)
tm.assert_frame_equal(result, expected)
# method='ffill'
result = df.reindex(columns=range(6), method="ffill")
expected = DataFrame(
data=[
[np.nan, 11, 12, 12, 13, 13],
[np.nan, 21, 22, 22, 23, 23],
[np.nan, 31, 32, 32, 33, 33],
],
index=[1, 2, 4],
columns=range(6),
dtype=float,
)
tm.assert_frame_equal(result, expected)
# method='bfill'
result = df.reindex(columns=range(6), method="bfill")
expected = DataFrame(
data=[
[11, 11, 12, 13, 13, np.nan],
[21, 21, 22, 23, 23, np.nan],
[31, 31, 32, 33, 33, np.nan],
],
index=[1, 2, 4],
columns=range(6),
dtype=float,
)
tm.assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(
np.ones((3, 3)),
index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)],
columns=["a", "b", "c"],
)
msg = "'d' is deprecated and will be removed in a future version."
with tm.assert_produces_warning(Pandas4Warning, match=msg):
time_freq = date_range("2012-01-01", "2012-01-03", freq="d")
some_cols = ["a", "b"]
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
# axis=0
result = df.reindex(list(range(15)))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
tm.assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=range(5), fill_value=0.0)
expected = df.copy()
expected[4] = 0.0
tm.assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value=0)
expected = df.copy()
expected[4] = 0
tm.assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value="foo")
expected = df.copy()
expected[4] = "foo"
tm.assert_frame_equal(result, expected)
# other dtypes
df["foo"] = "foo"
result = df.reindex(range(15), fill_value="0")
expected = df.reindex(range(15)).fillna("0")
tm.assert_frame_equal(result, expected)
def test_reindex_uint_dtypes_fill_value(self, any_unsigned_int_numpy_dtype):
# GH#48184
df = DataFrame({"a": [1, 2], "b": [1, 2]}, dtype=any_unsigned_int_numpy_dtype)
result = df.reindex(columns=list("abcd"), index=[0, 1, 2, 3], fill_value=10)
expected = DataFrame(
{"a": [1, 2, 10, 10], "b": [1, 2, 10, 10], "c": 10, "d": 10},
dtype=any_unsigned_int_numpy_dtype,
)
tm.assert_frame_equal(result, expected)
def test_reindex_single_column_ea_index_and_columns(self, any_numeric_ea_dtype):
# GH#48190
df = DataFrame({"a": [1, 2]}, dtype=any_numeric_ea_dtype)
result = df.reindex(columns=list("ab"), index=[0, 1, 2], fill_value=10)
expected = DataFrame(
{"a": Series([1, 2, 10], dtype=any_numeric_ea_dtype), "b": 10}
)
tm.assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.default_rng(2).standard_normal(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
tm.assert_frame_equal(result, expected)
# reindex fails
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
df.reindex(index=list(range(len(df))))
def test_reindex_with_duplicate_columns(self):
# reindex is invalid!
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar"])
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar", "foo"])
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = DataFrame(
{"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, index=[0, 1, 3]
)
result = df.reindex([0, 1, 3])
tm.assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
tm.assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis="index")
tm.assert_frame_equal(result, expected)
def test_reindex_positional_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
# Enforced in 2.0
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
msg = r"reindex\(\) takes from 1 to 2 positional arguments but 3 were given"
with pytest.raises(TypeError, match=msg):
df.reindex([0, 1], ["A", "B", "C"])
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], columns=["A"], axis=1)
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], columns=["A"], axis="index")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="index")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="columns")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(columns=[0, 1], axis="columns")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], columns=[0, 1], axis="columns")
with pytest.raises(TypeError, match="Cannot specify all"):
df.reindex(labels=[0, 1], index=[0], columns=["A"])
# Mixing styles
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="index")
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis="columns")
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=["A"])
expected = DataFrame({"A": [1, 2]})
tm.assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
# https://github.com/pandas-dev/pandas/issues/12392
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_reindex_boolean(self):
frame = DataFrame(
np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2]
)
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=range(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self, float_string_frame):
reindexed = float_string_frame.reindex(columns=["foo", "A", "B"])
assert "foo" in reindexed
reindexed = float_string_frame.reindex(columns=["A", "B"])
assert "foo" not in reindexed
def test_reindex_corner(self, int_frame):
index = Index(["a", "b", "c"])
dm = DataFrame({}).reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = int_frame.reindex(columns=["A", "B", "E"])
assert smaller["E"].dtype == np.float64
def test_reindex_with_nans(self):
df = DataFrame(
[[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=["a", "b"],
index=[100.0, 101.0, np.nan, 102.0, 103.0],
)
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
tm.assert_frame_equal(result, expected)
def test_reindex_without_upcasting(self):
# GH45857
df = DataFrame(np.zeros((10, 10), dtype=np.float32))
result = df.reindex(columns=np.arange(5, 15))
assert result.dtypes.eq(np.float32).all()
def test_reindex_multi(self):
df = DataFrame(np.random.default_rng(2).standard_normal((3, 3)))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
tm.assert_frame_equal(result, expected)
df = DataFrame(np.random.default_rng(2).integers(0, 10, (3, 3)))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
tm.assert_frame_equal(result, expected)
df = DataFrame(np.random.default_rng(2).integers(0, 10, (3, 3)))
result = df.reindex(index=range(2), columns=range(2))
expected = df.reindex(range(2)).reindex(columns=range(2))
tm.assert_frame_equal(result, expected)
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)) + 1j,
columns=["a", "b", "c"],
)
result = df.reindex(index=[0, 1], columns=["a", "b"])
expected = df.reindex([0, 1]).reindex(columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_reindex_multi_categorical_time(self):
# https://github.com/pandas-dev/pandas/issues/21390
midx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(date_range("2012-01-01", periods=3, freq="h")),
]
)
df = DataFrame({"a": range(len(midx))}, index=midx)
df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]]
result = df2.reindex(midx)
expected = DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx)
tm.assert_frame_equal(result, expected)
def test_reindex_with_categoricalindex(self):
df = DataFrame(
{
"A": np.arange(3, dtype="int64"),
},
index=CategoricalIndex(
list("abc"), dtype=CategoricalDtype(list("cabe")), name="B"
),
)
# reindexing
# convert to a regular index
result = df.reindex(["a", "b", "e"])
expected = DataFrame({"A": [0, 1, np.nan], "B": Series(list("abe"))}).set_index(
"B"
)
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["a", "b"])
expected = DataFrame({"A": [0, 1], "B": Series(list("ab"))}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["e"])
expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["d"])
expected = DataFrame({"A": [np.nan], "B": Series(["d"])}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
# since we are actually reindexing with a Categorical
# then return a Categorical
cats = list("cabe")
result = df.reindex(Categorical(["a", "e"], categories=cats))
expected = DataFrame(
{"A": [0, np.nan], "B": Series(list("ae")).astype(CategoricalDtype(cats))}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(Categorical(["a"], categories=cats))
expected = DataFrame(
{"A": [0], "B": Series(list("a")).astype(CategoricalDtype(cats))}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["a", "b", "e"])
expected = DataFrame({"A": [0, 1, np.nan], "B": Series(list("abe"))}).set_index(
"B"
)
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["a", "b"])
expected = DataFrame({"A": [0, 1], "B": Series(list("ab"))}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(["e"])
expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
# give back the type of categorical that we received
result = df.reindex(Categorical(["a", "e"], categories=cats, ordered=True))
expected = DataFrame(
{
"A": [0, np.nan],
"B": Series(list("ae")).astype(CategoricalDtype(cats, ordered=True)),
}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
result = df.reindex(Categorical(["a", "d"], categories=["a", "d"]))
expected = DataFrame(
{
"A": [0, np.nan],
"B": Series(list("ad")).astype(CategoricalDtype(["a", "d"])),
}
).set_index("B")
tm.assert_frame_equal(result, expected, check_index_type=True)
df2 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
list("aabbca"), dtype=CategoricalDtype(list("cabe")), name="B"
),
)
# passed duplicate indexers are not allowed
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
df2.reindex(["a", "b"])
# args NotImplemented ATM
msg = r"argument {} is not implemented for CategoricalIndex\.reindex"
with pytest.raises(NotImplementedError, match=msg.format("method")):
df.reindex(["a"], method="ffill")
with pytest.raises(NotImplementedError, match=msg.format("level")):
df.reindex(["a"], level=1)
with pytest.raises(NotImplementedError, match=msg.format("limit")):
df.reindex(["a"], limit=2)
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {
"self",
"labels",
"index",
"columns",
"axis",
"limit",
"copy",
"level",
"method",
"fill_value",
"tolerance",
}
def test_reindex_multiindex_ffill_added_rows(self):
# GH#23693
# reindex added rows with nan values even when fill method was specified
mi = MultiIndex.from_tuples([("a", "b"), ("d", "e")])
df = DataFrame([[0, 7], [3, 4]], index=mi, columns=["x", "y"])
mi2 = MultiIndex.from_tuples([("a", "b"), ("d", "e"), ("h", "i")])
result = df.reindex(mi2, axis=0, method="ffill")
expected = DataFrame([[0, 7], [3, 4], [3, 4]], index=mi2, columns=["x", "y"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs",
[
{"method": "pad", "tolerance": timedelta(seconds=9)},
{"method": "backfill", "tolerance": timedelta(seconds=9)},
{"method": "nearest"},
{"method": None},
],
)
def test_reindex_empty_frame(self, kwargs):
# GH#27315
idx = date_range(start="2020", freq="30s", periods=3)
df = DataFrame([], index=Index([], name="time"), columns=["a"])
result = df.reindex(idx, **kwargs)
expected = DataFrame({"a": [np.nan] * 3}, index=idx, dtype=object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("src_idx", [Index, CategoricalIndex])
@pytest.mark.parametrize(
"cat_idx",
[
# No duplicates
Index([]),
CategoricalIndex([]),
Index(["A", "B"]),
CategoricalIndex(["A", "B"]),
# Duplicates: GH#38906
Index(["A", "A"]),
CategoricalIndex(["A", "A"]),
],
)
def test_reindex_empty(self, src_idx, cat_idx):
df = DataFrame(columns=src_idx([]), index=["K"], dtype="f8")
result = df.reindex(columns=cat_idx)
expected = DataFrame(index=["K"], columns=cat_idx, dtype="f8")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]"])
def test_reindex_datetimelike_to_object(self, dtype):
# GH#39755 dont cast dt64/td64 to ints
mi = MultiIndex.from_product([list("ABCDE"), range(2)])
dti = date_range("2016-01-01", periods=10)
fv = np.timedelta64("NaT", "ns")
if dtype == "m8[ns]":
dti = dti - dti[0]
fv = np.datetime64("NaT", "ns")
ser = Series(dti, index=mi)
ser[::3] = pd.NaT
df = ser.unstack()
index = df.index.append(Index([1]))
columns = df.columns.append(Index(["foo"]))
res = df.reindex(index=index, columns=columns, fill_value=fv)
expected = DataFrame(
{
0: df[0].tolist() + [fv],
1: df[1].tolist() + [fv],
"foo": np.array(["NaT"] * 6, dtype=fv.dtype),
},
index=index,
)
assert (res.dtypes[[0, 1]] == object).all()
assert res.iloc[0, 0] is pd.NaT
assert res.iloc[-1, 0] is fv
assert res.iloc[-1, 1] is fv
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize("klass", [Index, CategoricalIndex])
@pytest.mark.parametrize("data", ["A", "B"])
def test_reindex_not_category(self, klass, data):
# GH#28690
df = DataFrame(index=CategoricalIndex([], categories=["A"]))
idx = klass([data])
result = df.reindex(index=idx)
expected = DataFrame(index=idx)
tm.assert_frame_equal(result, expected)
def test_invalid_method(self):
df = DataFrame({"A": [1, np.nan, 2]})
msg = "Invalid fill method"
with pytest.raises(ValueError, match=msg):
df.reindex([1, 0, 2], method="asfreq")
def test_reindex_index_name_matches_multiindex_level(self):
df = DataFrame(
{"value": [1, 2], "other": ["A", "B"]},
index=Index([10, 20], name="a"),
)
target = MultiIndex.from_product(
[[10, 20], ["x", "y"]],
names=["a", "b"],
)
result = df.reindex(index=target)
expected = DataFrame(
data={"value": [1, 1, 2, 2], "other": ["A", "A", "B", "B"]},
index=MultiIndex.from_product([[10, 20], ["x", "y"]], names=["a", "b"]),
)
tm.assert_frame_equal(result, expected)
def test_reindex_index_name_no_match_multiindex_level(self):
df = DataFrame({"value": [1, 2]}, index=Index([10, 20], name="different_name"))
target = MultiIndex.from_product([[10, 20], ["x", "y"]], names=["a", "b"])
result = df.reindex(index=target)
expected = DataFrame(
data={"value": [np.nan] * 4},
index=MultiIndex.from_product([[10, 20], ["x", "y"]], names=["a", "b"]),
)
tm.assert_frame_equal(result, expected)
| TestDataFrameSelectReindex |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_extend.py | {
"start": 291,
"end": 524
} | class ____(FooBase):
def bar(self):
return 2
@classmethod
def baz(cls):
return 2
EXTEND_PATH = __name__ + ".Foo"
EXTEND_BASE_PATH = __name__ + ".FooBase"
EXTEND_OVERRIDE_PATH = __name__ + ".NewFoo"
| NewFoo |
python | weaviate__weaviate-python-client | weaviate/embedded.py | {
"start": 834,
"end": 1386
} | class ____:
persistence_data_path: str = os.environ.get("XDG_DATA_HOME", DEFAULT_PERSISTENCE_DATA_PATH)
binary_path: str = os.environ.get("XDG_CACHE_HOME", DEFAULT_BINARY_PATH)
version: str = WEAVIATE_VERSION
port: int = DEFAULT_PORT
hostname: str = "127.0.0.1"
additional_env_vars: Optional[Dict[str, str]] = None
grpc_port: int = DEFAULT_GRPC_PORT
def get_random_port() -> int:
sock = socket.socket()
sock.bind(("", 0))
port_num = int(sock.getsockname()[1])
sock.close()
return port_num
| EmbeddedOptions |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_non_null.py | {
"start": 874,
"end": 1446
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.nonnull"
filter_column_isnull = False
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return ~column.isnull()
@column_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column, **kwargs):
return column != None # noqa: E711 # FIXME CoP
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
return column.isNotNull()
| ColumnValuesNonNull |
python | apache__airflow | airflow-core/tests/unit/cluster_policies/__init__.py | {
"start": 3202,
"end": 3733
} | class ____(BaseOperator, ABC):
timeout: timedelta
def task_policy(task: TimedOperator):
if task.task_type == "HivePartitionSensor":
task.queue = "sensor_queue"
if task.timeout > timedelta(hours=48):
task.timeout = timedelta(hours=48)
# [END example_task_cluster_policy]
# [START example_task_mutation_hook]
def task_instance_mutation_hook(task_instance: TaskInstance):
if task_instance.try_number >= 1:
task_instance.queue = "retry_queue"
# [END example_task_mutation_hook]
| TimedOperator |
python | huggingface__transformers | tests/quantization/hqq/test_hqq.py | {
"start": 7694,
"end": 10218
} | class ____(unittest.TestCase):
def tearDown(self):
cleanup()
def test_model_serialization(self):
"""
Simple HQQ LLM save/load test
"""
quant_config = HqqConfig(nbits=4, group_size=64)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
input_tensor = torch.zeros((1, 8), dtype=torch.int32, device=torch_device)
with torch.no_grad():
logits_ref = hqq_runner.model.forward(input_tensor).logits
# Save
saved_model_id = "quant_model"
hqq_runner.model.save_pretrained(saved_model_id)
# Remove old model
del hqq_runner.model
backend_empty_cache(torch_device)
# Load and check if the logits match
model_loaded = AutoModelForCausalLM.from_pretrained(
"quant_model",
dtype=torch.float16,
device_map=torch_device,
)
with torch.no_grad():
logits_loaded = model_loaded.forward(input_tensor).logits
self.assertEqual((logits_loaded - logits_ref).abs().mean().item(), 0)
def test_model_serialization_dynamic_quant_with_skip(self):
"""
Simple HQQ LLM save/load test with dynamic quant
"""
q4_config = {"nbits": 4, "group_size": 64}
q3_config = {"nbits": 3, "group_size": 64}
quant_config = HqqConfig(
dynamic_config={
"self_attn.q_proj": q4_config,
"self_attn.k_proj": q4_config,
"self_attn.v_proj": q4_config,
"self_attn.o_proj": q4_config,
"mlp.gate_proj": q3_config,
"mlp.up_proj": q3_config,
},
skip_modules=["lm_head", "down_proj"],
)
hqq_runner = HQQLLMRunner(
model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
model = hqq_runner.model
input_tensor = torch.zeros((1, 8), dtype=torch.int32, device=torch_device)
with torch.no_grad():
model.forward(input_tensor).logits
self.assertEqual(isinstance(model.model.layers[1].mlp.down_proj, torch.nn.Linear), True)
self.assertEqual(model.model.layers[1].self_attn.v_proj.quant_config["weight_quant_params"]["nbits"], 4)
self.assertEqual(model.model.layers[1].mlp.gate_proj.quant_config["weight_quant_params"]["nbits"], 3)
| HQQSerializationTest |
python | getsentry__sentry | src/sentry/mail/actions.py | {
"start": 624,
"end": 3111
} | class ____(EventAction):
id = "sentry.mail.actions.NotifyEmailAction"
label = "Send a notification to {targetType} and if none can be found then send a notification to {fallthroughType}"
prompt = "Send a notification"
metrics_slug = "EmailAction"
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.form_fields = {
"targetType": {"type": "mailAction", "choices": ACTION_CHOICES},
"fallthroughType": {"type": "choice", "choices": FALLTHROUGH_CHOICES},
}
def render_label(self) -> str:
if "fallthroughType" not in self.data:
self.data = {**self.data, "fallthroughType": FallthroughChoiceType.ACTIVE_MEMBERS.value}
return self.label.format(**self.data)
def after(
self, event: GroupEvent | Event, notification_uuid: str | None = None
) -> Generator[CallbackFuture]:
group = event.group
assert group is not None
extra = {
"event_id": event.event_id,
"group_id": group.id,
"notification_uuid": notification_uuid,
}
target_type = ActionTargetType(self.data["targetType"])
target_identifier = self.data.get("targetIdentifier", None)
skip_digests = self.data.get("skipDigests", False)
fallthrough_choice = self.data.get("fallthroughType", None)
fallthrough_type = (
FallthroughChoiceType(fallthrough_choice)
if fallthrough_choice
else FallthroughChoiceType.ACTIVE_MEMBERS
)
if not determine_eligible_recipients(
group.project, target_type, target_identifier, event, fallthrough_type
):
self.logger.info("rule.fail.should_notify", extra=extra)
return None
metrics.incr(
"notifications.sent",
instance=self.metrics_slug,
tags={
"issue_type": group.issue_type.slug,
},
skip_internal=False,
)
yield self.future(
lambda event, futures: mail_adapter.rule_notify(
event,
futures,
target_type,
target_identifier,
fallthrough_type,
skip_digests,
notification_uuid,
)
)
def get_form_instance(self) -> NotifyEmailForm:
return NotifyEmailForm(self.project, self.data)
| NotifyEmailAction |
python | kamyu104__LeetCode-Solutions | Python/max-value-of-equation.py | {
"start": 50,
"end": 650
} | class ____(object):
def findMaxValueOfEquation(self, points, k):
"""
:type points: List[List[int]]
:type k: int
:rtype: int
"""
result = float("-inf")
dq = collections.deque()
for i, (x, y) in enumerate(points):
while dq and points[dq[0]][0] < x-k:
dq.popleft()
if dq:
result = max(result, (points[dq[0]][1]-points[dq[0]][0])+y+x)
while dq and points[dq[-1]][1]-points[dq[-1]][0] <= y-x:
dq.pop()
dq.append(i)
return result
| Solution |
python | huggingface__transformers | src/transformers/models/speech_to_text/modeling_speech_to_text.py | {
"start": 21229,
"end": 22890
} | class ____(PreTrainedModel):
config: Speech2TextConfig
base_model_prefix = "model"
main_input_name = "input_features"
supports_gradient_checkpointing = True
# TODO: tests would need a rewrite to check for correct implementation
# Current tests always assume certain inputs to be passed
_supports_flash_attn = False
_supports_sdpa = False
_supports_flex_attn = False
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.config.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
# generate creates 3D attention mask, because of the shape of input_features
# convert it to 2D if that's the case
if len(attention_mask.shape) > 2:
attention_mask = attention_mask[:, :, -1]
subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
bsz = attention_mask.size()[0]
attention_mask = torch.zeros(
(bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
# these two operations makes sure that all values
# before the output lengths indices are attended to
attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
return attention_mask
| Speech2TextPreTrainedModel |
python | psf__requests | src/requests/structures.py | {
"start": 170,
"end": 2470
} | class ____(MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = OrderedDict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
| CaseInsensitiveDict |
python | walkccc__LeetCode | solutions/788. Rotated Digits/788.py | {
"start": 0,
"end": 411
} | class ____:
def rotatedDigits(self, n: int) -> int:
def isGoodNumber(i: int) -> bool:
isRotated = False
for c in str(i):
if c == '0' or c == '1' or c == '8':
continue
if c == '2' or c == '5' or c == '6' or c == '9':
isRotated = True
else:
return False
return isRotated
return sum(isGoodNumber(i) for i in range(1, n + 1))
| Solution |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 59795,
"end": 59896
} | class ____:
float_inverted_index: Optional[FloatInvertedIndexType] = None
@dataclass
| FloatValueType |
python | Netflix__metaflow | metaflow/_vendor/click/exceptions.py | {
"start": 1076,
"end": 2200
} | class ____(ClickException):
"""An internal exception that signals a usage error. This typically
aborts any further handling.
:param message: the error message to display.
:param ctx: optionally the context that caused this error. Click will
fill in the context automatically in some situations.
"""
exit_code = 2
def __init__(self, message, ctx=None):
ClickException.__init__(self, message)
self.ctx = ctx
self.cmd = self.ctx.command if self.ctx else None
def show(self, file=None):
if file is None:
file = get_text_stderr()
color = None
hint = ""
if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None:
hint = "Try '{} {}' for help.\n".format(
self.ctx.command_path, self.ctx.help_option_names[0]
)
if self.ctx is not None:
color = self.ctx.color
echo("{}\n{}".format(self.ctx.get_usage(), hint), file=file, color=color)
echo("Error: {}".format(self.format_message()), file=file, color=color)
| UsageError |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 73544,
"end": 73813
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
permission_level: Optional[PermissionLevel] = None
user_name: Optional[UserName] = None
| AccessControlRequestForUser |
python | tox-dev__tox | src/tox/config/sets.py | {
"start": 9647,
"end": 11426
} | class ____(ConfigSet):
"""Configuration set for a tox environment."""
def __init__(self, conf: Config, section: Section, env_name: str) -> None:
super().__init__(conf, section, env_name)
self.default_set_env_loader: Callable[[], Mapping[str, str]] = dict
def register_config(self) -> None:
def set_env_post_process(values: SetEnv) -> SetEnv:
values.update(self.default_set_env_loader(), override=False)
values.update({"PYTHONIOENCODING": "utf-8"}, override=True)
return values
def set_env_factory(raw: object) -> SetEnv:
if not (
isinstance(raw, str)
or (isinstance(raw, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in raw.items()))
or (
isinstance(raw, list)
and all(
isinstance(e, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in e.items())
for e in raw
)
)
):
raise TypeError(raw)
return SetEnv(raw, self.name, self.env_name, root)
root = self._conf.core["tox_root"]
self.add_config(
keys=["set_env", "setenv"],
of_type=SetEnv,
factory=set_env_factory,
default=SetEnv("", self.name, self.env_name, root),
desc="environment variables to set when running commands in the tox environment",
post_process=set_env_post_process,
)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(name={self._env_name!r}, loaders={self.loaders!r})"
__all__ = (
"ConfigSet",
"CoreConfigSet",
"EnvConfigSet",
)
| EnvConfigSet |
python | apache__airflow | airflow-core/tests/integration/otel/test_otel.py | {
"start": 19502,
"end": 49472
} | class ____:
"""
This test is using a ConsoleSpanExporter so that it can capture
the spans from the stdout and run assertions on them.
It can also be used with otel and jaeger for manual testing.
To export the spans to otel and visualize them with jaeger,
- start breeze with '--integration otel'
- run on the shell 'export use_otel=true'
- run the test
- check 'http://localhost:36686/'
To get a db dump on the stdout, run 'export log_level=debug'.
"""
test_dir = os.path.dirname(os.path.abspath(__file__))
dag_folder = os.path.join(test_dir, "dags")
control_file = os.path.join(dag_folder, "dag_control.txt")
max_wait_seconds_for_pause = 180
use_otel = os.getenv("use_otel", default="false")
log_level = os.getenv("log_level", default="none")
celery_command_args = [
"celery",
"--app",
"airflow.providers.celery.executors.celery_executor.app",
"worker",
"--concurrency",
"1",
"--loglevel",
"INFO",
]
scheduler_command_args = [
"airflow",
"scheduler",
]
apiserver_command_args = [
"airflow",
"api-server",
"--port",
"8080",
"--daemon",
]
dags: dict[str, DAG] = {}
@classmethod
def setup_class(cls):
os.environ["AIRFLOW__TRACES__OTEL_ON"] = "True"
os.environ["AIRFLOW__TRACES__OTEL_HOST"] = "breeze-otel-collector"
os.environ["AIRFLOW__TRACES__OTEL_PORT"] = "4318"
if cls.use_otel != "true":
os.environ["AIRFLOW__TRACES__OTEL_DEBUGGING_ON"] = "True"
os.environ["AIRFLOW__SCHEDULER__STANDALONE_DAG_PROCESSOR"] = "False"
os.environ["AIRFLOW__SCHEDULER__PROCESSOR_POLL_INTERVAL"] = "2"
# The heartrate is determined by the conf "AIRFLOW__SCHEDULER__SCHEDULER_HEARTBEAT_SEC".
# By default, the heartrate is 5 seconds. Every iteration of the scheduler loop, checks the
# time passed since the last heartbeat and if it was longer than the 5 second heartrate,
# it performs a heartbeat update.
# If there hasn't been a heartbeat for an amount of time longer than the
# SCHEDULER_HEALTH_CHECK_THRESHOLD, then the scheduler is considered unhealthy.
# Approximately, there is a scheduler heartbeat every 5-6 seconds. Set the threshold to 15.
os.environ["AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_THRESHOLD"] = "15"
os.environ["AIRFLOW__CORE__DAGS_FOLDER"] = f"{cls.dag_folder}"
os.environ["AIRFLOW__CORE__LOAD_EXAMPLES"] = "False"
os.environ["AIRFLOW__CORE__PLUGINS_FOLDER"] = "/dev/null"
os.environ["AIRFLOW__CORE__UNIT_TEST_MODE"] = "False"
if cls.log_level == "debug":
log.setLevel(logging.DEBUG)
# Reset the DB once at the beginning and serialize the dags.
reset_command = ["airflow", "db", "reset", "--yes"]
subprocess.run(reset_command, check=True, env=os.environ.copy())
migrate_command = ["airflow", "db", "migrate"]
subprocess.run(migrate_command, check=True, env=os.environ.copy())
cls.dags = cls.serialize_and_get_dags()
@classmethod
def serialize_and_get_dags(cls) -> dict[str, SerializedDAG]:
log.info("Serializing Dags from directory %s", cls.dag_folder)
# Load DAGs from the dag directory.
dag_bag = DagBag(dag_folder=cls.dag_folder, include_examples=False)
dag_ids = dag_bag.dag_ids
assert len(dag_ids) == 3
dag_dict: dict[str, SerializedDAG] = {}
with create_session() as session:
for dag_id in dag_ids:
dag = dag_bag.get_dag(dag_id)
assert dag is not None, f"DAG with ID {dag_id} not found."
# Sync the DAG to the database.
if AIRFLOW_V_3_0_PLUS:
from airflow.models.dagbundle import DagBundleModel
if session.query(DagBundleModel).filter(DagBundleModel.name == "testing").count() == 0:
session.add(DagBundleModel(name="testing"))
session.commit()
SerializedDAG.bulk_write_to_db(
bundle_name="testing", bundle_version=None, dags=[dag], session=session
)
dag_dict[dag_id] = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
else:
dag.sync_to_db(session=session)
dag_dict[dag_id] = dag
# Manually serialize the dag and write it to the db to avoid a db error.
if AIRFLOW_V_3_1_PLUS:
from airflow.serialization.serialized_objects import LazyDeserializedDAG
SerializedDagModel.write_dag(
LazyDeserializedDAG.from_dag(dag), bundle_name="testing", session=session
)
else:
SerializedDagModel.write_dag(dag, bundle_name="testing", session=session)
session.commit()
TESTING_BUNDLE_CONFIG = [
{
"name": "testing",
"classpath": "airflow.dag_processing.bundles.local.LocalDagBundle",
"kwargs": {"path": f"{cls.dag_folder}", "refresh_interval": 1},
}
]
os.environ["AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST"] = json.dumps(TESTING_BUNDLE_CONFIG)
# Initial add
manager = DagBundlesManager()
manager.sync_bundles_to_db()
return dag_dict
@pytest.fixture
def celery_worker_env_vars(self, monkeypatch):
os.environ["AIRFLOW__CORE__EXECUTOR"] = "CeleryExecutor"
executor_name = ExecutorName(
module_path="airflow.providers.celery.executors.celery_executor.CeleryExecutor",
alias="CeleryExecutor",
)
monkeypatch.setattr(
executor_loader, "_alias_to_executors_per_team", {None: {"CeleryExecutor": executor_name}}
)
@pytest.fixture(autouse=True)
def cleanup_control_file_if_needed(self):
# Don't do anything before yield.
# This will run after each test and clean up the control file in case of failure.
yield
try:
if os.path.exists(self.control_file):
os.remove(self.control_file)
except Exception as ex:
log.error("Could not delete leftover control file '%s', error: '%s'.", self.control_file, ex)
@pytest.mark.execution_timeout(90)
def test_dag_execution_succeeds(self, monkeypatch, celery_worker_env_vars, capfd, session):
    """The same scheduler will start and finish the dag processing."""
    celery_worker_process = None
    scheduler_process = None
    apiserver_process = None
    try:
        # Start the processes here and not as fixtures or in a common setup,
        # so that the test can capture their output.
        celery_worker_process, scheduler_process, apiserver_process = self.start_worker_and_scheduler1()

        dag_id = "otel_test_dag"
        assert len(self.dags) > 0
        dag = self.dags[dag_id]
        assert dag is not None

        run_id = unpause_trigger_dag_and_get_run_id(dag_id=dag_id)

        # Skip the span_status check.
        wait_for_dag_run_and_check_span_status(
            dag_id=dag_id, run_id=run_id, max_wait_time=90, span_status=None
        )

        # The ti span_status is updated while processing the executor events,
        # which is after the dag_run state has been updated.
        time.sleep(10)

        with create_session() as session:
            task_ids = session.scalars(select(TaskInstance.task_id).where(TaskInstance.dag_id == dag_id))
            for task_id in task_ids:
                # Skip the span_status check.
                check_ti_state_and_span_status(
                    task_id=task_id, run_id=run_id, state=State.SUCCESS, span_status=None
                )

        print_ti_output_for_dag_run(dag_id=dag_id, run_id=run_id)
    finally:
        if self.log_level == "debug":
            with create_session() as session:
                dump_airflow_metadata_db(session)

        # Terminate the processes.
        # Guard against None handles: if start_worker_and_scheduler1() raised
        # before all processes were spawned, an unguarded .terminate() would
        # raise an AttributeError here and mask the original test failure.
        if celery_worker_process is not None:
            celery_worker_process.terminate()
            celery_worker_process.wait()

            celery_status = celery_worker_process.poll()
            assert celery_status is not None, (
                "The celery worker process status is None, which means that it hasn't terminated as expected."
            )

        if scheduler_process is not None:
            scheduler_process.terminate()
            scheduler_process.wait()

            scheduler_status = scheduler_process.poll()
            assert scheduler_status is not None, (
                "The scheduler_1 process status is None, which means that it hasn't terminated as expected."
            )

        if apiserver_process is not None:
            apiserver_process.terminate()
            apiserver_process.wait()

            apiserver_status = apiserver_process.poll()
            assert apiserver_status is not None, (
                "The apiserver process status is None, which means that it hasn't terminated as expected."
            )

        out, err = capfd.readouterr()
        log.info("out-start --\n%s\n-- out-end", out)
        log.info("err-start --\n%s\n-- err-end", err)
@pytest.mark.execution_timeout(90)
def test_same_scheduler_processing_the_entire_dag(
    self, monkeypatch, celery_worker_env_vars, capfd, session
):
    """The same scheduler will start and finish the dag processing."""
    celery_worker_process = None
    scheduler_process = None
    apiserver_process = None
    dag = None
    try:
        # Start the processes here and not as fixtures or in a common setup,
        # so that the test can capture their output.
        celery_worker_process, scheduler_process, apiserver_process = self.start_worker_and_scheduler1()

        dag_id = "otel_test_dag"
        assert len(self.dags) > 0
        dag = self.dags[dag_id]
        assert dag is not None

        run_id = unpause_trigger_dag_and_get_run_id(dag_id=dag_id)

        wait_for_dag_run_and_check_span_status(
            dag_id=dag_id, run_id=run_id, max_wait_time=90, span_status=SpanStatus.ENDED
        )

        # The ti span_status is updated while processing the executor events,
        # which is after the dag_run state has been updated.
        time.sleep(10)

        with create_session() as session:
            for ti in session.scalars(select(TaskInstance).where(TaskInstance.dag_id == dag.dag_id)):
                check_ti_state_and_span_status(
                    task_id=ti.task_id, run_id=run_id, state=State.SUCCESS, span_status=SpanStatus.ENDED
                )

        print_ti_output_for_dag_run(dag_id=dag_id, run_id=run_id)
    finally:
        if self.log_level == "debug":
            with create_session() as session:
                dump_airflow_metadata_db(session)

        # Terminate the processes.
        # Guard against None handles: if start_worker_and_scheduler1() raised
        # before all processes were spawned, an unguarded .terminate() would
        # raise an AttributeError here and mask the original test failure.
        if celery_worker_process is not None:
            celery_worker_process.terminate()
            celery_worker_process.wait()

            celery_status = celery_worker_process.poll()
            assert celery_status is not None, (
                "The celery worker process status is None, which means that it hasn't terminated as expected."
            )

        if scheduler_process is not None:
            scheduler_process.terminate()
            scheduler_process.wait()

            scheduler_status = scheduler_process.poll()
            assert scheduler_status is not None, (
                "The scheduler_1 process status is None, which means that it hasn't terminated as expected."
            )

        if apiserver_process is not None:
            apiserver_process.terminate()
            apiserver_process.wait()

            apiserver_status = apiserver_process.poll()
            assert apiserver_status is not None, (
                "The apiserver process status is None, which means that it hasn't terminated as expected."
            )

        out, err = capfd.readouterr()
        log.info("out-start --\n%s\n-- out-end", out)
        log.info("err-start --\n%s\n-- err-end", err)

        # Only check spans if the dag was actually resolved; otherwise the test
        # already failed during startup and `dag` would be None.
        if self.use_otel != "true" and dag is not None:
            # Dag run should have succeeded. Test the spans from the output.
            check_spans_without_continuance(output=out, dag=dag)
@pytest.mark.execution_timeout(90)
def test_scheduler_change_after_the_first_task_finishes(
    self, monkeypatch, celery_worker_env_vars, capfd, session
):
    """
    The scheduler thread will be paused after the first task ends and a new scheduler process
    will handle the rest of the dag processing. The paused thread will be resumed afterwards.
    """
    # For this test, scheduler1 must be idle but still considered healthy by scheduler2.
    # If scheduler2 marks the job as unhealthy, then it will recreate scheduler1's spans
    # because it will consider them lost.
    os.environ["AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_THRESHOLD"] = "90"

    celery_worker_process = None
    scheduler_process_1 = None
    apiserver_process = None
    scheduler_process_2 = None
    dag = None
    try:
        # Start the processes here and not as fixtures or in a common setup,
        # so that the test can capture their output.
        celery_worker_process, scheduler_process_1, apiserver_process = self.start_worker_and_scheduler1()

        dag_id = "otel_test_dag_with_pause_between_tasks"
        dag = self.dags[dag_id]

        run_id = unpause_trigger_dag_and_get_run_id(dag_id=dag_id)

        deadline = time.monotonic() + self.max_wait_seconds_for_pause
        while True:
            # To avoid get stuck waiting.
            if time.monotonic() > deadline:
                raise TimeoutError(
                    f"Timed out waiting for 'pause' to appear in {self.control_file}, after {self.max_wait_seconds_for_pause} seconds."
                )
            try:
                with open(self.control_file) as file:
                    file_contents = file.read()

                if "pause" in file_contents:
                    log.info("Control file exists and the task has been paused.")
                    break

                time.sleep(1)
                continue
            except FileNotFoundError:
                print("Control file not found. Waiting...")
                time.sleep(3)
                continue

        with capfd.disabled():
            # When the scheduler1 thread is paused, capfd keeps trying to read the
            # file descriptors for the process and ends up freezing the test.
            # Temporarily disable capfd to avoid that.
            scheduler_process_1.send_signal(signal.SIGSTOP)

            check_dag_run_state_and_span_status(
                dag_id=dag_id, run_id=run_id, state=State.RUNNING, span_status=SpanStatus.ACTIVE
            )

            # Start the 2nd scheduler immediately without any delay to avoid having the 1st scheduler
            # marked as unhealthy. If that happens, then the 2nd will recreate the spans that the
            # 1st scheduler started.
            # The scheduler would also be considered unhealthy in case it was paused
            # and the dag run continued running.
            scheduler_process_2 = subprocess.Popen(
                self.scheduler_command_args,
                env=os.environ.copy(),
                stdout=None,
                stderr=None,
            )

            # Rewrite the file to unpause the dag.
            with open(self.control_file, "w") as file:
                file.write("continue")

            wait_for_dag_run_and_check_span_status(
                dag_id=dag_id, run_id=run_id, max_wait_time=120, span_status=SpanStatus.SHOULD_END
            )

            # Stop scheduler2 in case it still has a db lock on the dag_run.
            scheduler_process_2.terminate()

            scheduler_process_1.send_signal(signal.SIGCONT)

        # Wait for the scheduler to start again and continue running.
        time.sleep(10)

        wait_for_dag_run_and_check_span_status(
            dag_id=dag_id, run_id=run_id, max_wait_time=30, span_status=SpanStatus.ENDED
        )

        print_ti_output_for_dag_run(dag_id=dag_id, run_id=run_id)
    finally:
        if self.log_level == "debug":
            with create_session() as session:
                dump_airflow_metadata_db(session)

        # Reset for the rest of the tests.
        os.environ["AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_THRESHOLD"] = "15"

        # Terminate the processes.
        # Every handle can still be None if the test failed before the process was
        # spawned; guard each one so the cleanup doesn't raise an AttributeError
        # that would mask the original test failure.
        if celery_worker_process is not None:
            celery_worker_process.terminate()
            celery_worker_process.wait()

        if scheduler_process_1 is not None:
            scheduler_process_1.terminate()
            scheduler_process_1.wait()

        if apiserver_process is not None:
            apiserver_process.terminate()
            apiserver_process.wait()

        if scheduler_process_2 is not None:
            # On the happy path, scheduler2 was already terminated inside the try
            # block; terminating an already-finished process is a no-op, and the
            # wait() reaps it in either case.
            scheduler_process_2.terminate()
            scheduler_process_2.wait()

        out, err = capfd.readouterr()
        log.info("out-start --\n%s\n-- out-end", out)
        log.info("err-start --\n%s\n-- err-end", err)

        # Only check spans if the dag was actually resolved; otherwise the test
        # already failed during startup and `dag` would be None.
        if self.use_otel != "true" and dag is not None:
            # Dag run should have succeeded. Test the spans in the output.
            check_spans_for_paused_dag(output=out, dag=dag, is_recreated=False, check_t1_sub_spans=False)
@pytest.mark.execution_timeout(90)
def test_scheduler_exits_gracefully_in_the_middle_of_the_first_task(
    self, monkeypatch, celery_worker_env_vars, capfd, session
):
    """
    The scheduler that starts the dag run will be stopped, while the first task is executing,
    and start a new scheduler will be started. That way, the new process will pick up the dag processing.
    The initial scheduler will exit gracefully.
    """
    celery_worker_process = None
    scheduler_process_1 = None
    apiserver_process = None
    scheduler_process_2 = None
    dag = None
    try:
        # Start the processes here and not as fixtures or in a common setup,
        # so that the test can capture their output.
        celery_worker_process, scheduler_process_1, apiserver_process = self.start_worker_and_scheduler1()

        dag_id = "otel_test_dag_with_pause_in_task"
        dag = self.dags[dag_id]

        run_id = unpause_trigger_dag_and_get_run_id(dag_id=dag_id)

        deadline = time.monotonic() + self.max_wait_seconds_for_pause
        while True:
            # To avoid get stuck waiting.
            if time.monotonic() > deadline:
                raise TimeoutError(
                    f"Timed out waiting for 'pause' to appear in {self.control_file}, after {self.max_wait_seconds_for_pause} seconds."
                )
            try:
                with open(self.control_file) as file:
                    file_contents = file.read()

                if "pause" in file_contents:
                    log.info("Control file exists and the task has been paused.")
                    break

                time.sleep(1)
                continue
            except FileNotFoundError:
                print("Control file not found. Waiting...")
                time.sleep(3)
                continue

        # Since, we are past the loop, then the file exists and the dag has been paused.
        # Terminate scheduler1 and start scheduler2.
        with capfd.disabled():
            scheduler_process_1.terminate()
            assert scheduler_process_1.wait() == 0

        check_dag_run_state_and_span_status(
            dag_id=dag_id, run_id=run_id, state=State.RUNNING, span_status=SpanStatus.NEEDS_CONTINUANCE
        )

        scheduler_process_2 = subprocess.Popen(
            self.scheduler_command_args,
            env=os.environ.copy(),
            stdout=None,
            stderr=None,
        )

        # Rewrite the file to unpause the dag.
        with open(self.control_file, "w") as file:
            file.write("continue")

        wait_for_dag_run_and_check_span_status(
            dag_id=dag_id, run_id=run_id, max_wait_time=120, span_status=SpanStatus.ENDED
        )

        print_ti_output_for_dag_run(dag_id=dag_id, run_id=run_id)
    finally:
        if self.log_level == "debug":
            with create_session() as session:
                dump_airflow_metadata_db(session)

        # Terminate the processes.
        # Every handle can still be None if the test failed before the process was
        # spawned; guard each one so the cleanup doesn't raise an AttributeError
        # that would mask the original test failure.
        if celery_worker_process is not None:
            celery_worker_process.terminate()
            celery_worker_process.wait()

        if apiserver_process is not None:
            apiserver_process.terminate()
            apiserver_process.wait()

        if scheduler_process_1 is not None:
            # On the happy path, scheduler1 was already terminated and reaped inside
            # the try block, so both calls are no-ops then; this only matters when
            # the test failed before reaching that point.
            scheduler_process_1.terminate()
            scheduler_process_1.wait()

        if scheduler_process_2 is not None:
            scheduler_process_2.terminate()
            scheduler_process_2.wait()

        out, err = capfd.readouterr()
        log.info("out-start --\n%s\n-- out-end", out)
        log.info("err-start --\n%s\n-- err-end", err)

        # Only check spans if the dag was actually resolved; otherwise the test
        # already failed during startup and `dag` would be None.
        if self.use_otel != "true" and dag is not None:
            # Dag run should have succeeded. Test the spans in the output.
            check_spans_with_continuance(output=out, dag=dag)
@pytest.mark.execution_timeout(90)
def test_scheduler_exits_forcefully_in_the_middle_of_the_first_task(
    self, monkeypatch, celery_worker_env_vars, capfd, session
):
    """
    The first scheduler will exit forcefully while the first task is running,
    so that it won't have time end any active spans.
    """
    celery_worker_process = None
    scheduler_process_1 = None
    scheduler_process_2 = None
    apiserver_process = None
    dag = None
    try:
        # Start the processes here and not as fixtures or in a common setup,
        # so that the test can capture their output.
        celery_worker_process, scheduler_process_1, apiserver_process = self.start_worker_and_scheduler1()

        dag_id = "otel_test_dag_with_pause_in_task"
        dag = self.dags[dag_id]

        run_id = unpause_trigger_dag_and_get_run_id(dag_id=dag_id)

        deadline = time.monotonic() + self.max_wait_seconds_for_pause
        while True:
            # To avoid get stuck waiting.
            if time.monotonic() > deadline:
                raise TimeoutError(
                    f"Timed out waiting for 'pause' to appear in {self.control_file}, after {self.max_wait_seconds_for_pause} seconds."
                )
            try:
                with open(self.control_file) as file:
                    file_contents = file.read()

                if "pause" in file_contents:
                    log.info("Control file exists and the task has been paused.")
                    break

                time.sleep(1)
                continue
            except FileNotFoundError:
                print("Control file not found. Waiting...")
                time.sleep(3)
                continue

        # Since, we are past the loop, then the file exists and the dag has been paused.
        # Kill scheduler1 and start scheduler2.
        with capfd.disabled():
            scheduler_process_1.send_signal(signal.SIGKILL)

        # The process shouldn't have changed the span_status.
        check_dag_run_state_and_span_status(
            dag_id=dag_id, run_id=run_id, state=State.RUNNING, span_status=SpanStatus.ACTIVE
        )

        # Wait so that the health threshold passes and scheduler1 is considered unhealthy.
        time.sleep(15)

        scheduler_process_2 = subprocess.Popen(
            self.scheduler_command_args,
            env=os.environ.copy(),
            stdout=None,
            stderr=None,
        )

        # Wait for scheduler2 to be up and running.
        time.sleep(10)

        # Rewrite the file to unpause the dag.
        with open(self.control_file, "w") as file:
            file.write("continue")

        wait_for_dag_run_and_check_span_status(
            dag_id=dag_id, run_id=run_id, max_wait_time=120, span_status=SpanStatus.ENDED
        )

        print_ti_output_for_dag_run(dag_id=dag_id, run_id=run_id)
    finally:
        if self.log_level == "debug":
            with create_session() as session:
                dump_airflow_metadata_db(session)

        # Terminate the processes.
        # Every handle can still be None if the test failed before the process was
        # spawned; guard each one so the cleanup doesn't raise an AttributeError
        # that would mask the original test failure.
        if celery_worker_process is not None:
            celery_worker_process.terminate()
            celery_worker_process.wait()

        if apiserver_process is not None:
            apiserver_process.terminate()
            apiserver_process.wait()

        if scheduler_process_1 is not None:
            # scheduler1 was SIGKILLed during the test but never reaped, which
            # previously left a zombie process behind; terminate() is a no-op for
            # a dead process and wait() reaps it.
            scheduler_process_1.terminate()
            scheduler_process_1.wait()

        if scheduler_process_2 is not None:
            scheduler_process_2.terminate()
            scheduler_process_2.wait()

        out, err = capfd.readouterr()
        log.info("out-start --\n%s\n-- out-end", out)
        log.info("err-start --\n%s\n-- err-end", err)

        # Only check spans if the dag was actually resolved; otherwise the test
        # already failed during startup and `dag` would be None.
        if self.use_otel != "true" and dag is not None:
            # Dag run should have succeeded. Test the spans in the output.
            check_spans_without_continuance(output=out, dag=dag, is_recreated=True, check_t1_sub_spans=False)
@pytest.mark.execution_timeout(90)
def test_scheduler_exits_forcefully_after_the_first_task_finishes(
    self, monkeypatch, celery_worker_env_vars, capfd, session
):
    """
    The first scheduler will exit forcefully after the first task finishes,
    so that it won't have time to end any active spans.
    In this scenario, the sub-spans for the first task will be lost.
    The only way to retrieve them, would be to re-run the task.
    """
    celery_worker_process = None
    scheduler_process_1 = None
    apiserver_process = None
    scheduler_process_2 = None
    dag = None
    try:
        # Start the processes here and not as fixtures or in a common setup,
        # so that the test can capture their output.
        celery_worker_process, scheduler_process_1, apiserver_process = self.start_worker_and_scheduler1()

        dag_id = "otel_test_dag_with_pause_between_tasks"
        dag = self.dags[dag_id]

        run_id = unpause_trigger_dag_and_get_run_id(dag_id=dag_id)

        deadline = time.monotonic() + self.max_wait_seconds_for_pause
        while True:
            # To avoid get stuck waiting.
            if time.monotonic() > deadline:
                raise TimeoutError(
                    f"Timed out waiting for 'pause' to appear in {self.control_file}, after {self.max_wait_seconds_for_pause} seconds."
                )
            try:
                with open(self.control_file) as file:
                    file_contents = file.read()

                if "pause" in file_contents:
                    log.info("Control file exists and the task has been paused.")
                    break

                time.sleep(1)
                continue
            except FileNotFoundError:
                print("Control file not found. Waiting...")
                time.sleep(3)
                continue

        # Since, we are past the loop, then the file exists and the dag has been paused.
        # Kill scheduler1 and start scheduler2.
        with capfd.disabled():
            scheduler_process_1.send_signal(signal.SIGKILL)

        # The process shouldn't have changed the span_status.
        check_dag_run_state_and_span_status(
            dag_id=dag_id, run_id=run_id, state=State.RUNNING, span_status=SpanStatus.ACTIVE
        )

        # Rewrite the file to unpause the dag.
        with open(self.control_file, "w") as file:
            file.write("continue")

        time.sleep(15)

        # The task should be adopted.
        scheduler_process_2 = subprocess.Popen(
            self.scheduler_command_args,
            env=os.environ.copy(),
            stdout=None,
            stderr=None,
        )

        wait_for_dag_run_and_check_span_status(
            dag_id=dag_id, run_id=run_id, max_wait_time=120, span_status=SpanStatus.ENDED
        )

        print_ti_output_for_dag_run(dag_id=dag_id, run_id=run_id)
    finally:
        if self.log_level == "debug":
            with create_session() as session:
                dump_airflow_metadata_db(session)

        # Terminate the processes.
        # Every handle can still be None if the test failed before the process was
        # spawned; guard each one so the cleanup doesn't raise an AttributeError
        # that would mask the original test failure.
        if celery_worker_process is not None:
            celery_worker_process.terminate()
            celery_worker_process.wait()

        if apiserver_process is not None:
            apiserver_process.terminate()
            apiserver_process.wait()

        if scheduler_process_1 is not None:
            # scheduler1 was SIGKILLed during the test but never reaped, which
            # previously left a zombie process behind; terminate() is a no-op for
            # a dead process and wait() reaps it.
            scheduler_process_1.terminate()
            scheduler_process_1.wait()

        if scheduler_process_2 is not None:
            scheduler_process_2.terminate()
            scheduler_process_2.wait()

        out, err = capfd.readouterr()
        log.info("out-start --\n%s\n-- out-end", out)
        log.info("err-start --\n%s\n-- err-end", err)

        # Only check spans if the dag was actually resolved; otherwise the test
        # already failed during startup and `dag` would be None.
        if self.use_otel != "true" and dag is not None:
            # Dag run should have succeeded. Test the spans in the output.
            check_spans_for_paused_dag(output=out, dag=dag, is_recreated=True, check_t1_sub_spans=False)
def start_worker_and_scheduler1(self):
    """Spawn the celery worker, the first scheduler and the apiserver.

    Returns the three ``Popen`` handles in that order. stdout/stderr are left
    as ``None`` so the children inherit the parent's streams.
    """

    def _spawn(command_args):
        # Each child gets its own snapshot of the current environment.
        return subprocess.Popen(
            command_args,
            env=os.environ.copy(),
            stdout=None,
            stderr=None,
        )

    celery_worker_process = _spawn(self.celery_command_args)
    scheduler_process = _spawn(self.scheduler_command_args)
    apiserver_process = _spawn(self.apiserver_command_args)

    # Give the processes some time to come up before the test proceeds.
    time.sleep(10)

    return celery_worker_process, scheduler_process, apiserver_process
| TestOtelIntegration |
python | PyCQA__pylint | tests/functional/u/unsupported/unsupported_binary_operation.py | {
"start": 818,
"end": 879
} | class ____:
pass
A() + B() # [unsupported-binary-operation]
| B |
python | has2k1__plotnine | plotnine/coords/coord_cartesian.py | {
"start": 455,
"end": 3038
} | class ____(coord):
"""
Cartesian coordinate system
Parameters
----------
xlim :
Limits (in data type of the x-aesthetic) for x axis.
If None, then they are automatically computed.
ylim :
Limits (in data type of the x-aesthetic) for y axis.
If None, then they are automatically computed.
expand :
If `True`, expand the coordinate axes by some factor. If `False`,
use the limits from the data.
"""
is_linear = True
def __init__(
self,
xlim: tuple[Any, Any] | None = None,
ylim: tuple[Any, Any] | None = None,
expand: bool = True,
):
self.limits = SimpleNamespace(x=xlim, y=ylim)
self.expand = expand
def transform(
self, data: pd.DataFrame, panel_params: panel_view, munch: bool = False
) -> pd.DataFrame:
from mizani.bounds import squish_infinite
def squish_infinite_x(col: FloatSeries) -> FloatArray:
return squish_infinite(col, range=panel_params.x.range)
def squish_infinite_y(col: FloatSeries) -> FloatArray:
return squish_infinite(col, range=panel_params.y.range)
return transform_position(data, squish_infinite_x, squish_infinite_y)
def setup_panel_params(self, scale_x: scale, scale_y: scale) -> panel_view:
"""
Compute the range and break information for the panel
"""
from mizani.transforms import identity_trans
from plotnine.scales.scale_continuous import scale_continuous
def get_scale_view(
scale: scale, limits: tuple[Any, Any]
) -> scale_view:
coord_limits = (
scale.transform(limits)
if limits and isinstance(scale, scale_continuous)
else limits
)
expansion = scale.default_expansion(expand=self.expand)
ranges = scale.expand_limits(
scale.final_limits, expansion, coord_limits, identity_trans()
)
sv = scale.view(limits=coord_limits, range=ranges.range)
return sv
out = panel_view(
x=get_scale_view(scale_x, self.limits.x),
y=get_scale_view(scale_y, self.limits.y),
)
return out
def distance(
self,
x: FloatSeries,
y: FloatSeries,
panel_params: panel_view,
) -> FloatArray:
max_dist = dist_euclidean(panel_params.x.range, panel_params.y.range)[
0
]
return dist_euclidean(x, y) / max_dist
| coord_cartesian |
python | django__django | tests/postgres_tests/models.py | {
"start": 3296,
"end": 3648
} | class ____(PostgreSQLModel):
scene = models.ForeignKey("Scene", models.CASCADE)
character = models.ForeignKey("Character", models.CASCADE)
dialogue = models.TextField(blank=True, null=True)
dialogue_search_vector = SearchVectorField(blank=True, null=True)
dialogue_config = models.CharField(max_length=100, blank=True, null=True)
| Line |
python | django__django | tests/forms_tests/tests/test_media.py | {
"start": 270,
"end": 555
} | class ____(MediaAsset):
element_template = '<link href="{path}"{attributes}>'
def __init__(self, href, **attributes):
super().__init__(href, **attributes)
self.attributes["rel"] = "stylesheet"
@override_settings(STATIC_URL="http://media.example.com/static/")
| CSS |
python | pydantic__pydantic | pydantic/_internal/_decorators_v1.py | {
"start": 360,
"end": 527
} | class ____(Protocol):
"""A simple validator, supported for V1 validators and V2 validators."""
def __call__(self, __value: Any) -> Any: ...
| V1OnlyValueValidator |
python | django__django | tests/backends/test_ddl_references.py | {
"start": 448,
"end": 1382
} | class ____(SimpleTestCase):
def setUp(self):
self.reference = Table("table", lambda table: table.upper())
def test_references_table(self):
self.assertIs(self.reference.references_table("table"), True)
self.assertIs(self.reference.references_table("other"), False)
def test_rename_table_references(self):
self.reference.rename_table_references("other", "table")
self.assertIs(self.reference.references_table("table"), True)
self.assertIs(self.reference.references_table("other"), False)
self.reference.rename_table_references("table", "other")
self.assertIs(self.reference.references_table("table"), False)
self.assertIs(self.reference.references_table("other"), True)
def test_repr(self):
self.assertEqual(repr(self.reference), "<Table 'TABLE'>")
def test_str(self):
self.assertEqual(str(self.reference), "TABLE")
| TableTests |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 49337,
"end": 54055
} | class ____:
a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
b = a.astype(float)
c = a.astype(complex)
d = a.astype(object)
def construct_input_output(self, rng, shape, axis, trim):
"""Construct an input/output test pair for trim_zeros"""
# Standardize axis to a tuple.
if axis is None:
axis = tuple(range(len(shape)))
elif isinstance(axis, int):
axis = (len(shape) + axis if axis < 0 else axis,)
else:
axis = tuple(len(shape) + ax if ax < 0 else ax for ax in axis)
# Populate a random interior slice with nonzero entries.
data = np.zeros(shape)
i_start = rng.integers(low=0, high=np.array(shape) - 1)
i_end = rng.integers(low=i_start + 1, high=shape)
inner_shape = tuple(i_end - i_start)
inner_data = 1 + rng.random(inner_shape)
data[tuple(slice(i, j) for i, j in zip(i_start, i_end))] = inner_data
# Construct the expected output of N-dimensional trim_zeros
# with the given axis and trim arguments.
if 'f' not in trim:
i_start = np.array([None for _ in shape])
if 'b' not in trim:
i_end = np.array([None for _ in shape])
idx = tuple(slice(i, j) if ax in axis else slice(None)
for ax, (i, j) in enumerate(zip(i_start, i_end)))
expected = data[idx]
return data, expected
def values(self):
attr_names = ('a', 'b', 'c', 'd')
return (getattr(self, name) for name in attr_names)
def test_basic(self):
slc = np.s_[2:-1]
for arr in self.values():
res = trim_zeros(arr)
assert_array_equal(res, arr[slc])
def test_leading_skip(self):
slc = np.s_[:-1]
for arr in self.values():
res = trim_zeros(arr, trim='b')
assert_array_equal(res, arr[slc])
def test_trailing_skip(self):
slc = np.s_[2:]
for arr in self.values():
res = trim_zeros(arr, trim='F')
assert_array_equal(res, arr[slc])
def test_all_zero(self):
for _arr in self.values():
arr = np.zeros_like(_arr, dtype=_arr.dtype)
res1 = trim_zeros(arr, trim='B')
assert len(res1) == 0
res2 = trim_zeros(arr, trim='f')
assert len(res2) == 0
def test_size_zero(self):
arr = np.zeros(0)
res = trim_zeros(arr)
assert_array_equal(arr, res)
@pytest.mark.parametrize(
'arr',
[np.array([0, 2**62, 0]),
np.array([0, 2**63, 0]),
np.array([0, 2**64, 0])]
)
def test_overflow(self, arr):
slc = np.s_[1:2]
res = trim_zeros(arr)
assert_array_equal(res, arr[slc])
def test_no_trim(self):
arr = np.array([None, 1, None])
res = trim_zeros(arr)
assert_array_equal(arr, res)
def test_list_to_list(self):
res = trim_zeros(self.a.tolist())
assert isinstance(res, list)
@pytest.mark.parametrize("ndim", (0, 1, 2, 3, 10))
def test_nd_basic(self, ndim):
a = np.ones((2,) * ndim)
b = np.pad(a, (2, 1), mode="constant", constant_values=0)
res = trim_zeros(b, axis=None)
assert_array_equal(a, res)
@pytest.mark.parametrize("ndim", (0, 1, 2, 3))
def test_allzero(self, ndim):
a = np.zeros((3,) * ndim)
res = trim_zeros(a, axis=None)
assert_array_equal(res, np.zeros((0,) * ndim))
def test_trim_arg(self):
a = np.array([0, 1, 2, 0])
res = trim_zeros(a, trim='f')
assert_array_equal(res, [1, 2, 0])
res = trim_zeros(a, trim='b')
assert_array_equal(res, [0, 1, 2])
@pytest.mark.parametrize("trim", ("front", ""))
def test_unexpected_trim_value(self, trim):
arr = self.a
with pytest.raises(ValueError, match=r"unexpected character\(s\) in `trim`"):
trim_zeros(arr, trim=trim)
@pytest.mark.parametrize("shape, axis", [
[(5,), None],
[(5,), ()],
[(5,), 0],
[(5, 6), None],
[(5, 6), ()],
[(5, 6), 0],
[(5, 6), (-1,)],
[(5, 6, 7), None],
[(5, 6, 7), ()],
[(5, 6, 7), 1],
[(5, 6, 7), (0, 2)],
[(5, 6, 7, 8), None],
[(5, 6, 7, 8), ()],
[(5, 6, 7, 8), -2],
[(5, 6, 7, 8), (0, 1, 3)],
])
@pytest.mark.parametrize("trim", ['fb', 'f', 'b'])
def test_multiple_axes(self, shape, axis, trim):
rng = np.random.default_rng(4321)
data, expected = self.construct_input_output(rng, shape, axis, trim)
assert_array_equal(trim_zeros(data, axis=axis, trim=trim), expected)
| TestTrimZeros |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/cwise_ops_test.py | {
"start": 28829,
"end": 32754
} | class ____(test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
tf_min, tf_max = self.evaluate([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int8, np.uint8, np.int16,
np.uint16, np.int32, np.uint32, np.int64, np.uint64]:
with self.subTest(t=t):
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
# When eager_op_as_function mode is enabled xla auto-clustering kicks in.
# By default xla enables fast min_max computations which do not propagate NaN.
# TODO(b/205140614): remove decorators once TF and XLA behaviour are the same.
@test_util.set_xla_env_flag(flag="--xla_cpu_enable_fast_min_max=false")
@test_util.set_xla_env_flag(flag="--xla_gpu_enable_fast_min_max=false")
def testNaNPropagation(self):
x = np.array([1., np.nan, 1., np.nan], dtype=np.float64)
y = np.array([1., 1., np.nan, np.nan], dtype=np.float64)
for t in [np.float16, np.float32, np.float64]:
with self.subTest(t=t):
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
with self.subTest(t=t):
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testScalar(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1).item() * 100. # should broadcast
# dropped np.float64, int64 because TF automatically converts to 32 bit
for t in [np.float32, np.int32]:
with self.subTest(t=t):
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
def _compareGradientX(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testGradients(self):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
self._compareGradientX(math_ops.maximum, x, y)
self._compareGradientY(math_ops.maximum, x, y)
self._compareGradientX(math_ops.minimum, x, y)
self._compareGradientY(math_ops.minimum, x, y)
| MinMaxOpTest |
python | spack__spack | lib/spack/spack/test/web.py | {
"start": 8089,
"end": 8180
} | class ____:
def paginate(self, *args, **kwargs):
return MockPages()
| MockPaginator |
python | apache__airflow | providers/databricks/tests/unit/databricks/plugins/test_databricks_workflow.py | {
"start": 11081,
"end": 15157
} | class ____:
"""Test Databricks Workflow Plugin functionality specific to Airflow 3.x."""
def test_plugin_operator_extra_links_limited_functionality(self):
"""Test that operator_extra_links are limited in Airflow 3.x (only job run link)."""
plugin = DatabricksWorkflowPlugin()
# In Airflow 3, only WorkflowJobRunLink should be present
assert len(plugin.operator_extra_links) == 1
assert isinstance(plugin.operator_extra_links[0], WorkflowJobRunLink)
# Verify repair links are not present
link_types = [type(link).__name__ for link in plugin.operator_extra_links]
assert not any("Repair" in link_type for link_type in link_types)
def test_plugin_no_appbuilder_views(self):
"""Test that appbuilder_views are not configured in Airflow 3.x."""
plugin = DatabricksWorkflowPlugin()
# In Airflow 3, appbuilder_views should not be set (repair functionality disabled)
assert not getattr(plugin, "appbuilder_views", [])
def test_store_databricks_job_run_link_function_works(self):
"""Test that store_databricks_job_run_link works correctly in Airflow 3.x."""
ti_mock = Mock()
ti_mock.xcom_push = Mock()
context = {
"ti": ti_mock,
"dag": Mock(dag_id="test_dag"),
"dag_run": Mock(run_id="test_run"),
"task": Mock(task_id="test_task"),
}
metadata = Mock(conn_id="databricks_default", job_id=12345, run_id=67890)
with patch("airflow.providers.databricks.plugins.databricks_workflow.DatabricksHook") as mock_hook:
mock_hook_instance = Mock()
mock_hook_instance.host = "test-databricks-host"
mock_hook.return_value = mock_hook_instance
store_databricks_job_run_link(context, metadata, logger)
ti_mock.xcom_push.assert_called_once()
call_args = ti_mock.xcom_push.call_args
assert call_args[1]["key"] == "databricks_job_run_link"
assert "test-databricks-host" in call_args[1]["value"]
assert "12345" in call_args[1]["value"]
assert "67890" in call_args[1]["value"]
assert ti_mock.xcom_push.call_count == 1
def test_workflow_job_run_link_uses_xcom(self):
"""Test that WorkflowJobRunLink.get_link uses XCom in Airflow 3.x."""
link = WorkflowJobRunLink()
operator = Mock()
ti_key = TaskInstanceKey(dag_id="test_dag", task_id="test_task", run_id="test_run", try_number=1)
expected_link = "https://test-host/#job/123/run/456"
with patch("airflow.providers.databricks.plugins.databricks_workflow.XCom") as mock_xcom:
mock_xcom.get_value.return_value = expected_link
result = link.get_link(operator, ti_key=ti_key)
mock_xcom.get_value.assert_called_once_with(ti_key=ti_key, key="databricks_job_run_link")
assert result == expected_link
def test_store_databricks_job_run_link_exception_handling(self):
"""Test that exceptions are properly handled in store_databricks_job_run_link."""
ti_mock = Mock()
ti_mock.xcom_push = Mock()
context = {
"ti": ti_mock,
"dag": Mock(dag_id="test_dag"),
"dag_run": Mock(run_id="test_run"),
"task": Mock(task_id="test_task"),
}
metadata = Mock(conn_id="databricks_default", job_id=12345, run_id=67890)
with patch("airflow.providers.databricks.plugins.databricks_workflow.DatabricksHook") as mock_hook:
mock_hook_instance = Mock()
type(mock_hook_instance).host = PropertyMock(side_effect=Exception("Connection failed"))
mock_hook.return_value = mock_hook_instance
store_databricks_job_run_link(context, metadata, logger)
# Verify no XCom was pushed due to the exception
ti_mock.xcom_push.assert_not_called()
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Test only for Airflow < 3.0")
| TestDatabricksWorkflowPluginAirflow3 |
python | ray-project__ray | python/ray/data/preprocessors/encoder.py | {
"start": 26681,
"end": 36495
} | class ____(SerializablePreprocessorBase):
r"""Convert columns to ``pd.CategoricalDtype``.
Use this preprocessor with frameworks that have built-in support for
``pd.CategoricalDtype`` like LightGBM.
.. warning::
If you don't specify ``dtypes``, fit this preprocessor before splitting
your dataset into train and test splits. This ensures categories are
consistent across splits.
Examples:
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import Categorizer
>>>
>>> df = pd.DataFrame(
... {
... "sex": ["male", "female", "male", "female"],
... "level": ["L4", "L5", "L3", "L4"],
... })
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>> categorizer = Categorizer(columns=["sex", "level"])
>>> categorizer.fit_transform(ds).schema().types # doctest: +SKIP
[CategoricalDtype(categories=['female', 'male'], ordered=False), CategoricalDtype(categories=['L3', 'L4', 'L5'], ordered=False)]
:class:`Categorizer` can also be used in append mode by providing the
name of the output_columns that should hold the categorized values.
>>> categorizer = Categorizer(columns=["sex", "level"], output_columns=["sex_cat", "level_cat"])
>>> categorizer.fit_transform(ds).to_pandas() # doctest: +SKIP
sex level sex_cat level_cat
0 male L4 male L4
1 female L5 female L5
2 male L3 male L3
3 female L4 female L4
If you know the categories in advance, you can specify the categories with the
``dtypes`` parameter.
>>> categorizer = Categorizer(
... columns=["sex", "level"],
... dtypes={"level": pd.CategoricalDtype(["L3", "L4", "L5", "L6"], ordered=True)},
... )
>>> categorizer.fit_transform(ds).schema().types # doctest: +SKIP
[CategoricalDtype(categories=['female', 'male'], ordered=False), CategoricalDtype(categories=['L3', 'L4', 'L5', 'L6'], ordered=True)]
Args:
columns: The columns to convert to ``pd.CategoricalDtype``.
dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype``
objects. If you don't include a column in ``dtypes``, the categories
are inferred.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
""" # noqa: E501
def __init__(
self,
columns: List[str],
dtypes: Optional[Dict[str, pd.CategoricalDtype]] = None,
output_columns: Optional[List[str]] = None,
):
super().__init__()
if not dtypes:
dtypes = {}
self.columns = columns
self.dtypes = dtypes
self.output_columns = Preprocessor._derive_and_validate_output_columns(
columns, output_columns
)
def _fit(self, dataset: "Dataset") -> Preprocessor:
columns_to_get = [
column for column in self.columns if column not in self.dtypes
]
self.stats_ |= self.dtypes
if not columns_to_get:
return self
def callback(unique_indices: Dict[str, Dict]) -> pd.CategoricalDtype:
return pd.CategoricalDtype(unique_indices.keys())
self.stat_computation_plan.add_callable_stat(
stat_fn=lambda key_gen: compute_unique_value_indices(
dataset=dataset,
columns=columns_to_get,
key_gen=key_gen,
),
post_process_fn=make_post_processor(
base_fn=unique_post_fn(drop_na_values=True),
callbacks=[callback],
),
stat_key_fn=lambda col: f"unique({col})",
post_key_fn=lambda col: col,
columns=columns_to_get,
)
return self
def _transform_pandas(self, df: pd.DataFrame):
df[self.output_columns] = df[self.columns].astype(self.stats_)
return df
def _get_serializable_fields(self) -> Dict[str, Any]:
return {
"columns": self.columns,
"output_columns": self.output_columns,
"_fitted": getattr(self, "_fitted", None),
"dtypes": {
col: {"categories": list(dtype.categories), "ordered": dtype.ordered}
for col, dtype in self.dtypes.items()
}
if hasattr(self, "dtypes") and self.dtypes
else None,
}
def _set_serializable_fields(self, fields: Dict[str, Any], version: int):
# required fields
# Handle dtypes field specially
self.dtypes = (
{
col: pd.CategoricalDtype(
categories=dtype_data["categories"], ordered=dtype_data["ordered"]
)
for col, dtype_data in fields["dtypes"].items()
}
if fields.get("dtypes")
else {}
)
self.columns = fields["columns"]
self.output_columns = fields["output_columns"]
# optional fields
self._fitted = fields.get("_fitted")
def __repr__(self):
return (
f"{self.__class__.__name__}(columns={self.columns!r}, "
f"dtypes={self.dtypes!r}, output_columns={self.output_columns!r})"
)
def compute_unique_value_indices(
*,
dataset: "Dataset",
columns: List[str],
key_gen: Callable,
encode_lists: bool = True,
max_categories: Optional[Dict[str, int]] = None,
):
if max_categories is None:
max_categories = {}
columns_set = set(columns)
for column in max_categories:
if column not in columns_set:
raise ValueError(
f"You set `max_categories` for {column}, which is not present in "
f"{columns}."
)
def get_pd_value_counts_per_column(col: pd.Series) -> Dict:
# special handling for lists
if _is_series_composed_of_lists(col):
if encode_lists:
counter = Counter()
def update_counter(element):
counter.update(element)
return element
col.map(update_counter)
return counter
else:
# convert to tuples to make lists hashable
col = col.map(lambda x: tuple(x))
return Counter(col.value_counts(dropna=False).to_dict())
def get_pd_value_counts(df: pd.DataFrame) -> Dict[str, List[Dict]]:
df_columns = df.columns.tolist()
result = {}
for col in columns:
if col in df_columns:
result[col] = [get_pd_value_counts_per_column(df[col])]
else:
raise ValueError(
f"Column '{col}' does not exist in DataFrame, which has columns: {df_columns}" # noqa: E501
)
return result
value_counts_ds = dataset.map_batches(get_pd_value_counts, batch_format="pandas")
unique_values_by_col: Dict[str, Set] = {key_gen(col): set() for col in columns}
for batch in value_counts_ds.iter_batches(batch_size=None):
for col, counters in batch.items():
for counter in counters:
counter: Dict[Any, int] = {
k: v for k, v in counter.items() if v is not None
}
if col in max_categories:
counter: Dict[Any, int] = dict(
Counter(counter).most_common(max_categories[col])
)
# add only column values since frequencies are needed beyond this point
unique_values_by_col[key_gen(col)].update(counter.keys())
return unique_values_by_col
def unique_post_fn(drop_na_values: bool = False) -> Callable[[Set], Dict[str, int]]:
"""
Returns a post-processing function that generates an encoding map by
sorting the unique values produced during aggregation or stats computation.
Args:
drop_na_values: If True, NA/null values will be silently dropped from the
encoding map. If False, raises an error if any NA/null values are present.
Returns:
A callable that takes a set of unique values and returns a dictionary
mapping each value to a unique integer index.
"""
def gen_value_index(values: Set) -> Dict[str, int]:
if drop_na_values:
values = {k for k in values if not pd.isnull(k)}
else:
if any(pd.isnull(k) for k in values):
raise ValueError(
"Unable to fit column because it contains null"
" values. Consider imputing missing values first."
)
return {k: j for j, k in enumerate(sorted(values))}
return gen_value_index
def _validate_df(df: pd.DataFrame, *columns: str) -> None:
null_columns = [column for column in columns if df[column].isnull().values.any()]
if null_columns:
raise ValueError(
f"Unable to transform columns {null_columns} because they contain "
f"null values. Consider imputing missing values first."
)
def _is_series_composed_of_lists(series: pd.Series) -> bool:
# we assume that all elements are a list here
first_not_none_element = next(
(element for element in series if element is not None), None
)
return pandas.api.types.is_object_dtype(series.dtype) and isinstance(
first_not_none_element, (list, np.ndarray)
)
| Categorizer |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 161238,
"end": 163036
} | class ____:
def _test_setdiag_sorted(self, D):
A = self.spcreator(D)
# Force sorted indices
A.has_sorted_indices = False
A.sort_indices()
assert A.has_sorted_indices
# Set the diagonal (only 1 new entry / 1002, so _insert_many is used)
with check_remains_sorted(A):
A.setdiag(5)
assert np.all(A.diagonal() == 5)
def test_setdiag_noconvert(self):
# Test small ratio of new elements
# see gh-21791 setting mixture of existing and not when new_values < 0.001*nnz
# see gh-23644
# Create off-main-diagonal elements so that we have multiple elements
# per column to see if the indices are sorted or not
N = 1002
vals = np.arange(1, N + 1)
diags = np.c_[[-1, 2, 1]] * vals # rows are diagonal entries
# Remove a small number of diagonal elements so we have a small ratio
# of new ones to force _cs_matrix._setdiag to remain in CSC/CSR format
N_new = 3
diags[1, -N_new:] = 0.0
offsets = [-1, 0, 1]
D = self.dia_container((diags, offsets), shape=(N, N))
return self._test_setdiag_sorted(D)
def test_setdiag_cooconvert(self):
# Test large ratio of new elements
# see gh-23644
# Create off-main-diagonal elements so that we have multiple elements
# per column to see if the indices are sorted or not
N = 1002
vals = np.arange(1, N + 1) # only a few non-zeros
diags = np.c_[[-1, 2, 1]] * vals
# Remove many entries so we have a large ratio of new entries
diags[1, 5:] = 0.0
offsets = [-1, 0, 1]
D = self.dia_container((diags, offsets), shape=(N, N))
return self._test_setdiag_sorted(D)
| _CompressedMixin |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_xs.py | {
"start": 543,
"end": 2760
} | class ____:
def test_xs_level_series(self, multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
ser = df["A"]
expected = ser[:, "two"]
result = df.xs("two", level=1)["A"]
tm.assert_series_equal(result, expected)
def test_series_getitem_multiindex_xs_by_label(self):
# GH#5684
idx = MultiIndex.from_tuples(
[("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
)
ser = Series([1, 2, 3, 4], index=idx)
return_value = ser.index.set_names(["L1", "L2"], inplace=True)
assert return_value is None
expected = Series([1, 3], index=["a", "b"])
return_value = expected.index.set_names(["L1"], inplace=True)
assert return_value is None
result = ser.xs("one", level="L2")
tm.assert_series_equal(result, expected)
def test_series_getitem_multiindex_xs(self):
# GH#6258
dt = list(date_range("20130903", periods=3))
idx = MultiIndex.from_product([list("AB"), dt])
ser = Series([1, 3, 4, 1, 3, 4], index=idx)
expected = Series([1, 1], index=list("AB"))
result = ser.xs("20130903", level=1)
tm.assert_series_equal(result, expected)
def test_series_xs_droplevel_false(self):
# GH: 19056
mi = MultiIndex.from_tuples(
[("a", "x"), ("a", "y"), ("b", "x")], names=["level1", "level2"]
)
ser = Series([1, 1, 1], index=mi)
result = ser.xs("a", axis=0, drop_level=False)
expected = Series(
[1, 1],
index=MultiIndex.from_tuples(
[("a", "x"), ("a", "y")], names=["level1", "level2"]
),
)
tm.assert_series_equal(result, expected)
def test_xs_key_as_list(self):
# GH#41760
mi = MultiIndex.from_tuples([("a", "x")], names=["level1", "level2"])
ser = Series([1], index=mi)
with pytest.raises(TypeError, match="list keys are not supported"):
ser.xs(["a", "x"], axis=0, drop_level=False)
with pytest.raises(TypeError, match="list keys are not supported"):
ser.xs(["a"], axis=0, drop_level=False)
| TestXSWithMultiIndex |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 6301,
"end": 6767
} | class ____:
"""Test en_IE bank provider"""
def test_bban(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r"\d{23}", faker.bban())
def test_iban(self, faker, num_samples):
for _ in range(num_samples):
iban = faker.iban()
assert is_valid_iban(iban)
assert iban[:2] == EnIeBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{23}", iban[2:])
| TestEnIe |
python | pytest-dev__pytest-django | tests/test_fixtures.py | {
"start": 24855,
"end": 26907
} | class ____(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MyCustomUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(error_messages={'unique': 'A user with that email address already exists.'}, max_length=100, unique=True, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('identifier', models.CharField(unique=True, max_length=100)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=None,
),
]
""",
"migrations/0002_custom_user_model.py",
)
result = django_pytester.runpytest_subprocess("-s")
result.stdout.fnmatch_lines(["* 1 passed*"])
assert result.ret == 0
| Migration |
python | pypa__installer | src/installer/exceptions.py | {
"start": 134,
"end": 248
} | class ____(InstallerError):
"""When a wheel source violates a contract, or is not supported."""
| InvalidWheelSource |
python | RaRe-Technologies__gensim | gensim/matutils.py | {
"start": 11716,
"end": 17100
} | class ____:
"""Convert a sequence of dense/sparse vectors into a streamed Gensim corpus object.
See Also
--------
:func:`~gensim.matutils.corpus2csc`
Convert corpus in Gensim format to `scipy.sparse.csc` matrix.
"""
def __init__(self, vecs):
"""
Parameters
----------
vecs : iterable of {`numpy.ndarray`, `scipy.sparse`}
Input vectors.
"""
self.vecs = vecs
def __iter__(self):
for vec in self.vecs:
if isinstance(vec, np.ndarray):
yield full2sparse(vec)
else:
yield scipy2sparse(vec)
def __len__(self):
return len(self.vecs)
def sparse2full(doc, length):
"""Convert a document in Gensim bag-of-words format into a dense numpy array.
Parameters
----------
doc : list of (int, number)
Document in BoW format.
length : int
Vector dimensionality. This cannot be inferred from the BoW, and you must supply it explicitly.
This is typically the vocabulary size or number of topics, depending on how you created `doc`.
Returns
-------
numpy.ndarray
Dense numpy vector for `doc`.
See Also
--------
:func:`~gensim.matutils.full2sparse`
Convert dense array to gensim bag-of-words format.
"""
result = np.zeros(length, dtype=np.float32) # fill with zeroes (default value)
# convert indices to int as numpy 1.12 no longer indexes by floats
doc = ((int(id_), float(val_)) for (id_, val_) in doc)
doc = dict(doc)
# overwrite some of the zeroes with explicit values
result[list(doc)] = list(doc.values())
return result
def full2sparse(vec, eps=1e-9):
"""Convert a dense numpy array into the Gensim bag-of-words format.
Parameters
----------
vec : numpy.ndarray
Dense input vector.
eps : float
Feature weight threshold value. Features with `abs(weight) < eps` are considered sparse and
won't be included in the BOW result.
Returns
-------
list of (int, float)
BoW format of `vec`, with near-zero values omitted (sparse vector).
See Also
--------
:func:`~gensim.matutils.sparse2full`
Convert a document in Gensim bag-of-words format into a dense numpy array.
"""
vec = np.asarray(vec, dtype=float)
nnz = np.nonzero(abs(vec) > eps)[0]
return list(zip(nnz, vec.take(nnz)))
dense2vec = full2sparse
def full2sparse_clipped(vec, topn, eps=1e-9):
"""Like :func:`~gensim.matutils.full2sparse`, but only return the `topn` elements of the greatest magnitude (abs).
This is more efficient that sorting a vector and then taking the greatest values, especially
where `len(vec) >> topn`.
Parameters
----------
vec : numpy.ndarray
Input dense vector
topn : int
Number of greatest (abs) elements that will be presented in result.
eps : float
Threshold value, if coordinate in `vec` < eps, this will not be presented in result.
Returns
-------
list of (int, float)
Clipped vector in BoW format.
See Also
--------
:func:`~gensim.matutils.full2sparse`
Convert dense array to gensim bag-of-words format.
"""
# use np.argpartition/argsort and only form tuples that are actually returned.
# this is about 40x faster than explicitly forming all 2-tuples to run sort() or heapq.nlargest() on.
if topn <= 0:
return []
vec = np.asarray(vec, dtype=float)
nnz = np.nonzero(abs(vec) > eps)[0]
biggest = nnz.take(argsort(abs(vec).take(nnz), topn, reverse=True))
return list(zip(biggest, vec.take(biggest)))
def corpus2dense(corpus, num_terms, num_docs=None, dtype=np.float32):
"""Convert corpus into a dense numpy 2D array, with documents as columns.
Parameters
----------
corpus : iterable of iterable of (int, number)
Input corpus in the Gensim bag-of-words format.
num_terms : int
Number of terms in the dictionary. X-axis of the resulting matrix.
num_docs : int, optional
Number of documents in the corpus. If provided, a slightly more memory-efficient code path is taken.
Y-axis of the resulting matrix.
dtype : data-type, optional
Data type of the output matrix.
Returns
-------
numpy.ndarray
Dense 2D array that presents `corpus`.
See Also
--------
:class:`~gensim.matutils.Dense2Corpus`
Convert dense matrix to Gensim corpus format.
"""
if num_docs is not None:
# we know the number of documents => don't bother column_stacking
docno, result = -1, np.empty((num_terms, num_docs), dtype=dtype)
for docno, doc in enumerate(corpus):
result[:, docno] = sparse2full(doc, num_terms)
assert docno + 1 == num_docs
else:
# The below used to be a generator, but NumPy deprecated generator as of 1.16 with:
# """
# FutureWarning: arrays to stack must be passed as a "sequence" type such as list or tuple.
# Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error
# in the future.
# """
result = np.column_stack([sparse2full(doc, num_terms) for doc in corpus])
return result.astype(dtype)
| Scipy2Corpus |
python | PyCQA__pylint | tests/functional/a/access/access_member_before_definition.py | {
"start": 758,
"end": 1104
} | class ____:
def test_mixin(self):
"""Don't emit access-member-before-definition for mixin classes."""
if self.already_defined:
# pylint: disable=attribute-defined-outside-init
self.already_defined = None
# Test for regression in bitbucket issue 164
# https://bitbucket.org/logilab/pylint/issue/164/
| Mixin |
python | google__pytype | pytype/rewrite/flow/frame_base.py | {
"start": 808,
"end": 928
} | class ____(Exception):
"""Raised when step() is called on a frame with no more opcodes to execute."""
| FrameConsumedError |
python | huggingface__transformers | src/transformers/models/convnext/modeling_convnext.py | {
"start": 2037,
"end": 2514
} | class ____(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
| ConvNextDropPath |
python | jupyterlab__jupyterlab | jupyterlab/labextensions.py | {
"start": 3144,
"end": 5851
} | class ____(JupyterApp, DebugLogFileMixin):
version = VERSION
flags = flags
aliases = aliases
name = "lab"
# Not configurable!
core_config = Instance(CoreConfig, allow_none=True)
app_dir = Unicode("", config=True, help="The app directory to target")
should_build = Bool(True, config=True, help="Whether to build the app after the action")
dev_build = Bool(
None,
allow_none=True,
config=True,
help="Whether to build in dev mode. Defaults to True (dev mode) if there are any locally linked extensions, else defaults to False (production mode).",
)
minimize = Bool(
True,
config=True,
help="Whether to minimize a production build (defaults to True).",
)
should_clean = Bool(
False,
config=True,
help="Whether temporary files should be cleaned up after building jupyterlab",
)
splice_source = Bool(False, config=True, help="Splice source packages into app directory.")
labextensions_path = List(
Unicode(),
help="The standard paths to look in for prebuilt JupyterLab extensions",
)
@default("labextensions_path")
def _default_labextensions_path(self):
lab = LabApp()
lab.load_config_file()
return lab.labextensions_path + lab.extra_labextensions_path
@default("splice_source")
def _default_splice_source(self):
version = get_app_version(AppOptions(app_dir=self.app_dir))
return version.endswith("-spliced")
def start(self):
if self.app_dir and self.app_dir.startswith(HERE):
msg = "Cannot run lab extension commands in core app"
raise ValueError(msg)
with self.debug_logging():
ans = self.run_task()
if ans and self.should_build:
production = None if self.dev_build is None else not self.dev_build
app_options = AppOptions(
app_dir=self.app_dir,
logger=self.log,
core_config=self.core_config,
splice_source=self.splice_source,
)
build(
clean_staging=self.should_clean,
production=production,
minimize=self.minimize,
app_options=app_options,
)
def run_task(self):
pass
def deprecation_warning(self, msg):
return self.log.warning(
f"\033[33m(Deprecated) {msg}\n\n{LABEXTENSION_COMMAND_WARNING} \033[0m"
)
def _log_format_default(self):
"""A default format for messages"""
return "%(message)s"
| BaseExtensionApp |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataform.py | {
"start": 15049,
"end": 18552
} | class ____(GoogleCloudBaseOperator):
"""
Returns WorkflowInvocationActions in a given WorkflowInvocation.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: the workflow invocation resource's id.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"repository_id",
"workflow_invocation_id",
"impersonation_chain",
)
operator_extra_links = (DataformWorkflowInvocationLink(),)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workflow_invocation_id = workflow_invocation_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
DataformWorkflowInvocationLink.persist(
context=context,
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=self.workflow_invocation_id,
)
actions = hook.query_workflow_invocation_actions(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=self.workflow_invocation_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
actions_list = [WorkflowInvocationAction.to_dict(action) for action in actions]
self.log.info("Workflow Query invocation actions: %s", actions_list)
return actions_list
| DataformQueryWorkflowInvocationActionsOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 341621,
"end": 342482
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of
UpdateEnterpriseMembersCanMakePurchasesSetting
"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "setting_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise on which to set the members can make
purchases setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseMembersCanMakePurchasesSettingValue), graphql_name="settingValue")
"""The value for the members can make purchases setting on the
enterprise.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateEnterpriseMembersCanMakePurchasesSettingInput |
python | ray-project__ray | rllib/env/external_multi_agent_env.py | {
"start": 265,
"end": 5487
} | class ____(ExternalEnv):
"""This is the multi-agent version of ExternalEnv."""
def __init__(
self,
action_space: gym.Space,
observation_space: gym.Space,
):
"""Initializes an ExternalMultiAgentEnv instance.
Args:
action_space: Action space of the env.
observation_space: Observation space of the env.
"""
ExternalEnv.__init__(self, action_space, observation_space)
# We require to know all agents' spaces.
if isinstance(self.action_space, dict) or isinstance(
self.observation_space, dict
):
if not (self.action_space.keys() == self.observation_space.keys()):
raise ValueError(
"Agent ids disagree for action space and obs "
"space dict: {} {}".format(
self.action_space.keys(), self.observation_space.keys()
)
)
def run(self):
"""Override this to implement the multi-agent run loop.
Your loop should continuously:
1. Call self.start_episode(episode_id)
2. Call self.get_action(episode_id, obs_dict)
-or-
self.log_action(episode_id, obs_dict, action_dict)
3. Call self.log_returns(episode_id, reward_dict)
4. Call self.end_episode(episode_id, obs_dict)
5. Wait if nothing to do.
Multiple episodes may be started at the same time.
"""
raise NotImplementedError
@override(ExternalEnv)
def start_episode(
self, episode_id: Optional[str] = None, training_enabled: bool = True
) -> str:
if episode_id is None:
episode_id = uuid.uuid4().hex
if episode_id in self._finished:
raise ValueError("Episode {} has already completed.".format(episode_id))
if episode_id in self._episodes:
raise ValueError("Episode {} is already started".format(episode_id))
self._episodes[episode_id] = _ExternalEnvEpisode(
episode_id, self._results_avail_condition, training_enabled, multiagent=True
)
return episode_id
@override(ExternalEnv)
def get_action(
self, episode_id: str, observation_dict: MultiAgentDict
) -> MultiAgentDict:
"""Record an observation and get the on-policy action.
Thereby, observation_dict is expected to contain the observation
of all agents acting in this episode step.
Args:
episode_id: Episode id returned from start_episode().
observation_dict: Current environment observation.
Returns:
action: Action from the env action space.
"""
episode = self._get(episode_id)
return episode.wait_for_action(observation_dict)
@override(ExternalEnv)
def log_action(
self,
episode_id: str,
observation_dict: MultiAgentDict,
action_dict: MultiAgentDict,
) -> None:
"""Record an observation and (off-policy) action taken.
Args:
episode_id: Episode id returned from start_episode().
observation_dict: Current environment observation.
action_dict: Action for the observation.
"""
episode = self._get(episode_id)
episode.log_action(observation_dict, action_dict)
@override(ExternalEnv)
def log_returns(
self,
episode_id: str,
reward_dict: MultiAgentDict,
info_dict: MultiAgentDict = None,
multiagent_done_dict: MultiAgentDict = None,
) -> None:
"""Record returns from the environment.
The reward will be attributed to the previous action taken by the
episode. Rewards accumulate until the next action. If no reward is
logged before the next action, a reward of 0.0 is assumed.
Args:
episode_id: Episode id returned from start_episode().
reward_dict: Reward from the environment agents.
info_dict: Optional info dict.
multiagent_done_dict: Optional done dict for agents.
"""
episode = self._get(episode_id)
# Accumulate reward by agent.
# For existing agents, we want to add the reward up.
for agent, rew in reward_dict.items():
if agent in episode.cur_reward_dict:
episode.cur_reward_dict[agent] += rew
else:
episode.cur_reward_dict[agent] = rew
if multiagent_done_dict:
for agent, done in multiagent_done_dict.items():
episode.cur_done_dict[agent] = done
if info_dict:
episode.cur_info_dict = info_dict or {}
@override(ExternalEnv)
def end_episode(self, episode_id: str, observation_dict: MultiAgentDict) -> None:
"""Record the end of an episode.
Args:
episode_id: Episode id returned from start_episode().
observation_dict: Current environment observation.
"""
episode = self._get(episode_id)
self._finished.add(episode.episode_id)
episode.done(observation_dict)
| ExternalMultiAgentEnv |
python | spack__spack | lib/spack/spack/database.py | {
"start": 73617,
"end": 73730
} | class ____(SpackError):
"""Raised to request an explicit DB upgrade to the user"""
| ExplicitDatabaseUpgradeError |
python | pytorch__pytorch | test/mobile/lightweight_dispatch/tests_setup.py | {
"start": 3141,
"end": 3425
} | class ____(torch.nn.Module):
def forward(self, b):
a = torch.tensor(3, dtype=torch.int64)
out = torch.empty(size=[1], dtype=torch.float)
torch.div(b, a, out=out)
return [torch.div(b, a, rounding_mode="trunc"), out]
@save_model
| ModelWithStringOptional |
python | ray-project__ray | python/ray/data/_internal/logical/operators/from_operators.py | {
"start": 2744,
"end": 2834
} | class ____(AbstractFrom):
"""Logical operator for `from_blocks`."""
pass
| FromBlocks |
python | readthedocs__readthedocs.org | readthedocs/subscriptions/notifications.py | {
"start": 1373,
"end": 1732
} | class ____(SubscriptionNotificationMixin, EmailNotification):
"""
Subscription has ended.
Notify the customer that the Organization will be disabled *soon* if the
subscription is not renewed for the organization.
"""
name = "subscription_ended"
subject = "Your subscription to Read the Docs has ended"
| SubscriptionEndedNotification |
python | pytorch__pytorch | torch/__init__.py | {
"start": 70755,
"end": 70979
} | class ____(_LegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal(stacklevel=3)
return self._dtype
@classproperty
def _dtype(self):
return torch.float
| FloatStorage |
python | coleifer__peewee | peewee.py | {
"start": 155924,
"end": 160013
} | class ____(ColumnBase):
_field_counter = 0
_order = 0
accessor_class = FieldAccessor
auto_increment = False
default_index_type = None
field_type = 'DEFAULT'
unpack = True
def __init__(self, null=False, index=False, unique=False, column_name=None,
default=None, primary_key=False, constraints=None,
sequence=None, collation=None, unindexed=False, choices=None,
help_text=None, verbose_name=None, index_type=None,
db_column=None, _hidden=False):
if db_column is not None:
__deprecated__('"db_column" has been deprecated in favor of '
'"column_name" for Field objects.')
column_name = db_column
self.null = null
self.index = index
self.unique = unique
self.column_name = column_name
self.default = default
self.primary_key = primary_key
self.constraints = constraints # List of column constraints.
self.sequence = sequence # Name of sequence, e.g. foo_id_seq.
self.collation = collation
self.unindexed = unindexed
self.choices = choices
self.help_text = help_text
self.verbose_name = verbose_name
self.index_type = index_type or self.default_index_type
self._hidden = _hidden
# Used internally for recovering the order in which Fields were defined
# on the Model class.
Field._field_counter += 1
self._order = Field._field_counter
self._sort_key = (self.primary_key and 1 or 2), self._order
def __hash__(self):
return hash(self.name + '.' + self.model.__name__)
def __repr__(self):
if hasattr(self, 'model') and getattr(self, 'name', None):
return '<%s: %s.%s>' % (type(self).__name__,
self.model.__name__,
self.name)
return '<%s: (unbound)>' % type(self).__name__
def bind(self, model, name, set_attribute=True):
self.model = model
self.name = self.safe_name = name
self.column_name = self.column_name or name
if set_attribute:
setattr(model, name, self.accessor_class(model, self, name))
@property
def column(self):
return Column(self.model._meta.table, self.column_name)
def adapt(self, value):
return value
def db_value(self, value):
return value if value is None else self.adapt(value)
def python_value(self, value):
return value if value is None else self.adapt(value)
def to_value(self, value):
return Value(value, self.db_value, unpack=False)
def get_sort_key(self, ctx):
return self._sort_key
def __sql__(self, ctx):
return ctx.sql(self.column)
def get_modifiers(self):
pass
def ddl_datatype(self, ctx):
if ctx and ctx.state.field_types:
column_type = ctx.state.field_types.get(self.field_type,
self.field_type)
else:
column_type = self.field_type
modifiers = self.get_modifiers()
if column_type and modifiers:
modifier_literal = ', '.join([str(m) for m in modifiers])
return SQL('%s(%s)' % (column_type, modifier_literal))
else:
return SQL(column_type)
def ddl(self, ctx):
accum = [Entity(self.column_name)]
data_type = self.ddl_datatype(ctx)
if data_type:
accum.append(data_type)
if self.unindexed:
accum.append(SQL('UNINDEXED'))
if not self.null:
accum.append(SQL('NOT NULL'))
if self.primary_key:
accum.append(SQL('PRIMARY KEY'))
if self.sequence:
accum.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence))
if self.constraints:
accum.extend(self.constraints)
if self.collation:
accum.append(SQL('COLLATE %s' % self.collation))
return NodeList(accum)
| Field |
python | ApeWorX__ape | src/ape_pm/project.py | {
"start": 4224,
"end": 13055
} | class ____(ProjectAPI):
"""
Helps Ape read configurations from foundry projects
and lessens the need of specifying ``config_override:``
for foundry-based dependencies.
"""
_github_client: _GithubClient = github_client
@property
def foundry_config_file(self) -> Path:
return self.path / "foundry.toml"
@property
def gitmodules_file(self) -> Path:
return self.path / ".gitmodules"
@property
def remapping_file(self) -> Path:
return self.path / "remapping.txt"
@property
def is_valid(self) -> bool:
return self.foundry_config_file.is_file()
def extract_config(self, **overrides) -> "ApeConfig":
ape_cfg: dict = {}
root_data = self._parse_foundry_toml()
# Handle root project configuration.
# NOTE: The default contracts folder name is `src` in foundry
# instead of `contracts`, hence the default.
contracts_folder = root_data.get("src")
if not contracts_folder:
if (self.foundry_config_file.parent / "src").is_dir():
contracts_folder = "src"
elif (self.foundry_config_file.parent / "contracts").is_dir():
contracts_folder = "contracts"
if contracts_folder:
ape_cfg["contracts_folder"] = contracts_folder
# Foundry uses git-submodules for dependencies.
if dependencies := self._parse_dependencies_from_gitmodules():
ape_cfg["dependencies"] = dependencies
lib_paths = root_data.get("libs", ("lib",))
if solidity := self._parse_solidity_config(
root_data, dependencies, lib_paths, contracts_folder=contracts_folder
):
ape_cfg["solidity"] = solidity
return ApeConfig.model_validate({**ape_cfg, **overrides})
def _parse_foundry_toml(self) -> dict:
data = tomllib.loads(self.foundry_config_file.read_text())
profile = data.get("profile", {})
return profile.get("default", {})
def _parse_dependencies_from_gitmodules(self) -> list[dict]:
if not self.gitmodules_file.is_file():
return []
dependencies: list[dict] = []
module_data = self._parse_gitmodules()
for module in module_data:
if not (url := module.get("url")):
continue
elif not url.startswith("https://github.com/"):
# Not from GitHub.
continue
github = url.replace("https://github.com/", "").replace(".git", "")
dependency = {"github": github}
version_type, version = self._parse_version_from_module(module)
dependency[version_type] = version
dependency["name"] = github.split("/")[-1].lower().replace("_", "-")
dependencies.append(dependency)
return dependencies
def _parse_gitmodules(self) -> list[dict[str, str]]:
submodules: list[dict[str, str]] = []
submodule: dict[str, str] = {}
content = Path(self.gitmodules_file).read_text()
for line in content.splitlines():
line = line.strip()
if line.startswith("[submodule"):
# Add the submodule we have been building to the list
# if it exists. This happens on submodule after the first one.
if submodule:
submodules.append(submodule)
submodule = {}
for key in ("path", "url", "release", "branch"):
if not line.startswith(f"{key} ="):
continue
submodule[key] = line.split("=")[1].strip()
break # No need to try the rest.
# Add the last submodule.
if submodule:
submodules.append(submodule)
return submodules
def _parse_version_from_module(
self, module: dict, default_version: str = "main"
) -> tuple[str, str]:
if "release" in module:
# Use GitHub version API.
return ("version", module["release"])
elif "branch" in module:
# Use clone-by-reference.
return ("ref", module["branch"])
elif "url" not in module:
return ("ref", default_version)
url = module["url"]
github = url.replace("https://github.com/", "").replace(".git", "")
gh_parts = github.split("/")
if len(gh_parts) != 2:
# Likely not possible, but just try `main`.
return ("ref", default_version)
# Use the default branch of the repo.
org_name, repo_name = github.split("/")
repo = self._github_client.get_repo(org_name, repo_name)
return ("ref", repo.get("default_branch", default_version))
def _parse_solidity_config(
self,
data: dict,
dependencies: list[dict],
lib_paths: Iterable[str],
contracts_folder: Optional[str] = None,
) -> dict:
sol_cfg: dict = {}
# Different foundry versions use a different key for the solc version.
if version := (data.get("solc") or data.get("solc_version")):
sol_cfg["version"] = version
if evm_version := data.get("evm_version"):
sol_cfg["evm_version"] = evm_version
if via_ir := data.get("via_ir"):
sol_cfg["via_ir"] = via_ir
foundry_remappings = [*data.get("remappings", []), *self._parse_remappings_file()]
if remappings := self._parse_remappings(
foundry_remappings, dependencies, lib_paths, contracts_folder=contracts_folder
):
sol_cfg["import_remapping"] = remappings
return sol_cfg
def _parse_remappings_file(self) -> list[str]:
if not self.remapping_file.is_file():
return []
return self.remapping_file.read_text(encoding="utf8").splitlines()
def _parse_remappings(
self,
foundry_remappings: list[str],
dependencies: list[dict],
lib_paths: Iterable[str],
contracts_folder: Optional[str] = None,
) -> list[str]:
ape_sol_remappings: set[str] = set()
for f_remap in foundry_remappings:
key, value = f_remap.split("=")
sep = "\\" if "\\" in value else "/"
real_key = key.rstrip(sep)
clean_key = real_key.lstrip("@")
# Check if is from one of the dependencies.
is_dep = False
repo = value
for lib_path in lib_paths:
if not value.startswith(f"{lib_path}{sep}"):
continue
# Dependency found.
is_dep = True
repo = value.replace(f"{lib_path}{sep}", "").strip(sep)
break
if not is_dep:
# Append as-is.
ape_sol_remappings.add(f_remap)
continue
# Setup remapping to a dependency in a way Ape expects.
# Also, change the name of the dependencies to be the short name
# from the remapping (also what Ape expects).
dep_found = False
for dependency in dependencies:
# NOTE: There appears to be no rhyme or reason to
# dependency short-names in foundry.
if (
not dependency["github"].endswith(clean_key)
and dependency["name"] != clean_key
and dependency["name"] != repo
):
continue
# Matching dependency found.
dependency["name"] = clean_key
version = dependency.get("version") or dependency.get("ref")
prefix = f"{sep}.cache{sep}"
if contracts_folder:
prefix = f"{contracts_folder}{prefix}"
value_without_lib_path = value
for lib_path in lib_paths:
if f"{lib_path}{sep}" not in value:
continue
value_without_lib_path = value.replace(f"{lib_path}{sep}", "")
# Sometimes, contracts-folder name is included.
suffix = ""
if f"{clean_key}{sep}" in value_without_lib_path:
suffix = value_without_lib_path.replace(f"{clean_key}{sep}", "").rstrip(sep)
new_value = f"{prefix}{clean_key}{sep}{version}{sep}{suffix}"
new_remapping = f"{real_key}={new_value}"
ape_sol_remappings.add(new_remapping)
dep_found = True
break
if not dep_found:
# Item seems like a dependency but not found in `dependencies`.
ape_sol_remappings.add(f_remap)
return sorted(list(ape_sol_remappings))
| FoundryProject |
python | PrefectHQ__prefect | src/prefect/server/database/dependencies.py | {
"start": 6329,
"end": 7493
} | class ____(Generic[P, R]):
"""Mixin class to delegate all attribute access to a wrapped function
This helps compatibility and echos what the Python method wrapper object
does, and makes subclasses transarent to many introspection techniques.
"""
__slots__ = "_func"
def __init__(self, func: Callable[P, R]) -> None:
object.__setattr__(self, "_func", func)
@property
def __wrapped__(self) -> Callable[P, R]:
"""Access the underlying wrapped function"""
return self._func
if not TYPE_CHECKING:
# Attribute hooks are guarded against typecheckers which then tend to
# mark the class as 'anything goes' otherwise.
def __getattr__(self, name: str) -> Any:
return getattr(self._func, name)
def __setattr__(self, name: str, value: Any) -> None:
setattr(self._func, name, value)
def __delattr__(self, name: str) -> None:
delattr(self._func, name)
# Descriptor object responsible for injecting the PrefectDBInterface instance.
# It has no docstring to encourage Python to find the wrapped callable docstring
# instead.
| _FuncWrapper |
python | davidhalter__parso | parso/python/tree.py | {
"start": 36712,
"end": 37226
} | class ____(Mapping):
"""
This class exists for the sole purpose of creating an immutable dict.
"""
def __init__(self, dct):
self._dict = dct
def __getitem__(self, key):
return self._dict[key]
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def __hash__(self):
return id(self)
def __eq__(self, other):
# Comparing these dicts does not make sense.
return self is other
| UsedNamesMapping |
python | pyca__cryptography | tests/hazmat/primitives/test_xofhash.py | {
"start": 3695,
"end": 4620
} | class ____:
def test_shake256_variable(self, backend, subtests):
vectors = _load_all_params(
os.path.join("hashes", "SHAKE"),
["SHAKE256VariableOut.rsp"],
load_nist_vectors,
)
for vector in vectors:
with subtests.test():
output_length = int(vector["outputlen"]) // 8
msg = binascii.unhexlify(vector["msg"])
shake = hashes.SHAKE256(digest_size=output_length)
m = hashes.XOFHash(shake)
m.update(msg)
remaining = output_length
data = b""
stride = random.randint(1, 128)
while remaining > 0:
stride = min(stride, remaining)
data += m.squeeze(stride)
remaining -= stride
assert data == binascii.unhexlify(vector["output"])
| TestXOFSHAKE256 |
python | keras-team__keras | keras/src/saving/serialization_lib.py | {
"start": 1139,
"end": 1305
} | class ____:
def __init__(self, **config):
self.config = config
def serialize(self):
return serialize_keras_object(self.config)
| SerializableDict |
python | plotly__plotly.py | plotly/graph_objs/scatter/marker/colorbar/title/_font.py | {
"start": 233,
"end": 9949
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter.marker.colorbar.title"
_path_str = "scatter.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter.marker
.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | coleifer__peewee | tests/fields.py | {
"start": 13479,
"end": 13744
} | class ____(TestModel):
name = CharField(primary_key=True)
m1 = ForeignKeyField(M1, deferrable='INITIALLY DEFERRED',
on_delete='CASCADE')
@skip_if(IS_MYSQL)
@skip_if(IS_CRDB, 'crdb does not support deferred foreign-key constraints')
| M2 |
python | sanic-org__sanic | sanic/cli/console.py | {
"start": 3022,
"end": 8606
} | class ____(InteractiveConsole):
def __init__(self, app: Sanic, start: Optional[Default] = None):
global repl_app
repl_app = app
locals_available = {
"app": app,
"sanic": sanic,
"do": do,
}
user_locals = {
user_local.name: user_local.var for user_local in app.repl_ctx
}
client_availability = ""
variable_descriptions = [
_variable_description(
"app", REPLContext.BUILTINS["app"], str(app)
),
_variable_description(
"sanic", REPLContext.BUILTINS["sanic"], "import sanic"
),
_variable_description(
"do", REPLContext.BUILTINS["do"], "Result(request, response)"
),
]
user_locals_descriptions = [
_variable_description(
user_local.name, user_local.desc, str(type(user_local.var))
)
for user_local in app.repl_ctx
]
if HTTPX_AVAILABLE:
locals_available["client"] = SanicClient(app)
variable_descriptions.append(
_variable_description(
"client",
REPLContext.BUILTINS["client"],
"from httpx import Client",
),
)
else:
client_availability = (
f"\n{Colors.YELLOW}The HTTP client has been disabled. "
"To enable it, install httpx:\n\t"
f"pip install httpx{Colors.END}\n"
)
super().__init__(locals={**locals_available, **user_locals})
self.compile.compiler.flags |= PyCF_ALLOW_TOP_LEVEL_AWAIT
self.loop = new_event_loop()
self._start = start
self._pause_event = threading.Event()
self._started_event = threading.Event()
self._interact_thread = threading.Thread(
target=self._console,
daemon=True,
)
self._monitor_thread = threading.Thread(
target=self._monitor,
daemon=True,
)
self._async_thread = threading.Thread(
target=self.loop.run_forever,
daemon=True,
)
self.app = app
self.resume()
self.exit_message = "Closing the REPL."
self.banner_message = "\n".join(
[
f"\n{Colors.BOLD}Welcome to the Sanic interactive console{Colors.END}", # noqa: E501
client_availability,
"The following objects are available for your convenience:", # noqa: E501
*variable_descriptions,
]
+ (
[
"\nREPL Context:",
*user_locals_descriptions,
]
if user_locals_descriptions
else []
)
+ [
"\nThe async/await keywords are available for use here.", # noqa: E501
f"To exit, press {Colors.BOLD}CTRL+C{Colors.END}, "
f"{Colors.BOLD}CTRL+D{Colors.END}, or type {Colors.BOLD}exit(){Colors.END}.\n", # noqa: E501
]
)
def pause(self):
if self.is_paused():
return
self._pause_event.clear()
def resume(self):
self._pause_event.set()
def runsource(self, source, filename="<input>", symbol="single"):
if source.strip() == "exit()":
self._shutdown()
return False
if self.is_paused():
print("Console is paused. Please wait for it to be resumed.")
return False
return super().runsource(source, filename, symbol)
def runcode(self, code):
future = concurrent.futures.Future()
async def callback():
func = FunctionType(code, self.locals)
try:
result = func()
if iscoroutine(result):
result = await result
except BaseException:
traceback.print_exc()
result = False
future.set_result(result)
self.loop.call_soon_threadsafe(self.loop.create_task, callback())
return future.result()
def is_paused(self):
return not self._pause_event.is_set()
def _console(self):
self._started_event.set()
self.interact(banner=self.banner_message, exitmsg=self.exit_message)
self._shutdown()
def _monitor(self):
if isinstance(self._start, Default):
enter = f"{Colors.BOLD + Colors.SANIC}ENTER{Colors.END}"
start = input(f"\nPress {enter} at anytime to start the REPL.\n\n")
if start:
return
try:
while True:
if not self._started_event.is_set():
self.app.manager.wait_for_ack()
self._interact_thread.start()
elif self.app.manager._all_workers_ack() and self.is_paused():
self.resume()
print(sys.ps1, end="", flush=True)
elif (
not self.app.manager._all_workers_ack()
and not self.is_paused()
):
self.pause()
time.sleep(0.1)
except (ConnectionResetError, BrokenPipeError):
pass
def _shutdown(self):
self.app.manager.monitor_publisher.send("__TERMINATE__")
def run(self):
self._monitor_thread.start()
self._async_thread.start()
| SanicREPL |
python | ansible__ansible | test/integration/targets/old_style_vars_plugins/vars_plugins/auto_enabled.py | {
"start": 86,
"end": 247
} | class ____(BaseVarsPlugin):
REQUIRES_ENABLED = False
def get_vars(self, loader, path, entities):
return {'explicitly_auto_enabled': True}
| VarsModule |
python | apache__airflow | providers/telegram/src/airflow/providers/telegram/operators/telegram.py | {
"start": 3078,
"end": 4953
} | class ____(BaseOperator):
"""
This operator allows you to send file to Telegram using Telegram Bot API.
Takes both Telegram Bot API token directly or connection that has Telegram token in password field.
If both supplied, token parameter will be given precedence.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TelegramOperator`
:param telegram_conn_id: Telegram connection ID which its password is Telegram API token
:param token: Telegram API Token
:param chat_id: Telegram chat ID for a chat/channel/group
:param file: The path of the file or media to be sent via Telegram
:param telegram_kwargs: Extra args to be passed to telegram client
"""
template_fields: Sequence[str] = "chat_id"
ui_color = "#FFBA40"
def __init__(
self,
*,
telegram_conn_id: str = "telegram_default",
token: str | None = None,
chat_id: str | None = None,
file: str,
telegram_kwargs: dict | None = None,
**kwargs,
):
self.chat_id = chat_id
self.token = token
self.telegram_kwargs = telegram_kwargs or {}
self.file = file
if telegram_conn_id is None:
raise AirflowException("No valid Telegram connection id supplied.")
self.telegram_conn_id = telegram_conn_id
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
"""Call the TelegramHook to send the provided Telegram file."""
if self.file:
self.telegram_kwargs["file"] = self.file
telegram_hook = TelegramHook(
telegram_conn_id=self.telegram_conn_id,
token=self.token,
chat_id=self.chat_id,
)
telegram_hook.send_file(**self.telegram_kwargs)
| TelegramFileOperator |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 7232,
"end": 8616
} | class ____(AbstractTemplate):
key = cuda.selp
def generic(self, args, kws):
assert not kws
test, a, b = args
# per docs
# http://docs.nvidia.com/cuda/parallel-thread-execution/index.html#comparison-and-selection-instructions-selp
supported_types = (types.float64, types.float32,
types.int16, types.uint16,
types.int32, types.uint32,
types.int64, types.uint64)
if a != b or a not in supported_types:
return
return signature(a, test, a, a)
def _genfp16_unary(l_key):
@register
class Cuda_fp16_unary(ConcreteTemplate):
key = l_key
cases = [signature(types.float16, types.float16)]
return Cuda_fp16_unary
def _genfp16_unary_operator(l_key):
@register_global(l_key)
class Cuda_fp16_unary(AbstractTemplate):
key = l_key
def generic(self, args, kws):
assert not kws
if len(args) == 1 and args[0] == types.float16:
return signature(types.float16, types.float16)
return Cuda_fp16_unary
def _genfp16_binary(l_key):
@register
class Cuda_fp16_binary(ConcreteTemplate):
key = l_key
cases = [signature(types.float16, types.float16, types.float16)]
return Cuda_fp16_binary
@register_global(float)
| Cuda_selp |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 16839,
"end": 16932
} | class ____(IterableExportEventsStreamAdjustableRange):
data_field = "pushBounce"
| PushBounce |
python | huggingface__transformers | src/transformers/models/glm/modeling_glm.py | {
"start": 2078,
"end": 2727
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
self.activation_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
up_states = self.gate_up_proj(hidden_states)
gate, up_states = up_states.chunk(2, dim=-1)
up_states = up_states * self.activation_fn(gate)
return self.down_proj(up_states)
| GlmMLP |
python | coleifer__peewee | tests/sql.py | {
"start": 52069,
"end": 54580
} | class ____(BaseTestCase):
def test_delete_query(self):
query = (User
.delete()
.where(User.c.username != 'charlie')
.limit(3))
self.assertSQL(query, (
'DELETE FROM "users" WHERE ("users"."username" != ?) LIMIT ?'),
['charlie', 3])
def test_delete_subquery(self):
count = fn.COUNT(Tweet.c.id).alias('ct')
subquery = (User
.select(User.c.id, count)
.join(Tweet, on=(Tweet.c.user_id == User.c.id))
.group_by(User.c.id)
.having(count > 100))
query = (User
.delete()
.where(User.c.id << subquery))
self.assertSQL(query, (
'DELETE FROM "users" '
'WHERE ("users"."id" IN ('
'SELECT "users"."id", COUNT("t1"."id") AS "ct" '
'FROM "users" AS "users" '
'INNER JOIN "tweets" AS "t1" ON ("t1"."user_id" = "users"."id") '
'GROUP BY "users"."id" '
'HAVING ("ct" > ?)))'), [100])
def test_delete_cte(self):
cte = (User
.select(User.c.id)
.where(User.c.admin == True)
.cte('u'))
query = (User
.delete()
.where(User.c.id << cte.select(cte.c.id))
.with_cte(cte))
self.assertSQL(query, (
'WITH "u" AS '
'(SELECT "t1"."id" FROM "users" AS "t1" WHERE ("t1"."admin" = ?)) '
'DELETE FROM "users" '
'WHERE ("users"."id" IN (SELECT "u"."id" FROM "u"))'), [True])
def test_delete_returning(self):
query = (User
.delete()
.where(User.c.id > 2)
.returning(User.c.username))
self.assertSQL(query, (
'DELETE FROM "users" '
'WHERE ("users"."id" > ?) '
'RETURNING "users"."username"'), [2])
query = query.returning(User.c.id, User.c.username, SQL('1'))
self.assertSQL(query, (
'DELETE FROM "users" '
'WHERE ("users"."id" > ?) '
'RETURNING "users"."id", "users"."username", 1'), [2])
query = query.returning(User.c.id.alias('old_id'))
self.assertSQL(query, (
'DELETE FROM "users" '
'WHERE ("users"."id" > ?) '
'RETURNING "users"."id" AS "old_id"'), [2])
Register = Table('register', ('id', 'value', 'category'))
| TestDeleteQuery |
python | pytorch__pytorch | test/test_testing.py | {
"start": 33664,
"end": 39929
} | class ____(TestCase):
def test_identifier_tensor_likes(self):
actual = torch.tensor([1, 2, 3, 4])
expected = torch.tensor([1, 2, 5, 6])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Tensor-likes")):
fn()
def test_identifier_scalars(self):
actual = 3
expected = 5
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Scalars")):
fn()
def test_not_equal(self):
actual = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
expected = torch.tensor([1, 2, 5, 6], dtype=torch.float32)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("not equal")):
fn(rtol=0.0, atol=0.0)
def test_not_close(self):
actual = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
expected = torch.tensor([1, 2, 5, 6], dtype=torch.float32)
for fn, (rtol, atol) in itertools.product(
assert_close_with_inputs(actual, expected), ((1.3e-6, 0.0), (0.0, 1e-5), (1.3e-6, 1e-5))
):
with self.assertRaisesRegex(AssertionError, re.escape("not close")):
fn(rtol=rtol, atol=atol)
def test_mismatched_elements(self):
actual = torch.tensor([1, 2, 3, 4])
expected = torch.tensor([1, 2, 5, 6])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Mismatched elements: 2 / 4 (50.0%)")):
fn()
def test_abs_diff(self):
actual = torch.tensor([[1, 2], [3, 4]])
expected = torch.tensor([[1, 2], [5, 4]])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Greatest absolute difference: 2 at index (1, 0)")):
fn()
def test_small_float_dtype(self):
for dtype in [
torch.float8_e4m3fn,
torch.float8_e4m3fnuz,
torch.float8_e5m2,
torch.float8_e5m2fnuz,
torch.float8_e8m0fnu,
]:
w_vector = torch.tensor([3.14, 1.0], dtype=dtype)
x_vector = torch.tensor([1.0, 3.14], dtype=dtype)
y_vector = torch.tensor([3.14, 3.14], dtype=dtype)
z_vector = torch.tensor([1.0, 3.14], dtype=dtype)
for additional_dims in range(4):
new_shape = list(w_vector.shape) + ([1] * additional_dims)
w_tensor = w_vector.reshape(new_shape)
x_tensor = x_vector.reshape(new_shape)
y_tensor = y_vector.reshape(new_shape)
z_tensor = z_vector.reshape(new_shape)
for fn in assert_close_with_inputs(x_tensor, y_tensor):
expected_shape = (0,) + (0,) * (additional_dims)
with self.assertRaisesRegex(
AssertionError, re.escape(f"The first mismatched element is at index {expected_shape}")
):
fn()
for fn in assert_close_with_inputs(w_tensor, y_tensor):
expected_shape = (1,) + (0,) * (additional_dims)
with self.assertRaisesRegex(
AssertionError, re.escape(f"The first mismatched element is at index {expected_shape}")
):
fn()
for fn in assert_close_with_inputs(x_tensor, z_tensor):
fn()
def test_abs_diff_scalar(self):
actual = 3
expected = 5
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Absolute difference: 2")):
fn()
def test_rel_diff(self):
actual = torch.tensor([[1, 2], [3, 4]])
expected = torch.tensor([[1, 4], [3, 4]])
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Greatest relative difference: 0.5 at index (0, 1)")):
fn()
def test_rel_diff_scalar(self):
actual = 2
expected = 4
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Relative difference: 0.5")):
fn()
def test_zero_div_zero(self):
actual = torch.tensor([1.0, 0.0])
expected = torch.tensor([2.0, 0.0])
for fn in assert_close_with_inputs(actual, expected):
# Although it looks complicated, this regex just makes sure that the word 'nan' is not part of the error
# message. That would happen if the 0 / 0 is used for the mismatch computation although it matches.
with self.assertRaisesRegex(AssertionError, "((?!nan).)*"):
fn()
def test_rtol(self):
rtol = 1e-3
actual = torch.tensor((1, 2))
expected = torch.tensor((2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape(f"(up to {rtol} allowed)")):
fn(rtol=rtol, atol=0.0)
def test_atol(self):
atol = 1e-3
actual = torch.tensor((1, 2))
expected = torch.tensor((2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape(f"(up to {atol} allowed)")):
fn(rtol=0.0, atol=atol)
def test_msg_str(self):
msg = "Custom error message!"
actual = torch.tensor(1)
expected = torch.tensor(2)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, msg):
fn(msg=msg)
def test_msg_callable(self):
msg = "Custom error message"
actual = torch.tensor(1)
expected = torch.tensor(2)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, msg):
fn(msg=lambda _: msg)
| TestAssertCloseErrorMessage |
python | getsentry__sentry | tests/sentry/models/test_projectownership.py | {
"start": 30429,
"end": 33457
} | class ____(TestCase):
def test_no_actors(self) -> None:
assert resolve_actors([], self.project.id) == {}
def test_basic(self) -> None:
owners = [Owner("user", self.user.email), Owner("team", self.team.slug)]
assert resolve_actors(owners, self.project.id) == {
owners[0]: Actor(id=self.user.id, actor_type=ActorType.USER),
owners[1]: Actor(id=self.team.id, actor_type=ActorType.TEAM),
}
def test_teams(self) -> None:
# Normal team
owner1 = Owner("team", self.team.slug)
actor1 = Actor(id=self.team.id, actor_type=ActorType.TEAM)
# Team that doesn't exist
owner2 = Owner("team", "nope")
actor2 = None
# A team that's not ours
self.create_project(teams=[self.create_team()])
otherteam = self.create_team()
owner3 = Owner("team", otherteam.slug)
actor3 = None
assert resolve_actors([owner1, owner2, owner3], self.project.id) == {
owner1: actor1,
owner2: actor2,
owner3: actor3,
}
def test_users(self) -> None:
# Normal user
owner1 = Owner("user", self.user.email)
actor1 = Actor(id=self.user.id, actor_type=ActorType.USER)
# An extra secondary email
email1 = self.create_useremail(self.user, None, is_verified=True).email
owner2 = Owner("user", email1)
actor2 = actor1 # They map to the same user since it's just a secondary email
# Another secondary email, that isn't verified
email2 = self.create_useremail(self.user, None, is_verified=False).email
owner3 = Owner("user", email2)
# Intentionally allow unverified emails
# actor3 = None
actor3 = actor1
# An entirely unknown user
owner4 = Owner("user", "nope")
actor4 = None
# A user that doesn't belong with us
otheruser = self.create_user()
owner5 = Owner("user", otheruser.email)
actor5 = None
# Case-insensitive for user
owner6 = Owner("user", self.user.email.upper())
actor6 = actor1
assert resolve_actors(
[owner1, owner2, owner3, owner4, owner5, owner6], self.project.id
) == {
owner1: actor1,
owner2: actor2,
owner3: actor3,
owner4: actor4,
owner5: actor5,
owner6: actor6,
}
def test_with_user_avatar(self) -> None:
# Check for regressions associated with serializing to RpcUser with a
# non-null UserAvatar
user = self.create_user()
with assume_test_silo_mode_of(UserAvatar):
UserAvatar.objects.create(user=user)
org = self.create_organization(owner=user)
team = self.create_team(organization=org, members=[user])
project = self.create_project(organization=org, teams=[team])
owner = Owner("user", user.email)
resolve_actors([owner], project.id)
| ResolveActorsTestCase |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 48012,
"end": 48526
} | class ____(DeferredLineBase):
"""A line that can be 'unwritten' by adding name to V.graph.removed_buffers"""
def __init__(self, name: str, line: str):
super().__init__(line)
self.name = name
assert not isinstance(line, DeferredLineBase)
def __call__(self) -> Optional[str]:
if not is_buffer_removed(self.name):
return self.line
return None
def _new_line(self, line: str) -> DeferredLine:
return DeferredLine(self.name, line)
| DeferredLine |
python | realpython__materials | python-all-attribute/shapes/circle.py | {
"start": 64,
"end": 206
} | class ____:
def __init__(self, radius):
self.radius = validate(radius)
def area(self):
return _pi * self.radius**2
| Circle |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/operators/test_sql.py | {
"start": 39196,
"end": 43718
} | class ____:
def _construct_operator(self, sql, min_threshold, max_threshold):
dag = DAG("test_dag", schedule=None, start_date=datetime.datetime(2017, 1, 1))
return SQLThresholdCheckOperator(
task_id="test_task",
sql=sql,
min_threshold=min_threshold,
max_threshold=max_threshold,
dag=dag,
)
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
def test_pass_min_value_max_value(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = (10,)
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator("Select avg(val) from table1 limit 1", 1, 100)
operator.execute(context=MagicMock())
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
def test_pass_min_value_max_value_templated(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = (10,)
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator("Select avg(val) from table1 limit 1", "{{ params.min }}", 100)
operator.render_template_fields({"params": {"min": 1}})
operator.execute(context=MagicMock())
mock_hook.get_first.assert_called_once_with("Select avg(val) from table1 limit 1")
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
def test_fail_min_value_max_value(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = (10,)
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator("Select avg(val) from table1 limit 1", 20, 100)
with pytest.raises(AirflowException, match="10.*20.0.*100.0"):
operator.execute(context=MagicMock())
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
def test_pass_min_sql_max_sql(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator("Select 10", "Select 1", "Select 100")
operator.execute(context=MagicMock())
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
def test_fail_min_sql_max_sql(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator("Select 10", "Select 20", "Select 100")
with pytest.raises(AirflowException, match="10.*20.*100"):
operator.execute(context=MagicMock())
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
@pytest.mark.parametrize(
("sql", "min_threshold", "max_threshold"),
(
("Select 75", 45, "Select 100"),
# check corner-case if result of query is "falsey" does not raise error
("Select 0", 0, 1),
("Select 1", 0, 1),
),
)
def test_pass_min_value_max_sql(self, mock_get_db_hook, sql, min_threshold, max_threshold):
mock_hook = mock.Mock()
mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator(sql, min_threshold, max_threshold)
operator.execute(context=MagicMock())
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
def test_fail_min_sql_max_value(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
mock_get_db_hook.return_value = mock_hook
operator = self._construct_operator("Select 155", "Select 45", 100)
with pytest.raises(AirflowException, match="155.*45.*100.0"):
operator.execute(context=MagicMock())
@mock.patch.object(SQLThresholdCheckOperator, "get_db_hook")
def test_fail_if_query_returns_no_rows(self, mock_get_db_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = None
mock_get_db_hook.return_value = mock_hook
sql = "Select val from table1 where val = 'val not in table'"
operator = self._construct_operator(sql, 20, 100)
with pytest.raises(AirflowException, match=f"The following query returned zero rows: {sql}"):
operator.execute(context=MagicMock())
| TestThresholdCheckOperator |
python | dask__distributed | distributed/actor.py | {
"start": 7027,
"end": 7443
} | class ____:
"""
An rpc-like object that uses the scheduler's rpc to connect to a worker
"""
def __init__(self, rpc, address):
self.rpc = rpc
self._address = address
def __getattr__(self, key):
async def func(**msg):
msg["op"] = key
result = await self.rpc.proxy(worker=self._address, msg=msg)
return result
return func
| ProxyRPC |
python | kamyu104__LeetCode-Solutions | Python/online-majority-element-in-subarray.py | {
"start": 134,
"end": 1170
} | class ____(object):
def __init__(self, arr):
"""
:type arr: List[int]
"""
Q, ERROR_RATE = 10000, 0.001
self.__K = int(Q/ERROR_RATE).bit_length() # floor(log2(Q/ERROR_RATE))+1 = 24
self.__arr = arr
self.__inv_idx = collections.defaultdict(list)
for i, x in enumerate(self.__arr):
self.__inv_idx[x].append(i)
def query(self, left, right, threshold):
"""
:type left: int
:type right: int
:type threshold: int
:rtype: int
"""
def count(inv_idx, m, left, right):
return bisect.bisect_right(inv_idx[m], right) - \
bisect.bisect_left(inv_idx[m], left)
for _ in xrange(self.__K):
m = self.__arr[random.randint(left, right)]
if count(self.__inv_idx, m, left, right) >= threshold:
return m
return -1
# Time: ctor: O(n)
# query: O(sqrt(n)*logn)
# Space: O(n)
import collections
import bisect
| MajorityChecker |
python | pytest-dev__pytest | testing/example_scripts/unittest/test_unittest_plain_async.py | {
"start": 81,
"end": 163
} | class ____(unittest.TestCase):
async def test_foo(self):
assert False
| Test |
python | facebook__pyre-check | tools/incremental_test/specification.py | {
"start": 621,
"end": 2590
} | class ____(ABC):
@abstractmethod
def activate_sandbox(self, environment: Environment) -> ContextManager[Path]:
raise NotImplementedError()
@abstractmethod
def to_json(self) -> Dict[str, Any]:
raise NotImplementedError()
@staticmethod
def from_json(input_json: Dict[str, Any]) -> "RepositoryState":
try:
kind = input_json["kind"]
if kind == "hg":
return HgRepositoryState(
repository=Path(input_json["repository"]),
commit_hash=input_json["commit_hash"],
)
elif kind == "file":
files = input_json["files"]
if not isinstance(files, dict):
raise InvalidSpecificationException(
"File repository must be specified as dicts"
)
return FileRepositoryState(files)
elif kind == "updated":
base = input_json["base"]
updates = input_json["updates"]
if not isinstance(updates, list):
raise InvalidSpecificationException(
"Updates must be specified as lists"
)
return UpdatedRepositoryState(
RepositoryState.from_json(base),
[RepositoryUpdate.from_json(update) for update in updates],
)
else:
raise InvalidSpecificationException(
"Cannot create RepositoryState due to unrecognized kind"
)
except KeyError as key:
raise InvalidSpecificationException(
f"Cannot create RespositoryState due to missing field '{key}'"
)
except TypeError as error:
raise InvalidSpecificationException(
f"Cannot create RespositoryState due to invalid path: {error}"
)
| RepositoryState |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_group_index_stats.py | {
"start": 256,
"end": 11051
} | class ____(APITestCase, SnubaTestCase, OccurrenceTestMixin):
endpoint = "sentry-api-0-organization-group-index-stats"
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1)
def get_response(self, *args, **kwargs):
return super().get_response(self.project.organization.slug, **kwargs)
def test_simple(self) -> None:
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
group_a = self.store_event(
data={"timestamp": before_now(seconds=1).isoformat(), "fingerprint": ["group-a"]},
project_id=self.project.id,
).group
self.store_event(
data={"timestamp": before_now(seconds=2).isoformat(), "fingerprint": ["group-b"]},
project_id=self.project.id,
)
group_c = self.store_event(
data={"timestamp": before_now(seconds=3).isoformat(), "fingerprint": ["group-c"]},
project_id=self.project.id,
).group
self.login_as(user=self.user)
response = self.get_response(query="is:unresolved", groups=[group_a.id, group_c.id])
response_data = sorted(response.data, key=lambda x: x["firstSeen"], reverse=True)
assert response.status_code == 200
assert len(response_data) == 2
assert int(response_data[0]["id"]) == group_a.id
assert int(response_data[1]["id"]) == group_c.id
assert "title" not in response_data[0]
assert "hasSeen" not in response_data[0]
assert "stats" in response_data[0]
assert "firstSeen" in response_data[0]
assert "lastSeen" in response_data[0]
assert "count" in response_data[0]
assert "userCount" in response_data[0]
assert "lifetime" in response_data[0]
assert "filtered" in response_data[0]
def test_unhandled(self) -> None:
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
group_a = self.store_event(
data={"timestamp": before_now(seconds=1).isoformat(), "fingerprint": ["group-a"]},
project_id=self.project.id,
).group
self.login_as(user=self.user)
response = self.get_response(query="is:unresolved", groups=[group_a.id])
response_data = sorted(response.data, key=lambda x: x["firstSeen"], reverse=True)
assert response.status_code == 200
assert len(response_data) == 1
assert "title" not in response_data[0]
assert "hasSeen" not in response_data[0]
assert "stats" in response_data[0]
assert "firstSeen" in response_data[0]
assert "lastSeen" in response_data[0]
assert "count" in response_data[0]
assert "userCount" in response_data[0]
assert "lifetime" in response_data[0]
assert "filtered" in response_data[0]
assert "isUnhandled" in response_data[0]
def test_issue_platform_issue(self) -> None:
event_id = uuid.uuid4().hex
_, group_info = self.process_occurrence(
event_id=event_id,
project_id=self.project.id,
type=ProfileFileIOGroupType.type_id,
event_data={
"fingerprint": ["group-1"],
"timestamp": before_now(minutes=1).isoformat(),
},
)
assert group_info is not None
profile_group = group_info.group
self.login_as(user=self.user)
response = self.get_response(
query=f"issue:{profile_group.qualified_short_id}", groups=[profile_group.id]
)
response_data = sorted(response.data, key=lambda x: x["firstSeen"], reverse=True)
assert response.status_code == 200
assert len(response_data) == 1
assert int(response_data[0]["id"]) == profile_group.id
assert "title" not in response_data[0]
assert "hasSeen" not in response_data[0]
assert "stats" in response_data[0]
assert "firstSeen" in response_data[0]
assert "lastSeen" in response_data[0]
assert "count" in response_data[0]
assert "userCount" in response_data[0]
assert "lifetime" in response_data[0]
assert "filtered" in response_data[0]
def test_issue_platform_mixed_issue_not_title(self) -> None:
event_id = uuid.uuid4().hex
_, group_info = self.process_occurrence(
event_id=event_id,
project_id=self.project.id,
type=ProfileFileIOGroupType.type_id,
event_data={
"fingerprint": ["group-a"],
"timestamp": before_now(minutes=1).isoformat(),
},
)
assert group_info is not None
profile_group = group_info.group
error_event = self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
error_group = error_event.group
self.login_as(user=self.user)
response = self.get_response(
query=f"!title:{profile_group.title}", groups=[profile_group.id, error_group.id]
)
response_data = sorted(response.data, key=lambda x: x["firstSeen"], reverse=True)
assert response.status_code == 200
assert [int(grp["id"]) for grp in response_data] == [profile_group.id, error_group.id]
for data in response_data:
assert "title" not in data
assert "hasSeen" not in data
assert "stats" in data
assert "firstSeen" in data
assert "lastSeen" in data
assert "count" in data
assert "userCount" in data
assert "lifetime" in data
assert "filtered" in data
def test_no_matching_groups(self) -> None:
self.login_as(user=self.user)
response = self.get_response(sort_by="date", limit=10, query="is:unresolved", groups=[1337])
assert response.status_code == 400
def test_simple_with_project(self) -> None:
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
group_a = self.store_event(
data={"timestamp": before_now(seconds=1).isoformat(), "fingerprint": ["group-a"]},
project_id=self.project.id,
).group
self.store_event(
data={"timestamp": before_now(seconds=2).isoformat(), "fingerprint": ["group-b"]},
project_id=self.project.id,
)
group_c = self.store_event(
data={"timestamp": before_now(seconds=3).isoformat(), "fingerprint": ["group-c"]},
project_id=self.project.id,
).group
self.login_as(user=self.user)
response = self.get_response(
query=f"project:{self.project.slug}", groups=[group_a.id, group_c.id]
)
assert response.status_code == 200
assert len(response.data) == 2
def test_query_timestamp(self) -> None:
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
event2 = self.store_event(
data={"timestamp": before_now(seconds=1).isoformat(), "fingerprint": ["group-a"]},
project_id=self.project.id,
)
self.store_event(
data={"timestamp": before_now(seconds=2).isoformat(), "fingerprint": ["group-b"]},
project_id=self.project.id,
)
event4 = self.store_event(
data={"timestamp": before_now(seconds=3).isoformat(), "fingerprint": ["group-c"]},
project_id=self.project.id,
)
group_a = event2.group
group_c = event4.group
self.login_as(user=self.user)
response = self.get_response(
query=f"timestamp:>{before_now(seconds=3)} timestamp:<{before_now(seconds=1).isoformat()}",
groups=[group_a.id, group_c.id],
)
assert response.status_code == 200
assert len(response.data) == 2
def test_simple_flags(self) -> None:
group_a = self.store_event(
data={
"timestamp": before_now(seconds=500).isoformat(),
"fingerprint": ["group-a"],
"contexts": {"flags": {"values": [{"flag": "flag", "result": True}]}},
},
project_id=self.project.id,
).group
self.store_event(
data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-a"]},
project_id=self.project.id,
)
self.login_as(user=self.user)
response = self.get_response(query="is:unresolved flags[flag]:true", groups=[group_a.id])
response_data = sorted(response.data, key=lambda x: x["firstSeen"], reverse=True)
assert response.status_code == 200
assert len(response_data) == 1
assert int(response_data[0]["id"]) == group_a.id
assert response_data[0]["count"] == "2"
assert response_data[0]["filtered"]["count"] == "1"
assert response_data[0]["lifetime"]["count"] == "1"
def test_error_upsampling_with_allowlisted_project(self) -> None:
"""Test that count is upsampled for allowlisted projects in group index stats."""
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
project = self.project
event_data = {
"timestamp": before_now(seconds=30).isoformat(),
"message": "Error event for upsampling",
"contexts": {"error_sampling": {"client_sample_rate": 0.1}},
}
event = self.store_event(
data=event_data,
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
response = self.get_response(groups=[group.id])
assert response.status_code == 200
assert len(response.data) == 1
# Expect the count to be upsampled (1 / 0.1 = 10) - count is a string
assert response.data[0]["count"] == "10"
# Also check that lifetime stats are upsampled
assert response.data[0]["lifetime"]["count"] == "10"
# Also check that stats are upsampled, latest time bucket should contain upsampled event
assert any(
bucket[1] == 10 for bucket in response.data[0]["stats"]["24h"]
), "could not find upsampled bucket in stats"
| GroupListTest |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 35154,
"end": 35673
} | class ____(RedirectSerializerBase):
"""Override RedirectSerializerBase to sanitize the empty fields."""
from_url = serializers.SerializerMethodField()
to_url = serializers.SerializerMethodField()
def get_from_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.from_url or None
def get_to_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.to_url or None
| RedirectDetailSerializer |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 66059,
"end": 79808
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_function_element_column_labels(self):
users = self.tables.users
sess = fixture_session()
class max_(expression.FunctionElement):
name = "max"
inherit_cache = True
@compiles(max_)
def visit_max(element, compiler, **kw):
return "max(%s)" % compiler.process(element.clauses, **kw)
q = sess.query(max_(users.c.id))
eq_(q.all(), [(10,)])
def test_truly_unlabeled_sql_expressions(self):
users = self.tables.users
sess = fixture_session()
class not_named_max(expression.ColumnElement):
name = "not_named_max"
inherit_cache = True
@compiles(not_named_max)
def visit_max(element, compiler, **kw):
return "max(id)"
# assert that there is no "AS max_" or any label of any kind.
eq_(str(select(not_named_max())), "SELECT max(id)")
# ColumnElement still handles it by applying label()
q = sess.query(not_named_max()).select_from(users)
eq_(q.all(), [(10,)])
def test_deferred_instances(self):
User, addresses, Address = (
self.classes.User,
self.tables.addresses,
self.classes.Address,
)
session = fixture_session()
s = (
session.query(User)
.filter(
and_(
addresses.c.email_address == bindparam("emailad"),
Address.user_id == User.id,
)
)
.statement
)
result = list(
session.query(User)
.params(emailad="jack@bean.com")
.from_statement(s)
)
eq_([User(id=7)], result)
def test_aliased_sql_construct(self):
User, Address = self.classes.User, self.classes.Address
j = join(User, Address)
a1 = aliased(j)
self.assert_compile(
a1.select(),
"SELECT anon_1.users_id, anon_1.users_name, anon_1.addresses_id, "
"anon_1.addresses_user_id, anon_1.addresses_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM users JOIN addresses "
"ON users.id = addresses.user_id) AS anon_1",
)
def test_aliased_sql_construct_raises_adapt_on_names(self):
User, Address = self.classes.User, self.classes.Address
j = join(User, Address)
assert_raises_message(
sa_exc.ArgumentError,
"adapt_on_names only applies to ORM elements",
aliased,
j,
adapt_on_names=True,
)
def test_scalar_subquery_compile_whereclause(self):
User = self.classes.User
Address = self.classes.Address
session = fixture_session()
q = session.query(User.id).filter(User.id == 7).scalar_subquery()
q = session.query(Address).filter(Address.user_id == q)
assert isinstance(q.whereclause.right, expression.ColumnElement)
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, addresses.user_id "
"AS addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE "
"addresses.user_id = (SELECT users.id "
"FROM users WHERE users.id = :id_1)",
)
def test_subquery_no_eagerloads(self):
User = self.classes.User
s = fixture_session()
self.assert_compile(
s.query(User).options(joinedload(User.addresses)).subquery(),
"SELECT users.id, users.name FROM users",
)
def test_exists_no_eagerloads(self):
User = self.classes.User
s = fixture_session()
self.assert_compile(
s.query(
s.query(User).options(joinedload(User.addresses)).exists()
),
"SELECT EXISTS (SELECT 1 FROM users) AS anon_1",
)
def test_named_subquery(self):
User = self.classes.User
session = fixture_session()
a1 = session.query(User.id).filter(User.id == 7).subquery("foo1")
a2 = session.query(User.id).filter(User.id == 7).subquery(name="foo2")
a3 = session.query(User.id).filter(User.id == 7).subquery()
eq_(a1.name, "foo1")
eq_(a2.name, "foo2")
eq_(a3.name, "%%(%d anon)s" % id(a3))
def test_labeled_subquery(self):
User = self.classes.User
session = fixture_session()
a1 = (
session.query(User.id)
.filter(User.id == 7)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
assert a1.c.users_id is not None
def test_no_subquery_for_from_statement(self):
"""
found_during_typing
"""
User = self.classes.User
session = fixture_session()
q = session.query(User.id).from_statement(text("select * from user"))
with expect_raises_message(
sa.exc.InvalidRequestError,
r"Can't call this method on a Query that uses from_statement\(\)",
):
q.subquery()
def test_reduced_subquery(self):
User = self.classes.User
ua = aliased(User)
session = fixture_session()
a1 = (
session.query(User.id, ua.id, ua.name)
.filter(User.id == ua.id)
.subquery(reduce_columns=True)
)
self.assert_compile(
a1,
"SELECT users.id, users_1.name FROM "
"users, users AS users_1 "
"WHERE users.id = users_1.id",
)
def test_label(self):
User = self.classes.User
session = fixture_session()
q = session.query(User.id).filter(User.id == 7).label("foo")
self.assert_compile(
session.query(q),
"SELECT (SELECT users.id FROM users "
"WHERE users.id = :id_1) AS foo",
)
def test_scalar_subquery(self):
User = self.classes.User
session = fixture_session()
q = session.query(User.id).filter(User.id == 7).scalar_subquery()
self.assert_compile(
session.query(User).filter(User.id.in_(q)),
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users WHERE users.id "
"IN (SELECT users.id FROM users WHERE "
"users.id = :id_1)",
)
def test_param_transfer(self):
User = self.classes.User
session = fixture_session()
q = (
session.query(User.id)
.filter(User.id == bindparam("foo"))
.params(foo=7)
.scalar_subquery()
)
q = session.query(User).filter(User.id.in_(q))
eq_(User(id=7), q.one())
def test_in(self):
User, Address = self.classes.User, self.classes.Address
session = fixture_session()
s = (
session.query(User.id)
.join(User.addresses)
.group_by(User.id)
.having(func.count(Address.id) > 2)
)
eq_(session.query(User).filter(User.id.in_(s)).all(), [User(id=8)])
def test_union(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User).filter(User.name == "ed")
q2 = s.query(User).filter(User.name == "fred")
eq_(
s.query(User)
.from_statement(union(q1, q2).order_by("users_name"))
.all(),
[User(name="ed"), User(name="fred")],
)
def test_select(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User).filter(User.name == "ed")
self.assert_compile(
select(q1.subquery()),
"SELECT anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.name = :name_1) AS anon_1",
)
def test_join(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
# TODO: do we want aliased() to detect a query and convert to
# subquery() automatically ?
q1 = s.query(Address).filter(Address.email_address == "jack@bean.com")
adalias = aliased(Address, q1.subquery())
eq_(
s.query(User, adalias)
.join(adalias, User.id == adalias.user_id)
.all(),
[
(
User(id=7, name="jack"),
Address(email_address="jack@bean.com", user_id=7, id=1),
)
],
)
def test_group_by_plain(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).group_by(User.name)
self.assert_compile(
select(q1.subquery()),
"SELECT anon_1.id, anon_1.name FROM (SELECT users.id AS id, "
"users.name AS name FROM users GROUP BY users.name) AS anon_1",
)
def test_group_by_append(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).group_by(User.name)
# test append something to group_by
self.assert_compile(
select(q1.group_by(User.id).subquery()),
"SELECT anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users GROUP BY users.name, users.id) AS anon_1",
)
def test_group_by_cancellation(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).group_by(User.name)
# test cancellation by using None, replacement with something else
self.assert_compile(
select(q1.group_by(None).group_by(User.id).subquery()),
"SELECT anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users GROUP BY users.id) AS anon_1",
)
# test cancellation by using None, replacement with nothing
self.assert_compile(
select(q1.group_by(None).subquery()),
"SELECT anon_1.id, anon_1.name FROM (SELECT users.id AS id, "
"users.name AS name FROM users) AS anon_1",
)
def test_group_by_cancelled_still_present(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).group_by(User.name).group_by(None)
q1._no_criterion_assertion("foo")
def test_order_by_plain(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).order_by(User.name)
self.assert_compile(
select(q1.subquery()),
"SELECT anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users ORDER BY users.name) AS anon_1",
)
def test_order_by_append(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).order_by(User.name)
# test append something to order_by
self.assert_compile(
select(q1.order_by(User.id).subquery()),
"SELECT anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users ORDER BY users.name, users.id) AS anon_1",
)
def test_order_by_cancellation(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).order_by(User.name)
# test cancellation by using None, replacement with something else
self.assert_compile(
select(q1.order_by(None).order_by(User.id).subquery()),
"SELECT anon_1.id, anon_1.name FROM (SELECT users.id AS id, "
"users.name AS name FROM users ORDER BY users.id) AS anon_1",
)
# test cancellation by using None, replacement with nothing
self.assert_compile(
select(q1.order_by(None).subquery()),
"SELECT anon_1.id, anon_1.name FROM (SELECT users.id AS id, "
"users.name AS name FROM users) AS anon_1",
)
def test_order_by_cancellation_false(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).order_by(User.name)
# test cancellation by using None, replacement with something else
self.assert_compile(
select(q1.order_by(False).order_by(User.id).subquery()),
"SELECT anon_1.id, anon_1.name FROM (SELECT users.id AS id, "
"users.name AS name FROM users ORDER BY users.id) AS anon_1",
)
# test cancellation by using None, replacement with nothing
self.assert_compile(
select(q1.order_by(False).subquery()),
"SELECT anon_1.id, anon_1.name FROM (SELECT users.id AS id, "
"users.name AS name FROM users) AS anon_1",
)
def test_order_by_cancelled_allows_assertions(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).order_by(User.name).order_by(None)
q1._no_criterion_assertion("foo")
def test_legacy_order_by_cancelled_allows_assertions(self):
User = self.classes.User
s = fixture_session()
q1 = s.query(User.id, User.name).order_by(User.name).order_by(False)
q1._no_criterion_assertion("foo")
| ExpressionTest |
python | redis__redis-py | tests/test_sentinel.py | {
"start": 1159,
"end": 11470
} | class ____:
def __init__(self, servisentinel_ce_name="mymaster", ip="127.0.0.1", port=6379):
self.clients = {}
self.master = {
"ip": ip,
"port": port,
"is_master": True,
"is_sdown": False,
"is_odown": False,
"num-other-sentinels": 0,
}
self.service_name = servisentinel_ce_name
self.slaves = []
self.nodes_down = set()
self.nodes_timeout = set()
def connection_error_if_down(self, node):
if node.id in self.nodes_down:
raise exceptions.ConnectionError
def timeout_if_down(self, node):
if node.id in self.nodes_timeout:
raise exceptions.TimeoutError
def client(self, host, port, **kwargs):
return SentinelTestClient(self, (host, port))
@pytest.fixture()
def cluster(request, master_ip):
def teardown():
redis.sentinel.Redis = saved_Redis
cluster = SentinelTestCluster(ip=master_ip)
saved_Redis = redis.sentinel.Redis
redis.sentinel.Redis = cluster.client
request.addfinalizer(teardown)
return cluster
@pytest.fixture()
def sentinel(request, cluster):
return Sentinel([("foo", 26379), ("bar", 26379)])
@pytest.fixture()
def deployed_sentinel(request):
sentinel_ips = request.config.getoption("--sentinels")
sentinel_endpoints = [
(ip.strip(), int(port.strip()))
for ip, port in (endpoint.split(":") for endpoint in sentinel_ips.split(","))
]
kwargs = {}
decode_responses = True
sentinel_kwargs = {"decode_responses": decode_responses}
force_master_ip = "localhost"
protocol = request.config.getoption("--protocol", 2)
sentinel = Sentinel(
sentinel_endpoints,
force_master_ip=force_master_ip,
sentinel_kwargs=sentinel_kwargs,
socket_timeout=0.1,
protocol=protocol,
decode_responses=decode_responses,
**kwargs,
)
yield sentinel
for s in sentinel.sentinels:
s.close()
@pytest.mark.onlynoncluster
def test_discover_master(sentinel, master_ip):
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
@pytest.mark.onlynoncluster
def test_discover_master_error(sentinel):
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("xxx")
@pytest.mark.onlynoncluster
def test_dead_pool(sentinel):
master = sentinel.master_for("mymaster", db=9)
conn = master.connection_pool.get_connection()
conn.disconnect()
del master
conn.connect()
@pytest.mark.onlynoncluster
def test_discover_master_sentinel_down(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_down.add(("foo", 26379))
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
# 'bar' is now first sentinel
assert sentinel.sentinels[0].id == ("bar", 26379)
@pytest.mark.onlynoncluster
def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
# Put first sentinel 'foo' down
cluster.nodes_timeout.add(("foo", 26379))
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
# 'bar' is now first sentinel
assert sentinel.sentinels[0].id == ("bar", 26379)
@pytest.mark.onlynoncluster
def test_master_min_other_sentinels(cluster, master_ip):
sentinel = Sentinel([("foo", 26379)], min_other_sentinels=1)
# min_other_sentinels
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("mymaster")
cluster.master["num-other-sentinels"] = 2
address = sentinel.discover_master("mymaster")
assert address == (master_ip, 6379)
@pytest.mark.onlynoncluster
def test_master_odown(cluster, sentinel):
cluster.master["is_odown"] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("mymaster")
@pytest.mark.onlynoncluster
def test_master_sdown(cluster, sentinel):
cluster.master["is_sdown"] = True
with pytest.raises(MasterNotFoundError):
sentinel.discover_master("mymaster")
@pytest.mark.onlynoncluster
def test_discover_slaves(cluster, sentinel):
assert sentinel.discover_slaves("mymaster") == []
cluster.slaves = [
{"ip": "slave0", "port": 1234, "is_odown": False, "is_sdown": False},
{"ip": "slave1", "port": 1234, "is_odown": False, "is_sdown": False},
]
assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
# slave0 -> ODOWN
cluster.slaves[0]["is_odown"] = True
assert sentinel.discover_slaves("mymaster") == [("slave1", 1234)]
# slave1 -> SDOWN
cluster.slaves[1]["is_sdown"] = True
assert sentinel.discover_slaves("mymaster") == []
cluster.slaves[0]["is_odown"] = False
cluster.slaves[1]["is_sdown"] = False
# node0 -> DOWN
cluster.nodes_down.add(("foo", 26379))
assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
cluster.nodes_down.clear()
# node0 -> TIMEOUT
cluster.nodes_timeout.add(("foo", 26379))
assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
@pytest.mark.onlynoncluster
def test_master_for(sentinel, master_ip):
master = sentinel.master_for("mymaster", db=9)
assert master.ping()
assert master.connection_pool.master_address == (master_ip, 6379)
# Use internal connection check
master = sentinel.master_for("mymaster", db=9, check_connection=True)
assert master.ping()
@pytest.mark.onlynoncluster
def test_slave_for(cluster, sentinel):
cluster.slaves = [
{"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False}
]
slave = sentinel.slave_for("mymaster", db=9)
assert slave.ping()
@pytest.mark.onlynoncluster
def test_slave_for_slave_not_found_error(cluster, sentinel):
cluster.master["is_odown"] = True
slave = sentinel.slave_for("mymaster", db=9)
with pytest.raises(SlaveNotFoundError):
slave.ping()
@pytest.mark.onlynoncluster
def test_slave_round_robin(cluster, sentinel, master_ip):
cluster.slaves = [
{"ip": "slave0", "port": 6379, "is_odown": False, "is_sdown": False},
{"ip": "slave1", "port": 6379, "is_odown": False, "is_sdown": False},
]
pool = SentinelConnectionPool("mymaster", sentinel)
rotator = pool.rotate_slaves()
assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
# Fallback to master
assert next(rotator) == (master_ip, 6379)
with pytest.raises(SlaveNotFoundError):
next(rotator)
@pytest.mark.onlynoncluster
def test_ckquorum(sentinel):
resp = sentinel.sentinel_ckquorum("mymaster")
assert resp is True
@pytest.mark.onlynoncluster
def test_flushconfig(sentinel):
resp = sentinel.sentinel_flushconfig()
assert resp is True
@pytest.mark.onlynoncluster
def test_reset(cluster, sentinel):
cluster.master["is_odown"] = True
resp = sentinel.sentinel_reset("mymaster")
assert resp is True
@pytest.mark.onlynoncluster
@pytest.mark.parametrize("method_name", ["master_for", "slave_for"])
def test_auto_close_pool(cluster, sentinel, method_name):
"""
Check that the connection pool created by the sentinel client is
automatically closed
"""
method = getattr(sentinel, method_name)
client = method("mymaster", db=9)
pool = client.connection_pool
assert client.auto_close_connection_pool is True
calls = 0
def mock_disconnect():
nonlocal calls
calls += 1
with mock.patch.object(pool, "disconnect", mock_disconnect):
client.close()
assert calls == 1
pool.disconnect()
# Tests against real sentinel instances
@pytest.mark.onlynoncluster
def test_get_sentinels(deployed_sentinel):
resps = deployed_sentinel.sentinel_sentinels("redis-py-test", return_responses=True)
# validate that the original command response is returned
assert isinstance(resps, list)
# validate that the command has been executed against all sentinels
# each response from each sentinel is returned
assert len(resps) > 1
# validate default behavior
resps = deployed_sentinel.sentinel_sentinels("redis-py-test")
assert isinstance(resps, bool)
@pytest.mark.onlynoncluster
def test_get_master_addr_by_name(deployed_sentinel):
resps = deployed_sentinel.sentinel_get_master_addr_by_name(
"redis-py-test", return_responses=True
)
# validate that the original command response is returned
assert isinstance(resps, list)
# validate that the command has been executed just once
# when executed once, only one response element is returned
assert len(resps) == 1
assert isinstance(resps[0], tuple)
# validate default behavior
resps = deployed_sentinel.sentinel_get_master_addr_by_name("redis-py-test")
assert isinstance(resps, bool)
@pytest.mark.onlynoncluster
def test_redis_master_usage(deployed_sentinel):
r = deployed_sentinel.master_for("redis-py-test", db=0)
r.set("foo", "bar")
assert r.get("foo") == "bar"
@pytest.mark.onlynoncluster
def test_sentinel_commands_with_strict_redis_client(request):
sentinel_ips = request.config.getoption("--sentinels")
sentinel_host, sentinel_port = sentinel_ips.split(",")[0].split(":")
protocol = request.config.getoption("--protocol", 2)
client = StrictRedis(
host=sentinel_host, port=sentinel_port, decode_responses=True, protocol=protocol
)
# skipping commands that change the state of the sentinel setup
assert isinstance(client.sentinel_get_master_addr_by_name("redis-py-test"), tuple)
assert isinstance(client.sentinel_master("redis-py-test"), dict)
if is_resp2_connection(client):
assert isinstance(client.sentinel_masters(), dict)
else:
masters = client.sentinel_masters()
assert isinstance(masters, list)
for master in masters:
assert isinstance(master, dict)
assert isinstance(client.sentinel_sentinels("redis-py-test"), list)
assert isinstance(client.sentinel_slaves("redis-py-test"), list)
assert isinstance(client.sentinel_ckquorum("redis-py-test"), bool)
client.close()
| SentinelTestCluster |
python | huggingface__transformers | tests/models/flava/test_image_processing_flava.py | {
"start": 1522,
"end": 6927
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
resample=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=FLAVA_IMAGE_MEAN,
image_std=FLAVA_IMAGE_STD,
input_size_patches=14,
total_mask_patches=75,
mask_group_max_patches=None,
mask_group_min_patches=16,
mask_group_min_aspect_ratio=0.3,
mask_group_max_aspect_ratio=None,
codebook_do_resize=True,
codebook_size=None,
codebook_resample=None,
codebook_do_center_crop=True,
codebook_crop_size=None,
codebook_do_map_pixels=True,
codebook_do_normalize=True,
codebook_image_mean=FLAVA_CODEBOOK_MEAN,
codebook_image_std=FLAVA_CODEBOOK_STD,
):
size = size if size is not None else {"height": 224, "width": 224}
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.do_resize = do_resize
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.size = size
self.resample = resample if resample is not None else PILImageResampling.BICUBIC
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.input_size_patches = input_size_patches
self.total_mask_patches = total_mask_patches
self.mask_group_max_patches = mask_group_max_patches
self.mask_group_min_patches = mask_group_min_patches
self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
self.codebook_do_resize = codebook_do_resize
self.codebook_size = codebook_size
# LANCZOS resample does not support torch Tensor. Use BICUBIC as closest alternative
self.codebook_resample = codebook_resample if codebook_resample is not None else PILImageResampling.BICUBIC
self.codebook_do_center_crop = codebook_do_center_crop
self.codebook_crop_size = codebook_crop_size
self.codebook_do_map_pixels = codebook_do_map_pixels
self.codebook_do_normalize = codebook_do_normalize
self.codebook_image_mean = codebook_image_mean
self.codebook_image_std = codebook_image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"resample": self.resample,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"input_size_patches": self.input_size_patches,
"total_mask_patches": self.total_mask_patches,
"mask_group_max_patches": self.mask_group_max_patches,
"mask_group_min_patches": self.mask_group_min_patches,
"mask_group_min_aspect_ratio": self.mask_group_min_aspect_ratio,
"mask_group_max_aspect_ratio": self.mask_group_min_aspect_ratio,
"codebook_do_resize": self.codebook_do_resize,
"codebook_size": self.codebook_size,
"codebook_resample": self.codebook_resample,
"codebook_do_center_crop": self.codebook_do_center_crop,
"codebook_crop_size": self.codebook_crop_size,
"codebook_do_map_pixels": self.codebook_do_map_pixels,
"codebook_do_normalize": self.codebook_do_normalize,
"codebook_image_mean": self.codebook_image_mean,
"codebook_image_std": self.codebook_image_std,
}
def get_expected_image_size(self):
return (self.size["height"], self.size["width"])
def get_expected_mask_size(self):
return (
(self.input_size_patches, self.input_size_patches)
if not isinstance(self.input_size_patches, tuple)
else self.input_size_patches
)
def get_expected_codebook_image_size(self):
return (self.codebook_size["height"], self.codebook_size["width"])
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| FlavaImageProcessingTester |
python | ApeWorX__ape | src/ape_ethereum/multicall/handlers.py | {
"start": 911,
"end": 4590
} | class ____(ManagerAccessMixin):
def __init__(
self,
address: "AddressType" = MULTICALL3_ADDRESS,
supported_chains: Optional[list[int]] = None,
) -> None:
"""
Initialize a new Multicall session object. By default, there are no calls to make.
"""
self.address = address
self.supported_chains = supported_chains or SUPPORTED_CHAINS
self.calls: list[dict] = []
@classmethod
def inject(cls) -> ModuleType:
"""
Create the multicall module contract on-chain, so we can use it.
Must use a provider that supports ``debug_setCode``.
Usage example::
from ape_ethereum import multicall
@pytest.fixture(scope="session")
def use_multicall():
# NOTE: use this fixture any test where you want to use a multicall
return multicall.BaseMulticall.inject()
"""
from ape_ethereum import multicall
provider = cls.network_manager.provider
provider.set_code(MULTICALL3_ADDRESS, MULTICALL3_CODE)
if cls.chain_manager.chain_id not in SUPPORTED_CHAINS:
SUPPORTED_CHAINS.append(cls.chain_manager.chain_id)
return multicall
@cached_property
def contract(self) -> ContractInstance:
try:
# NOTE: This will attempt to fetch the contract, such as from an explorer,
# if it is not yet cached.
contract = self.chain_manager.contracts.instance_at(self.address)
except ChainError:
# else use our backend (with less methods)
contract = self.chain_manager.contracts.instance_at(
MULTICALL3_ADDRESS,
contract_type=ContractType.model_validate(MULTICALL3_CONTRACT_TYPE),
)
if (
self.chain_manager.chain_id not in self.supported_chains
and contract.code != MULTICALL3_CODE
):
# NOTE: 2nd condition allows for use in local test deployments and fork networks
raise UnsupportedChainError()
return contract
@property
def handler(self) -> ContractTransactionHandler:
if any(call["value"] > 0 for call in self.calls):
return self.contract.aggregate3Value
return self.contract.aggregate3
def add(
self,
call: ContractMethodHandler,
*args,
allowFailure: bool = True,
value: int = 0,
) -> "BaseMulticall":
"""
Adds a call to the Multicall session object.
Raises:
:class:`~ape_ethereum.multicall.exceptions.InvalidOption`: If one
of the kwarg modifiers is not able to be used.
Args:
call (:class:`~ape_ethereum.multicall.handlers.ContractMethodHandler`):
The method to call.
*args: The arguments to invoke the method with.
allowFailure (bool): Whether the call is allowed to fail.
value (int): The amount of ether to forward with the call.
Returns:
:class:`~ape_ethereum.multicall.handlers.BaseMulticall`: returns itself
to emulate a builder pattern.
"""
# Append call dict to the list
# NOTE: Depending upon `_handler_method_abi` at time when `__call__` is triggered,
# some of these properties will be unused
self.calls.append(
{
"target": call.contract.address,
"allowFailure": allowFailure,
"value": value,
"callData": call.encode_input(*args),
}
)
return self
| BaseMulticall |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py | {
"start": 4720,
"end": 4844
} | class ____(NamedTuple):
all_reduce_input: torch.Tensor
event: Optional[torch.Event] # all-reduce event
| AllReduceState |
python | getsentry__sentry | src/sentry/search/events/builder/discover.py | {
"start": 9270,
"end": 18678
} | class ____(TimeseriesQueryBuilder):
"""Create one of two top events queries, which is used for the Top Period &
Top Daily displays
This builder requires a Snuba response dictionary that already contains
the top events for the parameters being queried. eg.
`[{transaction: foo, count: 100}, {transaction: bar, count:50}]`
Two types of queries can be constructed through this builder:
First getting each timeseries for each top event (other=False). Which
roughly results in a query like the one below. The Groupby allow us to
get additional rows per time window for each transaction. And the Where
clause narrows the results to those in the top events:
```
SELECT
transaction, count(), time
FROM
discover
GROUP BY
transaction, time
WHERE
transaction IN ['foo', 'bar']
```
Secondly This builder can also be used for getting a single timeseries
for all events not in the top (other=True). Which is done by taking the
previous query, dropping the groupby, and negating the condition eg.
```
SELECT
count(), time
FROM
discover
GROUP BY
time
WHERE
transaction NOT IN ['foo', 'bar']
```
"""
def __init__(
self,
dataset: Dataset,
params: ParamsType,
interval: int,
top_events: list[dict[str, Any]],
snuba_params: SnubaParams | None = None,
other: bool = False,
query: str | None = None,
selected_columns: list[str] | None = None,
timeseries_columns: list[str] | None = None,
equations: list[str] | None = None,
config: QueryBuilderConfig | None = None,
limit: int | None = 10000,
):
selected_columns = [] if selected_columns is None else selected_columns
timeseries_columns = [] if timeseries_columns is None else timeseries_columns
equations = [] if equations is None else equations
timeseries_equations, timeseries_functions = categorize_columns(timeseries_columns)
super().__init__(
dataset,
params,
snuba_params=snuba_params,
interval=interval,
query=query,
selected_columns=list(set(selected_columns + timeseries_functions)),
equations=list(set(equations + timeseries_equations)),
limit=limit,
config=config,
)
self.fields: list[str] = selected_columns if selected_columns is not None else []
self.fields = [self.tag_to_prefixed_map.get(c, c) for c in selected_columns]
if (conditions := self.resolve_top_event_conditions(top_events, other)) is not None:
self.where.append(conditions)
if not other:
self.groupby.extend(
[column for column in self.columns if column not in self.aggregates]
)
@property
def translated_groupby(self) -> list[str]:
"""Get the names of the groupby columns to create the series names"""
translated = []
for groupby in self.groupby:
if groupby == self.time_column:
continue
if isinstance(groupby, (CurriedFunction, AliasedExpression)):
translated.append(groupby.alias)
else:
translated.append(groupby.name)
# sorted so the result key is consistent
return sorted(translated)
def resolve_top_event_conditions(
self, top_events: list[dict[str, Any]], other: bool
) -> WhereType | None:
"""Given a list of top events construct the conditions"""
conditions = []
for field in self.fields:
# If we have a project field, we need to limit results by project so we don't hit the result limit
if field in ["project", "project.id", "project.name"] and top_events:
# Iterate through the existing conditions to find the project one
# the project condition is a requirement of queries so there should always be one
project_condition = [
condition
for condition in self.where
if isinstance(condition, Condition)
and condition.lhs == self.column("project_id")
][0]
self.where.remove(project_condition)
if field in ["project", "project.name"]:
projects = list(
{self.params.project_slug_map[event[field]] for event in top_events}
)
else:
projects = list({event["project.id"] for event in top_events})
if other:
projects = list(set(self.params.project_ids) - set(projects))
# if there are no more projects, we search on project id 0 to guarantee no results
if not projects:
projects = [0]
self.where.append(Condition(self.column("project_id"), Op.IN, projects))
continue
resolved_field = self.resolve_column(self.prefixed_to_tag_map.get(field, field))
values: set[Any] = set()
array_values: list[Any] = []
for event in top_events:
if field in event:
alias = field
elif self.is_column_function(resolved_field) and resolved_field.alias in event:
alias = resolved_field.alias
else:
continue
# Note that because orderby shouldn't be an array field its not included in the values
event_value = event.get(alias)
if isinstance(event_value, list) and event_value not in array_values:
array_values.append(event_value)
else:
values.add(event_value)
values_list = list(values) + array_values
if values_list:
if field == "timestamp" or field.startswith("timestamp.to_"):
if not other:
# timestamp fields needs special handling, creating a big OR instead
function, operator = Or, Op.EQ
else:
# Needs to be a big AND when negated
function, operator = And, Op.NEQ
if len(values_list) > 1:
conditions.append(
function(
conditions=[
Condition(resolved_field, operator, value)
for value in sorted(values_list)
]
)
)
else:
conditions.append(Condition(resolved_field, operator, values_list[0]))
elif None in values_list:
# one of the values was null, but we can't do an in with null values, so split into two conditions
non_none_values = [value for value in values_list if value is not None]
null_condition = Condition(
Function("isNull", [resolved_field]), Op.EQ if not other else Op.NEQ, 1
)
if non_none_values:
non_none_condition = Condition(
resolved_field, Op.IN if not other else Op.NOT_IN, non_none_values
)
if not other:
conditions.append(Or(conditions=[null_condition, non_none_condition]))
else:
conditions.append(And(conditions=[null_condition, non_none_condition]))
else:
conditions.append(null_condition)
elif any(isinstance(value, list) for value in values_list):
list_conditions = []
for values in values_list:
# This depends on a weird clickhouse behaviour where the best way to compare arrays is to do
# array("foo", "bar") IN array("foo", "bar") == 1
list_conditions.append(
Condition(resolved_field, Op.IN if not other else Op.NOT_IN, values)
)
if len(list_conditions) > 1:
if not other:
conditions.append(Or(conditions=list_conditions))
else:
conditions.append(And(conditions=list_conditions))
else:
conditions.extend(list_conditions)
else:
conditions.append(
Condition(resolved_field, Op.IN if not other else Op.NOT_IN, values_list)
)
if len(conditions) > 1:
final_function = And if not other else Or
final_condition = final_function(conditions=conditions)
elif len(conditions) == 1:
final_condition = conditions[0]
else:
final_condition = None
return final_condition
| TopEventsQueryBuilder |
python | plotly__plotly.py | plotly/graph_objs/table/_legendgrouptitle.py | {
"start": 233,
"end": 2925
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "table"
_path_str = "table.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.table.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.table.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | kamyu104__LeetCode-Solutions | Python/distant-barcodes.py | {
"start": 849,
"end": 1369
} | class ____(object):
def rearrangeBarcodes(self, barcodes):
"""
:type barcodes: List[int]
:rtype: List[int]
"""
cnts = collections.Counter(barcodes)
sorted_cnts = [[v, k] for k, v in cnts.iteritems()]
sorted_cnts.sort(reverse=True)
i = 0
for v, k in sorted_cnts:
for _ in xrange(v):
barcodes[i] = k
i += 2
if i >= len(barcodes):
i = 1
return barcodes
| Solution2 |
python | PrefectHQ__prefect | src/prefect/futures.py | {
"start": 19736,
"end": 25670
} | class ____(NamedTuple, Generic[R]):
"""A named 2-tuple of sets.
multiple inheritance supported in 3.11+, use typing_extensions.NamedTuple
"""
done: set[PrefectFuture[R]]
not_done: set[PrefectFuture[R]]
def wait(
futures: list[PrefectFuture[R]], timeout: float | None = None
) -> DoneAndNotDoneFutures[R]:
"""
Wait for the futures in the given sequence to complete.
Args:
futures: The sequence of Futures to wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures. Duplicate futures given to *futures* are removed and will be
returned only once.
Examples:
```python
@task
def sleep_task(seconds):
sleep(seconds)
return 42
@flow
def flow():
futures = random_task.map(range(10))
done, not_done = wait(futures, timeout=5)
print(f"Done: {len(done)}")
print(f"Not Done: {len(not_done)}")
```
"""
_futures = set(futures)
done = {f for f in _futures if f._final_state}
not_done = _futures - done
if len(done) == len(_futures):
return DoneAndNotDoneFutures(done, not_done)
# If no timeout, wait for all futures sequentially
if timeout is None:
for future in not_done.copy():
future.wait()
done.add(future)
not_done.remove(future)
return DoneAndNotDoneFutures(done, not_done)
# With timeout, monitor all futures concurrently
try:
with timeout_context(timeout):
finished_event = threading.Event()
finished_lock = threading.Lock()
finished_futures: list[PrefectFuture[R]] = []
def mark_done(future: PrefectFuture[R]):
with finished_lock:
finished_futures.append(future)
finished_event.set()
# Add callbacks to all pending futures
for future in not_done:
future.add_done_callback(mark_done)
# Wait for futures to complete within timeout
while not_done:
# Wait for at least one future to complete
finished_event.wait()
with finished_lock:
newly_done = finished_futures[:]
finished_futures.clear()
finished_event.clear()
# Move completed futures to done set
for future in newly_done:
if future in not_done:
not_done.remove(future)
done.add(future)
return DoneAndNotDoneFutures(done, not_done)
except TimeoutError:
logger.debug("Timed out waiting for all futures to complete.")
return DoneAndNotDoneFutures(done, not_done)
def resolve_futures_to_states(
expr: PrefectFuture[R] | Any,
) -> PrefectFuture[R] | Any:
"""
Given a Python built-in collection, recursively find `PrefectFutures` and build a
new collection with the same structure with futures resolved to their final states.
Resolving futures to their final states may wait for execution to complete.
Unsupported object types will be returned without modification.
"""
def _resolve_state(future: PrefectFuture[R]):
future.wait()
return future.state
return _resolve_futures(
expr,
resolve_fn=_resolve_state,
)
def resolve_futures_to_results(
expr: PrefectFuture[R] | Any,
) -> Any:
"""
Given a Python built-in collection, recursively find `PrefectFutures` and build a
new collection with the same structure with futures resolved to their final results.
Resolving futures to their final result may wait for execution to complete.
Unsupported object types will be returned without modification.
"""
def _resolve_result(future: PrefectFuture[R]) -> Any:
future.wait()
if future.state.is_completed():
return future.result()
else:
raise Exception("At least one result did not complete successfully")
return _resolve_futures(expr, resolve_fn=_resolve_result)
def _resolve_futures(
expr: PrefectFuture[R] | Any,
resolve_fn: Callable[[PrefectFuture[R]], Any],
) -> Any:
"""Helper function to resolve PrefectFutures in a collection."""
futures: set[PrefectFuture[R]] = set()
visit_collection(
expr,
visit_fn=partial(_collect_futures, futures),
return_data=False,
context={},
)
# If no futures were found, return the original expression
if not futures:
return expr
# Resolve each future using the provided resolve function
resolved_values = {future: resolve_fn(future) for future in futures}
def replace_futures(expr: Any, context: Any) -> Any:
# Expressions inside quotes should not be modified
if isinstance(context.get("annotation"), quote):
raise StopVisiting()
if isinstance(expr, PrefectFuture):
return resolved_values[expr]
else:
return expr
return visit_collection(
expr,
visit_fn=replace_futures,
return_data=True,
context={},
)
def _collect_futures(
futures: set[PrefectFuture[R]], expr: Any | PrefectFuture[R], context: Any
) -> Any | PrefectFuture[R]:
# Expressions inside quotes should not be traversed
if isinstance(context.get("annotation"), quote):
raise StopVisiting()
if isinstance(expr, PrefectFuture):
futures.add(expr)
return expr
| DoneAndNotDoneFutures |
python | kubernetes-client__python | kubernetes/client/models/v1_projected_volume_source.py | {
"start": 383,
"end": 5411
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'default_mode': 'int',
'sources': 'list[V1VolumeProjection]'
}
attribute_map = {
'default_mode': 'defaultMode',
'sources': 'sources'
}
def __init__(self, default_mode=None, sources=None, local_vars_configuration=None): # noqa: E501
"""V1ProjectedVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._default_mode = None
self._sources = None
self.discriminator = None
if default_mode is not None:
self.default_mode = default_mode
if sources is not None:
self.sources = sources
@property
def default_mode(self):
"""Gets the default_mode of this V1ProjectedVolumeSource. # noqa: E501
defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:return: The default_mode of this V1ProjectedVolumeSource. # noqa: E501
:rtype: int
"""
return self._default_mode
@default_mode.setter
def default_mode(self, default_mode):
"""Sets the default_mode of this V1ProjectedVolumeSource.
defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:param default_mode: The default_mode of this V1ProjectedVolumeSource. # noqa: E501
:type: int
"""
self._default_mode = default_mode
@property
def sources(self):
"""Gets the sources of this V1ProjectedVolumeSource. # noqa: E501
sources is the list of volume projections. Each entry in this list handles one source. # noqa: E501
:return: The sources of this V1ProjectedVolumeSource. # noqa: E501
:rtype: list[V1VolumeProjection]
"""
return self._sources
@sources.setter
def sources(self, sources):
"""Sets the sources of this V1ProjectedVolumeSource.
sources is the list of volume projections. Each entry in this list handles one source. # noqa: E501
:param sources: The sources of this V1ProjectedVolumeSource. # noqa: E501
:type: list[V1VolumeProjection]
"""
self._sources = sources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ProjectedVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ProjectedVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1ProjectedVolumeSource |
python | networkx__networkx | networkx/algorithms/tests/test_voronoi.py | {
"start": 60,
"end": 3477
} | class ____:
"""Unit tests for the Voronoi cells function."""
def test_isolates(self):
"""Tests that a graph with isolated nodes has all isolates in
one block of the partition.
"""
G = nx.empty_graph(5)
cells = nx.voronoi_cells(G, {0, 2, 4})
expected = {0: {0}, 2: {2}, 4: {4}, "unreachable": {1, 3}}
assert expected == cells
def test_undirected_unweighted(self):
G = nx.cycle_graph(6)
cells = nx.voronoi_cells(G, {0, 3})
expected = {0: {0, 1, 5}, 3: {2, 3, 4}}
assert expected == cells
def test_directed_unweighted(self):
# This is the singly-linked directed cycle graph on six nodes.
G = nx.DiGraph(pairwise(range(6), cyclic=True))
cells = nx.voronoi_cells(G, {0, 3})
expected = {0: {0, 1, 2}, 3: {3, 4, 5}}
assert expected == cells
def test_directed_inward(self):
"""Tests that reversing the graph gives the "inward" Voronoi
partition.
"""
# This is the singly-linked reverse directed cycle graph on six nodes.
G = nx.DiGraph(pairwise(range(6), cyclic=True))
G = G.reverse(copy=False)
cells = nx.voronoi_cells(G, {0, 3})
expected = {0: {0, 4, 5}, 3: {1, 2, 3}}
assert expected == cells
def test_undirected_weighted(self):
edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1)]
G = nx.Graph()
G.add_weighted_edges_from(edges)
cells = nx.voronoi_cells(G, {0, 3})
expected = {0: {0}, 3: {1, 2, 3}}
assert expected == cells
def test_directed_weighted(self):
edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1), (3, 2, 1), (2, 1, 1)]
G = nx.DiGraph()
G.add_weighted_edges_from(edges)
cells = nx.voronoi_cells(G, {0, 3})
expected = {0: {0}, 3: {1, 2, 3}}
assert expected == cells
def test_multigraph_unweighted(self):
"""Tests that the Voronoi cells for a multigraph are the same as
for a simple graph.
"""
edges = [(0, 1), (1, 2), (2, 3)]
G = nx.MultiGraph(2 * edges)
H = nx.Graph(G)
G_cells = nx.voronoi_cells(G, {0, 3})
H_cells = nx.voronoi_cells(H, {0, 3})
assert G_cells == H_cells
def test_multidigraph_unweighted(self):
# This is the twice-singly-linked directed cycle graph on six nodes.
edges = list(pairwise(range(6), cyclic=True))
G = nx.MultiDiGraph(2 * edges)
H = nx.DiGraph(G)
G_cells = nx.voronoi_cells(G, {0, 3})
H_cells = nx.voronoi_cells(H, {0, 3})
assert G_cells == H_cells
def test_multigraph_weighted(self):
edges = [(0, 1, 10), (0, 1, 10), (1, 2, 1), (1, 2, 100), (2, 3, 1), (2, 3, 100)]
G = nx.MultiGraph()
G.add_weighted_edges_from(edges)
cells = nx.voronoi_cells(G, {0, 3})
expected = {0: {0}, 3: {1, 2, 3}}
assert expected == cells
def test_multidigraph_weighted(self):
edges = [
(0, 1, 10),
(0, 1, 10),
(1, 2, 1),
(2, 3, 1),
(3, 2, 10),
(3, 2, 1),
(2, 1, 10),
(2, 1, 1),
]
G = nx.MultiDiGraph()
G.add_weighted_edges_from(edges)
cells = nx.voronoi_cells(G, {0, 3})
expected = {0: {0}, 3: {1, 2, 3}}
assert expected == cells
| TestVoronoiCells |
python | tiangolo__fastapi | scripts/people.py | {
"start": 2015,
"end": 2093
} | class ____(BaseModel):
cursor: str
node: DiscussionsNode
| DiscussionsEdge |
python | huggingface__transformers | src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py | {
"start": 1714,
"end": 3675
} | class ____(ImagesKwargs, total=False):
r"""
min_pixels (`int`, *optional*, defaults to `56 * 56`):
The min pixels of the image to resize the image.
max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
The max pixels of the image to resize the image.
patch_size (`int`, *optional*, defaults to 14):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 2):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 2):
The merge size of the vision encoder to llm encoder.
"""
min_pixels: int
max_pixels: int
patch_size: int
temporal_patch_size: int
merge_size: int
def smart_resize(
height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280
):
"""Rescales the image so that the following conditions are met:
1. Both dimensions (height and width) are divisible by 'factor'.
2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
3. The aspect ratio of the image is maintained as closely as possible.
"""
if max(height, width) / min(height, width) > 200:
raise ValueError(
f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
)
h_bar = round(height / factor) * factor
w_bar = round(width / factor) * factor
if h_bar * w_bar > max_pixels:
beta = math.sqrt((height * width) / max_pixels)
h_bar = max(factor, math.floor(height / beta / factor) * factor)
w_bar = max(factor, math.floor(width / beta / factor) * factor)
elif h_bar * w_bar < min_pixels:
beta = math.sqrt(min_pixels / (height * width))
h_bar = math.ceil(height * beta / factor) * factor
w_bar = math.ceil(width * beta / factor) * factor
return h_bar, w_bar
| Qwen2VLImageProcessorKwargs |
python | django__django | django/contrib/auth/forms.py | {
"start": 13417,
"end": 17084
} | class ____(forms.Form):
email = forms.EmailField(
label=_("Email"),
max_length=254,
widget=forms.EmailInput(attrs={"autocomplete": "email"}),
)
def send_mail(
self,
subject_template_name,
email_template_name,
context,
from_email,
to_email,
html_email_template_name=None,
):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = "".join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, "text/html")
try:
email_message.send()
except Exception:
logger.exception(
"Failed to send password reset email to %s", context["user"].pk
)
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
email_field_name = UserModel.get_email_field_name()
active_users = UserModel._default_manager.filter(
**{
"%s__iexact" % email_field_name: email,
"is_active": True,
}
)
return (
u
for u in active_users
if u.has_usable_password()
and _unicode_ci_compare(email, getattr(u, email_field_name))
)
def save(
self,
domain_override=None,
subject_template_name="registration/password_reset_subject.txt",
email_template_name="registration/password_reset_email.html",
use_https=False,
token_generator=default_token_generator,
from_email=None,
request=None,
html_email_template_name=None,
extra_email_context=None,
):
"""
Generate a one-use only link for resetting password and send it to the
user.
"""
email = self.cleaned_data["email"]
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
email_field_name = UserModel.get_email_field_name()
for user in self.get_users(email):
user_email = getattr(user, email_field_name)
user_pk_bytes = force_bytes(UserModel._meta.pk.value_to_string(user))
context = {
"email": user_email,
"domain": domain,
"site_name": site_name,
"uid": urlsafe_base64_encode(user_pk_bytes),
"user": user,
"token": token_generator.make_token(user),
"protocol": "https" if use_https else "http",
**(extra_email_context or {}),
}
self.send_mail(
subject_template_name,
email_template_name,
context,
from_email,
user_email,
html_email_template_name=html_email_template_name,
)
| PasswordResetForm |
python | ray-project__ray | doc/source/serve/doc_code/monitoring/logging_config.py | {
"start": 1415,
"end": 1751
} | class ____:
def __call__(self):
logger = logging.getLogger("ray.serve")
logger.info("hello world")
serve.run(Model.bind())
resp = requests.get("http://localhost:8000/")
# __enable_access_log_end__
# __application_and_deployment_start__
import requests
import logging
from ray import serve
@serve.deployment
| Model |
python | aimacode__aima-python | gui/grid_mdp.py | {
"start": 19454,
"end": 21926
} | class ____(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.frame = tk.Frame(self)
self.frame.pack()
self.controller = controller
def create_buttons(self):
"""creates interactive cells to build MDP"""
_height = self.controller.shared_data['height'].get()
_width = self.controller.shared_data['width'].get()
self.controller.menu_bar.entryconfig('Edit', state=tk.NORMAL)
self.controller.menu_bar.entryconfig('Build', state=tk.NORMAL)
self.gridmdp = [[0.0] * max(1, _width) for _ in range(max(1, _height))]
self.buttons = [[None] * max(1, _width) for _ in range(max(1, _height))]
self.terminals = []
s = ttk.Style()
s.theme_use('clam')
s.configure('TButton', background=grayd, padding=0)
s.configure('wall.TButton', background=gray2, foreground=white)
s.configure('reward.TButton', background=gray9)
s.configure('+term.TButton', background=green8)
s.configure('-term.TButton', background=pblue, foreground=white)
s.configure('=term.TButton', background=green4)
for i in range(max(1, _height)):
for j in range(max(1, _width)):
self.buttons[i][j] = ttk.Button(self.frame, text=f'({_height - i - 1}, {j})',
width=int(196 / max(1, _width)),
command=partial(dialogbox, i, j, self.gridmdp, self.terminals,
self.buttons, _height))
self.buttons[i][j].grid(row=i, column=j, ipady=int(336 / max(1, _height)) - 12)
def initialize(self):
"""runs initialize_dialogbox"""
_height = self.controller.shared_data['height'].get()
_width = self.controller.shared_data['width'].get()
initialize_dialogbox(_width, _height, self.gridmdp, self.terminals, self.buttons)
def master_reset(self):
"""runs external reset"""
_height = self.controller.shared_data['height'].get()
_width = self.controller.shared_data['width'].get()
if tkinter.messagebox.askokcancel('Reset', 'Are you sure you want to reset all cells?'):
external_reset(_width, _height, self.gridmdp, self.terminals, self.buttons)
| BuildMDP |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_current_release.py | {
"start": 468,
"end": 4551
} | class ____(APITestCase):
def _set_up_current_release(
self, group_seen_on_latest_release: bool
) -> tuple[Group, dict[str, GroupRelease]]:
clock = MockClock()
# Create several of everything, to exercise all filtering clauses.
def set_up_organization() -> tuple[Group, dict[str, GroupRelease]]:
organization = self.create_organization()
team = self.create_team(organization=organization)
self.create_team_membership(team=team, user=self.user)
environments = [
self.create_environment(name=env_name, organization=organization)
for env_name in ("production", "development")
]
def set_up_project() -> tuple[Group, dict[str, GroupRelease]]:
project = self.create_project(organization=organization, teams=[team])
for environment in environments:
environment.add_project(project)
def set_up_release() -> Release:
release = self.create_release(project=project)
for environment in environments:
ReleaseEnvironment.get_or_create(project, release, environment, clock())
return release
groups = [self.create_group(project=project) for i in range(3)]
target_group = groups[1]
early_release = set_up_release()
later_release = set_up_release()
def seen_on(
group: Group, release: Release, environment: Environment
) -> GroupRelease:
return GroupRelease.get_or_create(group, release, environment, clock())
def set_up_group_releases(environment: Environment) -> GroupRelease:
for release in (early_release, later_release):
for group in groups:
if group != target_group:
seen_on(group, release, environment)
latest_seen = seen_on(target_group, early_release, environment)
if group_seen_on_latest_release:
latest_seen = seen_on(target_group, later_release, environment)
return latest_seen
target_releases = {env.name: set_up_group_releases(env) for env in environments}
return target_group, target_releases
set_up_project()
target_group, target_releases = set_up_project()
set_up_project()
return target_group, target_releases
set_up_organization()
target_group, target_releases = set_up_organization()
set_up_organization()
return target_group, target_releases
def _test_current_release(
self, group_seen_on_latest_release: bool, environments_to_query: list[str]
) -> tuple[dict[str, object] | None, dict[str, GroupRelease]]:
target_group, target_releases = self._set_up_current_release(group_seen_on_latest_release)
self.login_as(user=self.user)
url = f"/api/0/issues/{target_group.id}/current-release/"
response = self.client.get(url, {"environment": environments_to_query}, format="json")
assert response.status_code == 200
return response.data["currentRelease"], target_releases
def test_current_release_has_group_on_one_env(self) -> None:
current_release, target_releases = self._test_current_release(True, ["production"])
prod_release = target_releases["production"]
assert current_release is not None
assert current_release["firstSeen"] == prod_release.first_seen
assert current_release["lastSeen"] == prod_release.last_seen
def test_current_release_is_later(self) -> None:
for envs in [[], ["production"], ["development"], ["production", "development"]]:
current_release, target_releases = self._test_current_release(False, envs)
assert current_release is None
| GroupCurrentReleaseTest |
python | catalyst-team__catalyst | catalyst/runners/runner.py | {
"start": 1200,
"end": 18835
} | class ____(IRunner):
"""Single-stage deep learning Runner with user-friendly API.
Runner supports the logic for deep learning pipeline configuration
with pure python code.
Please check the examples for intuition.
Args:
*args: `IRunner` args (model, engine)
**kwargs: `IRunner` kwargs (model, engine)
.. note::
IRunner supports only base user-friendly callbacks, like
TqdmCallback, TimerCallback, CheckRunCallback, BatchOverfitCallback,
and CheckpointCallback.
It does not automatically add Criterion, Optimizer or Scheduler callbacks.
That means, that you have do optimization step by yourself during
``handle_batch`` method
or specify the required callbacks in ``.train`` or ``get_callbacks`` methods.
For more easy-to-go supervised use case please follow
:py:mod:`catalyst.runners.runner.SupervisedRunner`.
.. note::
Please follow the `minimal examples`_ sections for use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
Examples:
.. code-block:: python
import os
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from catalyst import dl, metrics
from catalyst.contrib.datasets import MNIST
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
optimizer = optim.Adam(model.parameters(), lr=0.02)
loaders = {
"train": DataLoader(MNIST(os.getcwd(), train=True), batch_size=32),
"valid": DataLoader(MNIST(os.getcwd(), train=False), batch_size=32),
}
class CustomRunner(dl.Runner):
def predict_batch(self, batch):
# model inference step
return self.model(batch[0].to(self.device))
def on_loader_start(self, runner):
super().on_loader_start(runner)
self.meters = {
key: metrics.AdditiveMetric(compute_on_call=False)
for key in ["loss", "accuracy01", "accuracy03"]
}
def handle_batch(self, batch):
# model train/valid step
# unpack the batch
x, y = batch
# run model forward pass
logits = self.model(x)
# compute the loss
loss = F.cross_entropy(logits, y)
# compute other metrics of interest
accuracy01, accuracy03 = metrics.accuracy(logits, y, topk=(1, 3))
# log metrics
self.batch_metrics.update(
{"loss": loss, "accuracy01": accuracy01, "accuracy03": accuracy03}
)
for key in ["loss", "accuracy01", "accuracy03"]:
self.meters[key].update(
self.batch_metrics[key].item(), self.batch_size
)
# run model backward pass
if self.is_train_loader:
self.engine.backward(loss)
self.optimizer.step()
self.optimizer.zero_grad()
def on_loader_end(self, runner):
for key in ["loss", "accuracy01", "accuracy03"]:
self.loader_metrics[key] = self.meters[key].compute()[0]
super().on_loader_end(runner)
runner = CustomRunner()
# model training
runner.train(
model=model,
optimizer=optimizer,
loaders=loaders,
logdir="./logs",
num_epochs=5,
verbose=True,
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
)
# model inference
for logits in runner.predict_loader(loader=loaders["valid"]):
assert logits.detach().cpu().numpy().shape[-1] == 10
"""
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
# extra
self._seed = 42
self._hparams: Dict = None
self._num_epochs: int = 1
# model selection
self._logdir = None
self._valid_loader = None
self._valid_metric = None
self._minimize_valid_metric = None
# extras
self._resume: str = None
self._verbose = False
self._timeit = False
self._check = False
self._overfit = False
self._profile = False
self._load_best_on_end = False
@property
def seed(self) -> int:
"""Experiment's initial seed value."""
return self._seed
@property
def hparams(self) -> Dict:
"""Returns hyperparameters."""
return self._hparams or {}
@property
def num_epochs(self) -> int:
"""Returns the number of epochs in the experiment."""
return self._num_epochs
def get_engine(self) -> Engine:
"""Returns the engine for the experiment."""
return self._engine
def get_loggers(self) -> Dict[str, ILogger]:
"""Returns the loggers for the experiment."""
loggers = self._loggers or {}
logger_exists = lambda logger_fn: any(
isinstance(x, logger_fn) for x in loggers.values()
)
if not logger_exists(ConsoleLogger):
loggers["_console"] = ConsoleLogger()
if self._logdir is not None and not logger_exists(CSVLogger):
# @TODO: remove postfix
loggers["_csv"] = CSVLogger(logdir=self._logdir, use_logdir_postfix=True)
if self._logdir is not None and not logger_exists(TensorboardLogger):
# @TODO: remove postfix
loggers["_tensorboard"] = TensorboardLogger(
logdir=self._logdir, use_logdir_postfix=True
)
return loggers
def get_loaders(self) -> "OrderedDict[str, DataLoader]":
"""Returns the loaders for the experiment."""
return self._loaders
def get_model(self) -> RunnerModel:
"""Returns the model for the experiment."""
return self._model
def get_criterion(self) -> Optional[RunnerCriterion]:
"""Returns the criterion for the experiment."""
return self._criterion
def get_optimizer(self, model: RunnerModel) -> Optional[RunnerOptimizer]:
"""Returns the optimizer for the experiment."""
return self._optimizer
def get_scheduler(self, optimizer: RunnerOptimizer) -> Optional[RunnerScheduler]:
"""Returns the scheduler for the experiment."""
return self._scheduler
def get_callbacks(self) -> "OrderedDict[str, Callback]":
"""Returns the callbacks for the experiment."""
callbacks = sort_callbacks_by_order(self._callbacks)
callback_exists = lambda callback_fn: any(
callback_isinstance(x, callback_fn) for x in callbacks.values()
)
if self._verbose and not callback_exists(TqdmCallback):
callbacks["_verbose"] = TqdmCallback()
if self._timeit and not callback_exists(TimerCallback):
callbacks["_timer"] = TimerCallback()
if self._check and not callback_exists(CheckRunCallback):
callbacks["_check"] = CheckRunCallback()
if self._overfit and not callback_exists(BatchOverfitCallback):
callbacks["_overfit"] = BatchOverfitCallback()
if self._profile and not callback_exists(ProfilerCallback):
callbacks["_profile"] = ProfilerCallback(
tensorboard_path=os.path.join(self._logdir, "tb_profile"),
profiler_kwargs={
"activities": [
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
],
"on_trace_ready": torch.profiler.tensorboard_trace_handler(
os.path.join(self._logdir, "tb_profile")
),
"with_stack": True,
"with_flops": True,
},
)
if self._logdir is not None and not callback_exists(ICheckpointCallback):
callbacks["_checkpoint"] = CheckpointCallback(
logdir=os.path.join(self._logdir, "checkpoints"),
loader_key=self._valid_loader,
metric_key=self._valid_metric,
minimize=self._minimize_valid_metric,
load_best_on_end=self._load_best_on_end,
)
return callbacks
def train(
self,
*,
# the data
loaders: "OrderedDict[str, DataLoader]",
# the core
model: TorchModel = None,
engine: Union["Engine", str] = None,
# the components
criterion: TorchCriterion = None,
optimizer: TorchOptimizer = None,
scheduler: TorchScheduler = None,
# the callbacks
callbacks: "Union[List[Callback], OrderedDict[str, Callback]]" = None,
# the loggers
loggers: "Dict[str, ILogger]" = None,
# experiment info
seed: int = 42,
hparams: Dict[str, Any] = None,
num_epochs: int = 1,
# extra info (callbacks info)
logdir: str = None,
resume: str = None,
valid_loader: str = None,
valid_metric: str = None,
minimize_valid_metric: bool = None,
verbose: bool = False,
timeit: bool = False,
check: bool = False,
overfit: bool = False,
profile: bool = False,
load_best_on_end: bool = False,
# engine extra params,
cpu: bool = False,
fp16: bool = False,
ddp: bool = False,
) -> None:
"""
Starts the training of the model.
Args:
loaders: dictionary with one or several ``torch.utils.data.DataLoader``
for training, validation or inference
model: model to train
engine: engine to use for model training
criterion: criterion function for training
optimizer: optimizer for training
scheduler: scheduler for training
callbacks: list or dictionary with Catalyst callbacks
loggers: dictionary with Catalyst loggers
seed: experiment's initial seed value
hparams: hyperparameters for the run
num_epochs: number of training epochs
logdir: path to output directory
resume: path to checkpoint for model
valid_loader: loader name used to calculate
the metrics and save the checkpoints. For example,
you can pass `train` and then
the metrics will be taken from `train` loader.
valid_metric: the key to the name of the metric
by which the checkpoints will be selected.
minimize_valid_metric: flag to indicate whether
the ``valid_metric`` should be minimized or not (default: True).
verbose: if `True`, it displays the status of the training to the console.
timeit: if True, computes the execution time
of training process and displays it to the console.
check: if True, then only checks that pipeline is working
(3 epochs only with 3 batches per loader)
overfit: if True, then takes only one batch per loader
for model overfitting, for advance usage please check
``BatchOverfitCallback``
profile: if True, then uses ProfilerCallback, for advance usage please check
``ProfilerCallback``
load_best_on_end: if True, Runner will load
best checkpoint state (model, optimizer, etc)
according to validation metrics. Requires specified ``logdir``.
cpu: boolean flag to force CPU usage
fp16: boolean flag to use half-precision
ddp: if `True` will start training in distributed mode.
Note: Works only with python scripts. No jupyter support.
.. note::
Please follow the `minimal examples`_ sections for use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
# experiment setup
self._engine = (
engine or self.engine or get_available_engine(cpu=cpu, fp16=fp16, ddp=ddp)
)
# self._trial = trial
self._loggers = loggers
# the data
self._loaders = loaders
# the components
self._model = model or self.model
self._criterion = criterion
self._optimizer = optimizer
self._scheduler = scheduler
# the callbacks
self._callbacks = callbacks
# extra
self._seed = seed
self._hparams = hparams
self._num_epochs = num_epochs
self._logdir = logdir
self._resume = resume
self._valid_loader = valid_loader
self._valid_metric = valid_metric
self._minimize_valid_metric = minimize_valid_metric
self._verbose = verbose
self._timeit = timeit
self._check = check
self._overfit = overfit
self._profile = profile
self._load_best_on_end = load_best_on_end
# run
self.run()
@torch.no_grad()
def predict_batch(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
"""
Run model inference on specified data batch.
Args:
batch: dictionary with data batches from DataLoader.
**kwargs: additional kwargs to pass to the model
Returns: # noqa: DAR202
Mapping: model output dictionary
Raises:
NotImplementedError: if not implemented yet
"""
raise NotImplementedError("Please implement `runner.predict_batch` method")
@torch.no_grad()
def predict_loader(
self,
*,
loader: DataLoader,
model: TorchModel = None,
engine: Union["Engine", str] = None,
seed: int = 42,
# extra info
resume: str = None,
# engine extra params,
cpu: bool = False,
fp16: bool = False,
) -> Generator:
"""
Runs model inference on PyTorch DataLoader and returns
python generator with model predictions from `runner.predict_batch`.
Args:
loader: loader to predict
model: model to use for prediction
engine: engine to use for prediction
seed: random seed to use before prediction
resume: path to checkpoint for model
cpu: boolean flag to force CPU usage
fp16: boolean flag to use half-precision
Yields:
bathes with model predictions
.. note::
Please follow the `minimal examples`_ sections for use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
self.engine = engine or get_available_engine(cpu=cpu, fp16=fp16)
if model is not None:
self.model = model
assert self.model is not None
if resume is not None:
self.engine.wait_for_everyone()
unwrapped_model = self.engine.unwrap_model(self.model)
unwrapped_model.load_state_dict(load_checkpoint(resume))
self.model = self.engine.prepare(self.model)
maybe_recursive_call(self.model, "train", mode=False)
loader = self.engine.prepare(loader)
set_global_seed(seed)
for batch in loader:
yield self.predict_batch(batch)
def evaluate_loader(
self,
loader: DataLoader,
callbacks: "Union[List[Callback], OrderedDict[str, Callback]]" = None,
model: Optional[TorchModel] = None,
engine: Union["Engine", str] = None,
seed: int = 42,
verbose: bool = False,
) -> Dict[str, Any]:
"""
Evaluates dataloader with given model and returns obtained metrics.
Args:
loader: loader to predict
callbacks: list or dictionary with catalyst callbacks
model: model, compatible with current runner.
If `None` simply takes current model from runner.
engine: engine to use for model evaluation
seed: random seed to use before prediction
verbose: if `True`, it displays the status of the evaluation to the console.
Returns:
Dict with metrics counted on the loader.
Raises:
IRunnerError: if ``CheckpointCallback`` found in the callbacks
"""
callbacks = sort_callbacks_by_order(callbacks)
for callback in callbacks.values():
if callback_isinstance(callback, ICheckpointCallback):
raise IRunnerError(
"CheckpointCallback isn`t allowed for evaluation loader method"
)
if engine is None:
engine = self.engine
if model is None:
model = self.model
assert model is not None
self.train(
model=model,
engine=engine,
loaders=OrderedDict([("valid", loader)]),
num_epochs=1,
verbose=verbose,
callbacks=callbacks,
valid_loader="valid",
seed=seed,
)
return self.loader_metrics
__all__ = ["Runner"]
| Runner |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 32260,
"end": 34914
} | class ____(Operation):
def __init__(self, dtype=None, sparse=None, ragged=None, *, name=None):
super().__init__(name=name)
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
self.sparse = sparse
self.ragged = ragged
def call(self, x):
return backend.core.convert_to_tensor(
x, dtype=self.dtype, sparse=self.sparse, ragged=self.ragged
)
def compute_output_spec(self, x):
dtype = (
backend.standardize_dtype(x.dtype)
if self.dtype is None
else self.dtype
)
sparse = (
False if self.sparse is not None and not self.sparse else x.sparse
)
ragged = (
False if self.ragged is not None and not self.ragged else x.ragged
)
return backend.KerasTensor(
shape=x.shape, dtype=dtype, sparse=sparse, ragged=ragged
)
@keras_export("keras.ops.convert_to_tensor")
def convert_to_tensor(x, dtype=None, sparse=None, ragged=None):
"""Convert a NumPy array or Python array to a tensor.
Native tensors for the current backend or left unchanged unless the `dtype`,
`sparse` or `ragged` arguments are set.
Args:
x: A NumPy array, Python array (can be nested) or a backend tensor.
dtype: The target type. If `None`, the type of `x` is used.
sparse: Whether to keep sparse tensors. `False` will cause sparse
tensors to be densified. The default value of `None` means that
sparse tensors are kept only if the backend supports them.
ragged: Whether to keep ragged tensors. `False` will cause ragged
tensors to be densified. The default value of `None` means that
ragged tensors are kept only if the backend supports them.
Returns:
A backend tensor of the specified `dtype` and sparseness.
Example:
>>> x = np.array([1, 2, 3])
>>> y = keras.ops.convert_to_tensor(x)
"""
if any_symbolic_tensors((x,)):
return ConvertToTensor(dtype=dtype, sparse=sparse, ragged=ragged)(x)
return backend.core.convert_to_tensor(
x, dtype=dtype, sparse=sparse, ragged=ragged
)
@keras_export("keras.ops.convert_to_numpy")
def convert_to_numpy(x):
"""Convert a tensor to a NumPy array.
Args:
x: A tensor.
Returns:
A NumPy array.
"""
if any_symbolic_tensors((x,)):
# This will raise a `ValueError` defined in the `KerasTensor` class.
# We trigger it rather than duplicate it here.
return np.array(x)
return backend.convert_to_numpy(x)
| ConvertToTensor |
python | coleifer__peewee | playhouse/dataset.py | {
"start": 12097,
"end": 12306
} | class ____(CSVExporter):
def export(self, file_obj, header=True, **kwargs):
kwargs.setdefault('delimiter', '\t')
return super(TSVExporter, self).export(file_obj, header, **kwargs)
| TSVExporter |
python | coleifer__peewee | tests/models.py | {
"start": 166555,
"end": 168532
} | class ____(BaseTestCase):
def test_model_reprs(self):
class User(Model):
username = TextField(primary_key=True)
class Tweet(Model):
user = ForeignKeyField(User, backref='tweets')
content = TextField()
timestamp = TimestampField()
class EAV(Model):
entity = TextField()
attribute = TextField()
value = TextField()
class Meta:
primary_key = CompositeKey('entity', 'attribute')
class NoPK(Model):
key = TextField()
class Meta:
primary_key = False
self.assertEqual(repr(User), '<Model: User>')
self.assertEqual(repr(Tweet), '<Model: Tweet>')
self.assertEqual(repr(EAV), '<Model: EAV>')
self.assertEqual(repr(NoPK), '<Model: NoPK>')
self.assertEqual(repr(User()), '<User: None>')
self.assertEqual(repr(Tweet()), '<Tweet: None>')
self.assertEqual(repr(EAV()), '<EAV: (None, None)>')
self.assertEqual(repr(NoPK()), '<NoPK: n/a>')
self.assertEqual(repr(User(username='huey')), '<User: huey>')
self.assertEqual(repr(Tweet(id=1337)), '<Tweet: 1337>')
self.assertEqual(repr(EAV(entity='e', attribute='a')),
"<EAV: ('e', 'a')>")
self.assertEqual(repr(NoPK(key='k')), '<NoPK: n/a>')
self.assertEqual(repr(User.username), '<TextField: User.username>')
self.assertEqual(repr(Tweet.user), '<ForeignKeyField: Tweet.user>')
self.assertEqual(repr(EAV.entity), '<TextField: EAV.entity>')
self.assertEqual(repr(TextField()), '<TextField: (unbound)>')
def test_model_str_method(self):
class User(Model):
username = TextField(primary_key=True)
def __str__(self):
return self.username.title()
u = User(username='charlie')
self.assertEqual(repr(u), '<User: Charlie>')
| TestModelFieldReprs |
python | conda__conda | conda/core/path_actions.py | {
"start": 5558,
"end": 5701
} | class ____(Action, metaclass=ABCMeta):
@abstractproperty
def target_full_paths(self):
raise NotImplementedError()
| MultiPathAction |
python | numba__numba | numba/tests/test_python_int.py | {
"start": 187,
"end": 1690
} | class ____(unittest.TestCase):
# Issue #474: ints should be returned rather than longs under Python 2,
# as much as possible.
def test_int_return_type(self, flags=force_pyobj_flags,
int_type=types.int64, operands=(3, 4)):
pyfunc = return_int
cfunc = jit((int_type, int_type), **flags)(pyfunc)
expected = pyfunc(*operands)
got = cfunc(*operands)
self.assertIs(type(got), type(expected))
self.assertEqual(got, expected)
def test_int_return_type_npm(self):
self.test_int_return_type(flags=no_pyobj_flags)
def test_unsigned_int_return_type(self, flags=force_pyobj_flags):
self.test_int_return_type(int_type=types.uint64, flags=flags)
def test_unsigned_int_return_type_npm(self):
self.test_unsigned_int_return_type(flags=no_pyobj_flags)
def test_long_int_return_type(self, flags=force_pyobj_flags):
# Same but returning a 64-bit integer. The return type should be
# `int` on 64-bit builds, `long` on 32-bit ones (or Windows).
self.test_int_return_type(flags=flags, operands=(2**33, 2**40))
def test_long_int_return_type_npm(self):
self.test_long_int_return_type(flags=no_pyobj_flags)
def test_longer_int_return_type(self, flags=force_pyobj_flags):
# This won't be supported in nopython mode.
self.test_int_return_type(flags=flags, operands=(2**70, 2**75))
if __name__ == '__main__':
unittest.main()
| TestPythonInt |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 32793,
"end": 35087
} | class ____(nn.Module):
def __init__(self, config: GroupViTVisionConfig) -> None:
super().__init__()
self.config = config
self.stages = nn.ModuleList(
[
GroupViTStage(
config=config,
depth=config.depths[i],
num_group_token=config.num_group_tokens[i],
num_output_group=config.num_output_groups[i],
num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0,
)
for i in range(len(config.depths))
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
all_hidden_states = () if output_hidden_states else None
all_groupings = () if output_attentions else None
group_tokens = None
for i, stage in enumerate(self.stages):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = stage(hidden_states, group_tokens, output_attentions)
hidden_states = layer_outputs[0]
group_tokens = layer_outputs[1]
if output_attentions and layer_outputs[2] is not None:
all_groupings = all_groupings + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings
)
| GroupViTVisionEncoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.