language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/forms_tests/tests/tests.py | {
"start": 2230,
"end": 9344
} | class ____(TestCase):
def test_no_empty_option(self):
"""
If a model's ForeignKey has blank=False and a default, no empty option
is created.
"""
option = ChoiceOptionModel.objects.create(name="default")
choices = list(ChoiceFieldForm().fields["choice"].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, str(option)))
def test_callable_initial_value(self):
"""
The initial value for a callable default returning a queryset is the
pk.
"""
ChoiceOptionModel.objects.create(id=1, name="default")
ChoiceOptionModel.objects.create(id=2, name="option 2")
ChoiceOptionModel.objects.create(id=3, name="option 3")
self.assertHTMLEqual(
ChoiceFieldForm().as_p(),
"""
<p><label for="id_choice">Choice:</label>
<select name="choice" id="id_choice">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice" value="1" id="initial-id_choice">
</p>
<p><label for="id_choice_int">Choice int:</label>
<select name="choice_int" id="id_choice_int">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice_int" value="1"
id="initial-id_choice_int">
</p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice" value="1"
id="initial-id_multi_choice_0">
</p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice_int" value="1"
id="initial-id_multi_choice_int_0">
</p>
""",
)
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
ChoiceOptionModel.objects.create(id=1, name="default")
obj2 = ChoiceOptionModel.objects.create(id=2, name="option 2")
obj3 = ChoiceOptionModel.objects.create(id=3, name="option 3")
self.assertHTMLEqual(
ChoiceFieldForm(
initial={
"choice": obj2,
"choice_int": obj2,
"multi_choice": [obj2, obj3],
"multi_choice_int": ChoiceOptionModel.objects.exclude(
name="default"
),
}
).as_p(),
"""
<p><label for="id_choice">Choice:</label>
<select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice" value="2" id="initial-id_choice">
</p>
<p><label for="id_choice_int">Choice int:</label>
<select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice_int" value="2"
id="initial-id_choice_int">
</p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice" value="2"
id="initial-id_multi_choice_0">
<input type="hidden" name="initial-multi_choice" value="3"
id="initial-id_multi_choice_1">
</p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice_int" value="2"
id="initial-id_multi_choice_int_0">
<input type="hidden" name="initial-multi_choice_int" value="3"
id="initial-id_multi_choice_int_1">
</p>
""",
)
@skipUnlessDBFeature("supports_json_field")
def test_callable_default_hidden_widget_value_not_overridden(self):
class FieldWithCallableDefaultsModel(models.Model):
int_field = models.IntegerField(default=lambda: 1)
json_field = models.JSONField(default=dict)
class FieldWithCallableDefaultsModelForm(ModelForm):
class Meta:
model = FieldWithCallableDefaultsModel
fields = "__all__"
form = FieldWithCallableDefaultsModelForm(
data={
"initial-int_field": "1",
"int_field": "1000",
"initial-json_field": "{}",
"json_field": '{"key": "val"}',
}
)
form_html = form.as_p()
self.assertHTMLEqual(
form_html,
"""
<p>
<label for="id_int_field">Int field:</label>
<input type="number" name="int_field" value="1000"
required id="id_int_field">
<input type="hidden" name="initial-int_field" value="1"
id="initial-id_int_field">
</p>
<p>
<label for="id_json_field">Json field:</label>
<textarea cols="40" id="id_json_field" name="json_field" required rows="10">
{"key": "val"}
</textarea>
<input id="initial-id_json_field" name="initial-json_field" type="hidden"
value="{}">
</p>
""",
)
| ModelFormCallableModelDefault |
python | pandas-dev__pandas | pandas/tests/frame/test_reductions.py | {
"start": 5891,
"end": 55177
} | class ____:
# ---------------------------------------------------------------------
# Reductions
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
"opname",
[
"count",
"sum",
"mean",
"product",
"median",
"min",
"max",
"nunique",
"var",
"std",
"sem",
pytest.param("skew", marks=td.skip_if_no("scipy")),
pytest.param("kurt", marks=td.skip_if_no("scipy")),
],
)
def test_stat_op_api_float_string_frame(self, float_string_frame, axis, opname):
if (opname in ("sum", "min", "max") and axis == 0) or opname in (
"count",
"nunique",
):
getattr(float_string_frame, opname)(axis=axis)
else:
if opname in ["var", "std", "sem", "skew", "kurt"]:
msg = "could not convert string to float: 'bar'"
elif opname == "product":
if axis == 1:
msg = "can't multiply sequence by non-int of type 'float'"
else:
msg = "can't multiply sequence by non-int of type 'str'"
elif opname == "sum":
msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'"
elif opname == "mean":
if axis == 0:
# different message on different builds
msg = "|".join(
[
r"Could not convert \['.*'\] to numeric",
"Could not convert string '(bar){30}' to numeric",
]
)
else:
msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'"
elif opname in ["min", "max"]:
msg = "'[><]=' not supported between instances of 'float' and 'str'"
elif opname == "median":
msg = re.compile(
r"Cannot convert \[.*\] to numeric|does not support|Cannot perform",
flags=re.S,
)
if not isinstance(msg, re.Pattern):
msg = msg + "|does not support|Cannot perform reduction"
with pytest.raises(TypeError, match=msg):
getattr(float_string_frame, opname)(axis=axis)
if opname != "nunique":
getattr(float_string_frame, opname)(axis=axis, numeric_only=True)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
"opname",
[
"count",
"sum",
"mean",
"product",
"median",
"min",
"max",
"var",
"std",
"sem",
pytest.param("skew", marks=td.skip_if_no("scipy")),
pytest.param("kurt", marks=td.skip_if_no("scipy")),
],
)
def test_stat_op_api_float_frame(self, float_frame, axis, opname):
getattr(float_frame, opname)(axis=axis, numeric_only=False)
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
assert_stat_op_calc(
"nunique",
nunique,
float_frame_with_na,
has_skipna=False,
check_dtype=False,
check_dates=True,
)
# GH#32571: rol needed for flaky CI builds
# mixed types (with upcasting happening)
assert_stat_op_calc(
"sum",
np.sum,
mixed_float_frame.astype("float32"),
check_dtype=False,
rtol=1e-3,
)
assert_stat_op_calc(
"sum", np.sum, float_frame_with_na, skipna_alternative=np.nansum
)
assert_stat_op_calc("mean", np.mean, float_frame_with_na, check_dates=True)
assert_stat_op_calc(
"product", np.prod, float_frame_with_na, skipna_alternative=np.nanprod
)
assert_stat_op_calc("var", var, float_frame_with_na)
assert_stat_op_calc("std", std, float_frame_with_na)
assert_stat_op_calc("sem", sem, float_frame_with_na)
assert_stat_op_calc(
"count",
count,
float_frame_with_na,
has_skipna=False,
check_dtype=False,
check_dates=True,
)
def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na):
sp_stats = pytest.importorskip("scipy.stats")
def skewness(x):
if len(x) < 3:
return np.nan
return sp_stats.skew(x, bias=False)
def kurt(x):
if len(x) < 4:
return np.nan
return sp_stats.kurtosis(x, bias=False)
assert_stat_op_calc("skew", skewness, float_frame_with_na)
assert_stat_op_calc("kurt", kurt, float_frame_with_na)
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc("median", wrapper, float_frame_with_na, check_dates=True)
assert_stat_op_calc(
"median", wrapper, int_frame, check_dtype=False, check_dates=True
)
@pytest.mark.parametrize(
"method", ["sum", "mean", "prod", "var", "std", "skew", "min", "max"]
)
@pytest.mark.parametrize(
"df",
[
DataFrame(
{
"a": [
-0.00049987540199591344,
-0.0016467257772919831,
0.00067695870775883013,
],
"b": [-0, -0, 0.0],
"c": [
0.00031111847529610595,
0.0014902627951905339,
-0.00094099200035979691,
],
},
index=["foo", "bar", "baz"],
dtype="O",
),
DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object),
],
)
@pytest.mark.filterwarnings("ignore:Mismatched null-like values:FutureWarning")
def test_stat_operators_attempt_obj_array(self, method, df, axis):
# GH#676
assert df.values.dtype == np.object_
result = getattr(df, method)(axis=axis)
expected = getattr(df.astype("f8"), method)(axis=axis).astype(object)
if axis in [1, "columns"] and method in ["min", "max"]:
expected[expected.isna()] = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["mean", "std", "var", "skew", "kurt", "sem"])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame(
{
"int": [1, 2, 3, 4],
"float": [1.0, 2.0, 3.0, 4.0],
"str": ["a", "b", "c", "d"],
}
)
msg = "|".join(
[
"Could not convert",
"could not convert",
"can't multiply sequence by non-int",
"does not support",
"Cannot perform",
]
)
with pytest.raises(TypeError, match=msg):
getattr(df, op)()
with pd.option_context("use_bottleneck", False):
with pytest.raises(TypeError, match=msg):
getattr(df, op)()
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame(
{
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
"string_data": ["a", "b", "c", "d", "e"],
}
)
df.reindex(columns=["bool_data", "int_data", "string_data"])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(
test.values, np.array([2, 150, "abcde"], dtype=object)
)
alt = df.T.sum(axis=1)
tm.assert_series_equal(test, alt)
def test_nunique(self):
df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({"A": 1, "B": 3, "C": 2}))
tm.assert_series_equal(
df.nunique(dropna=False), Series({"A": 1, "B": 3, "C": 3})
)
tm.assert_series_equal(df.nunique(axis=1), Series([1, 2, 2]))
tm.assert_series_equal(df.nunique(axis=1, dropna=False), Series([1, 3, 2]))
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = DataFrame({"A": [1, 1], "B": [Timestamp("2000", tz=tz)] * 2})
result = df.mean()
expected = Series([1.0, Timestamp("2000", tz=tz)], index=["A", "B"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_includes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Behavior in 0.24.0rc1 was buggy.
# As of 2.0 with numeric_only=None we do *not* drop datetime columns
df = DataFrame({"A": [Timestamp("2000", tz=tz)] * 2})
result = df.mean()
expected = Series([Timestamp("2000", tz=tz)], index=["A"])
tm.assert_series_equal(result, expected)
def test_mean_mixed_string_decimal(self):
# GH 11670
# possible bug when calculating mean of DataFrame?
d = [
{"A": 2, "B": None, "C": Decimal("628.00")},
{"A": 1, "B": None, "C": Decimal("383.00")},
{"A": 3, "B": None, "C": Decimal("651.00")},
{"A": 2, "B": None, "C": Decimal("575.00")},
{"A": 4, "B": None, "C": Decimal("1114.00")},
{"A": 1, "B": "TEST", "C": Decimal("241.00")},
{"A": 2, "B": None, "C": Decimal("572.00")},
{"A": 4, "B": None, "C": Decimal("609.00")},
{"A": 3, "B": None, "C": Decimal("820.00")},
{"A": 5, "B": None, "C": Decimal("1223.00")},
]
df = DataFrame(d)
with pytest.raises(
TypeError, match="unsupported operand type|does not support|Cannot perform"
):
df.mean()
result = df[["A", "C"]].mean()
expected = Series([2.7, 681.6], index=["A", "C"], dtype=object)
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context("use_bottleneck", False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize("meth", ["sem", "var", "std"])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
columns=["foo", "bar", "baz"],
)
# Cast to object to avoid implicit cast when setting entry to "100" below
df1 = df1.astype({"foo": object})
# set one entry to a number in str format
df1.loc[0, "foo"] = "100"
df2 = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
columns=["foo", "bar", "baz"],
)
# Cast to object to avoid implicit cast when setting entry to "a" below
df2 = df2.astype({"foo": object})
# set one entry to a non-number str
df2.loc[0, "foo"] = "a"
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[["bar", "baz"]], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[["bar", "baz"]], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context("use_bottleneck", False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"dropna, expected",
[
(
True,
{
"A": [12],
"B": [10.0],
"C": [1.0],
"D": ["a"],
"E": Categorical(["a"], categories=["a"]),
"F": DatetimeIndex(["2000-01-02"], dtype="M8[ns]"),
"G": to_timedelta(["1 days"]),
},
),
(
False,
{
"A": [12],
"B": [10.0],
"C": [np.nan],
"D": Series([np.nan], dtype="str"),
"E": Categorical([np.nan], categories=["a"]),
"F": DatetimeIndex([pd.NaT], dtype="M8[ns]"),
"G": to_timedelta([pd.NaT]),
},
),
(
True,
{
"H": [8, 9, np.nan, np.nan],
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]),
"L": DatetimeIndex(
["2000-01-02", "NaT", "NaT", "NaT"], dtype="M8[ns]"
),
"M": to_timedelta(["1 days", "nan", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
),
(
False,
{
"H": [8, 9, np.nan, np.nan],
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]),
"L": DatetimeIndex(
["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
),
"M": to_timedelta(["nan", "1 days", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
),
],
)
def test_mode_dropna(self, dropna, expected):
df = DataFrame(
{
"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": Series([np.nan, np.nan, "a", np.nan], dtype="str"),
"E": Categorical([np.nan, np.nan, "a", np.nan]),
"F": DatetimeIndex(["NaT", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"),
"G": to_timedelta(["1 days", "nan", "nan", "nan"]),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(["a", np.nan, "a", np.nan]),
"L": DatetimeIndex(
["2000-01-02", "2000-01-02", "NaT", "NaT"], dtype="M8[ns]"
),
"M": to_timedelta(["1 days", "nan", "1 days", "nan"]),
"N": np.arange(4, dtype="int64"),
}
)
result = df[sorted(expected.keys())].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sort_with_na(self, using_infer_string):
df = DataFrame({"A": [np.nan, np.nan, "a", "a"]})
expected = DataFrame({"A": ["a", np.nan]})
result = df.mode(dropna=False)
tm.assert_frame_equal(result, expected)
def test_mode_empty_df(self):
df = DataFrame([], columns=["a", "b"])
expected = df.copy()
result = df.mode()
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(
{
"A": date_range("2012-1-1", periods=3, freq="D", unit="ns"),
"B": date_range("2012-1-2", periods=3, freq="D", unit="ns"),
"C": Timestamp("20120101") - timedelta(minutes=5, seconds=5),
}
)
diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]})
# min
result = diffs.min()
assert result.iloc[0] == diffs.loc[0, "A"]
assert result.iloc[1] == diffs.loc[0, "B"]
result = diffs.min(axis=1)
assert (result == diffs.loc[0, "B"]).all()
# max
result = diffs.max()
assert result.iloc[0] == diffs.loc[2, "A"]
assert result.iloc[1] == diffs.loc[2, "B"]
result = diffs.max(axis=1)
assert (result == diffs["A"]).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame({"A": df["A"] - df["C"], "B": df["B"] - df["A"]})
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed["C"] = "foo"
mixed["D"] = 1
mixed["E"] = 1.0
mixed["F"] = Timestamp("20130101")
# results in an object array
result = mixed.min()
expected = Series(
[
pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
"foo",
1,
1.0,
Timestamp("20130101"),
],
index=mixed.columns,
)
tm.assert_series_equal(result, expected)
# excludes non-numeric
result = mixed.min(axis=1, numeric_only=True)
expected = Series([1, 1, 1.0])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[["A", "B"]].min(axis=1)
expected = Series([timedelta(days=-1)] * 3, dtype="m8[ns]")
tm.assert_series_equal(result, expected)
result = mixed[["A", "B"]].min()
expected = Series(
[timedelta(seconds=5 * 60 + 5), timedelta(days=-1)],
index=["A", "B"],
dtype="m8[ns]",
)
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame(
{
"time": date_range("20130102", periods=5, unit="ns"),
"time2": date_range("20130105", periods=5, unit="ns"),
}
)
df["off1"] = df["time2"] - df["time"]
assert df["off1"].dtype == "timedelta64[ns]"
df["off2"] = df["time"] - df["time2"]
df._consolidate_inplace()
assert df["off1"].dtype == "timedelta64[ns]"
assert df["off2"].dtype == "timedelta64[ns]"
def test_std_timedelta64_skipna_false(self):
# GH#37392
tdi = pd.timedelta_range("1 Day", periods=10)
df = DataFrame({"A": tdi, "B": tdi}, copy=True)
df.iloc[-2, -1] = pd.NaT
result = df.std(skipna=False)
expected = Series(
[df["A"].std(), pd.NaT], index=["A", "B"], dtype="timedelta64[ns]"
)
tm.assert_series_equal(result, expected)
result = df.std(axis=1, skipna=False)
expected = Series([pd.Timedelta(0)] * 8 + [pd.NaT, pd.Timedelta(0)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values", [["2022-01-01", "2022-01-02", pd.NaT, "2022-01-03"], 4 * [pd.NaT]]
)
def test_std_datetime64_with_nat(self, values, skipna, request, unit):
# GH#51335
dti = to_datetime(values).as_unit(unit)
df = DataFrame({"a": dti})
result = df.std(skipna=skipna)
if not skipna or all(value is pd.NaT for value in values):
expected = Series({"a": pd.NaT}, dtype=f"timedelta64[{unit}]")
else:
# 86400000000000ns == 1 day
expected = Series({"a": 86400000000000}, dtype=f"timedelta64[{unit}]")
tm.assert_series_equal(result, expected)
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(axis=0)
axis1 = empty_frame.sum(axis=1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize(
"index",
[
RangeIndex(0),
DatetimeIndex([]),
Index([], dtype=np.int64),
Index([], dtype=np.float64),
DatetimeIndex([], freq="ME"),
PeriodIndex([], freq="D"),
],
)
def test_axis_1_empty(self, all_reductions, index):
df = DataFrame(columns=["a"], index=index)
result = getattr(df, all_reductions)(axis=1)
if all_reductions in ("any", "all"):
expected_dtype = "bool"
elif all_reductions == "count":
expected_dtype = "int64"
else:
expected_dtype = "object"
expected = Series([], index=index, dtype=expected_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("min_count", [0, 1])
def test_axis_1_sum_na(self, string_dtype_no_object, skipna, min_count):
# https://github.com/pandas-dev/pandas/issues/60229
dtype = string_dtype_no_object
df = DataFrame({"a": [pd.NA]}, dtype=dtype)
result = df.sum(axis=1, skipna=skipna, min_count=min_count)
value = "" if skipna and min_count == 0 else pd.NA
expected = Series([value], dtype=dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)])
@pytest.mark.parametrize("numeric_only", [None, True, False])
def test_sum_prod_nanops(self, method, unit, numeric_only):
idx = ["a", "b", "c"]
df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]})
# The default
result = getattr(df, method)(numeric_only=numeric_only)
expected = Series([unit, unit, unit], index=idx, dtype="float64")
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(numeric_only=numeric_only, min_count=1)
expected = Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(numeric_only=numeric_only, min_count=0)
expected = Series([unit, unit, unit], index=idx, dtype="float64")
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1)
expected = Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(numeric_only=numeric_only, min_count=5)
expected = Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(numeric_only=numeric_only, min_count=6)
expected = Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ["a", "b", "c"]
df = DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})
df2 = df.apply(to_timedelta)
# 0 by default
result = df2.sum()
expected = Series([0, 0, 0], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = Series([0, 0, np.nan], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
def test_sum_nanops_min_count(self):
# https://github.com/pandas-dev/pandas/issues/39738
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
result = df.sum(min_count=10)
expected = Series([np.nan, np.nan], index=["x", "y"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("float_type", ["float16", "float32", "float64"])
@pytest.mark.parametrize(
"kwargs, expected_result",
[
({"axis": 1, "min_count": 2}, [3.2, 5.3, np.nan]),
({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]),
({"axis": 1, "skipna": False}, [3.2, 5.3, np.nan]),
],
)
def test_sum_nanops_dtype_min_count(self, float_type, kwargs, expected_result):
# GH#46947
df = DataFrame({"a": [1.0, 2.3, 4.4], "b": [2.2, 3, np.nan]}, dtype=float_type)
result = df.sum(**kwargs)
expected = Series(expected_result).astype(float_type)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("float_type", ["float16", "float32", "float64"])
@pytest.mark.parametrize(
"kwargs, expected_result",
[
({"axis": 1, "min_count": 2}, [2.0, 4.0, np.nan]),
({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]),
({"axis": 1, "skipna": False}, [2.0, 4.0, np.nan]),
],
)
def test_prod_nanops_dtype_min_count(self, float_type, kwargs, expected_result):
# GH#46947
df = DataFrame(
{"a": [1.0, 2.0, 4.4], "b": [2.0, 2.0, np.nan]}, dtype=float_type
)
result = df.prod(**kwargs)
expected = Series(expected_result).astype(float_type)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(axis=1)
bools.sum(axis=0)
def test_sum_mixed_datetime(self):
# GH#30886
df = DataFrame({"A": date_range("2000", periods=4), "B": [1, 2, 3, 4]}).reindex(
[2, 3, 4]
)
with pytest.raises(TypeError, match="does not support operation 'sum'"):
df.sum()
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
msg = "Could not convert|does not support|Cannot perform"
with pytest.raises(TypeError, match=msg):
float_string_frame.mean(axis=0)
# xs sum mixed type, just want to know it works...
with pytest.raises(TypeError, match="unsupported operand type"):
float_string_frame.mean(axis=1)
# take mean of boolean column
float_frame["bool"] = float_frame["A"] > 0
means = float_frame.mean(axis=0)
assert means["bool"] == float_frame["bool"].values.mean()
def test_mean_datetimelike(self):
# GH#24757 check that datetimelike are excluded by default, handled
# correctly with numeric_only=True
# As of 2.0, datetimelike are *not* excluded with numeric_only=None
df = DataFrame(
{
"A": np.arange(3),
"B": date_range("2016-01-01", periods=3),
"C": pd.timedelta_range("1D", periods=3),
"D": pd.period_range("2016", periods=3, freq="Y"),
}
)
result = df.mean(numeric_only=True)
expected = Series({"A": 1.0})
tm.assert_series_equal(result, expected)
with pytest.raises(TypeError, match="mean is not implemented for PeriodArray"):
df.mean()
def test_mean_datetimelike_numeric_only_false(self):
df = DataFrame(
{
"A": np.arange(3),
"B": date_range("2016-01-01", periods=3),
"C": pd.timedelta_range("1D", periods=3),
}
)
# datetime(tz) and timedelta work
result = df.mean(numeric_only=False)
expected = Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]})
tm.assert_series_equal(result, expected)
# mean of period is not allowed
df["D"] = pd.period_range("2016", periods=3, freq="Y")
with pytest.raises(TypeError, match="mean is not implemented for Period"):
df.mean(numeric_only=False)
def test_mean_extensionarray_numeric_only_true(self):
# https://github.com/pandas-dev/pandas/issues/33256
arr = np.random.default_rng(2).integers(1000, size=(10, 5))
df = DataFrame(arr, dtype="Int64")
result = df.mean(numeric_only=True)
expected = DataFrame(arr).mean().astype("Float64")
tm.assert_series_equal(result, expected)
def test_stats_mixed_type(self, float_string_frame):
with pytest.raises(TypeError, match="could not convert"):
float_string_frame.std(axis=1)
with pytest.raises(TypeError, match="could not convert"):
float_string_frame.var(axis=1)
with pytest.raises(TypeError, match="unsupported operand type"):
float_string_frame.mean(axis=1)
with pytest.raises(TypeError, match="could not convert"):
float_string_frame.skew(axis=1)
def test_sum_bools(self):
df = DataFrame(index=range(1), columns=range(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ----------------------------------------------------------------------
# Index of max / min
@pytest.mark.parametrize("axis", [0, 1])
def test_idxmin(self, float_frame, int_frame, skipna, axis):
frame = float_frame
frame.iloc[5:10] = np.nan
frame.iloc[15:20, -2:] = np.nan
for df in [frame, int_frame]:
if (not skipna or axis == 1) and df is not int_frame:
if skipna:
msg = "Encountered all NA values"
else:
msg = "Encountered an NA value"
with pytest.raises(ValueError, match=msg):
df.idxmin(axis=axis, skipna=skipna)
with pytest.raises(ValueError, match=msg):
df.idxmin(axis=axis, skipna=skipna)
else:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis, skipna=skipna)
expected = expected.astype(df.index.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_idxmin_empty(self, index, skipna, axis):
# GH53265
if axis == 0:
frame = DataFrame(index=index)
else:
frame = DataFrame(columns=index)
result = frame.idxmin(axis=axis, skipna=skipna)
expected = Series(dtype=index.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numeric_only", [True, False])
def test_idxmin_numeric_only(self, numeric_only):
df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")})
result = df.idxmin(numeric_only=numeric_only)
if numeric_only:
expected = Series([2, 1], index=["a", "b"])
else:
expected = Series([2, 1, 0], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
def test_idxmin_axis_2(self, float_frame):
frame = float_frame
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
@pytest.mark.parametrize("axis", [0, 1])
def test_idxmax(self, float_frame, int_frame, skipna, axis):
frame = float_frame
frame.iloc[5:10] = np.nan
frame.iloc[15:20, -2:] = np.nan
for df in [frame, int_frame]:
if (skipna is False or axis == 1) and df is frame:
if skipna:
msg = "Encountered all NA values"
else:
msg = "Encountered an NA value"
with pytest.raises(ValueError, match=msg):
df.idxmax(axis=axis, skipna=skipna)
return
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis, skipna=skipna)
expected = expected.astype(df.index.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_idxmax_empty(self, index, skipna, axis):
# GH53265
if axis == 0:
frame = DataFrame(index=index)
else:
frame = DataFrame(columns=index)
result = frame.idxmax(axis=axis, skipna=skipna)
expected = Series(dtype=index.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numeric_only", [True, False])
def test_idxmax_numeric_only(self, numeric_only):
df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")})
result = df.idxmax(numeric_only=numeric_only)
if numeric_only:
expected = Series([1, 0], index=["a", "b"])
else:
expected = Series([1, 0, 1], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
    def test_idxmax_arrow_types(self):
        # GH#55368
        # idxmax/idxmin should work for pyarrow-backed dtypes, both numeric
        # and string; the whole test is skipped when pyarrow is unavailable.
        pytest.importorskip("pyarrow")

        df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]")
        result = df.idxmax()
        expected = Series([1, 0], index=["a", "b"])
        tm.assert_series_equal(result, expected)

        result = df.idxmin()
        expected = Series([2, 1], index=["a", "b"])
        tm.assert_series_equal(result, expected)

        # Arrow-backed strings compare lexicographically.
        df = DataFrame({"a": ["b", "c", "a"]}, dtype="string[pyarrow]")
        result = df.idxmax(numeric_only=False)
        expected = Series([1], index=["a"])
        tm.assert_series_equal(result, expected)

        result = df.idxmin(numeric_only=False)
        expected = Series([2], index=["a"])
        tm.assert_series_equal(result, expected)
def test_idxmax_axis_2(self, float_frame):
frame = float_frame
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
def test_idxmax_mixed_dtype(self):
# don't cast to object, which would raise in nanops
dti = date_range("2016-01-01", periods=3)
df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti})
result = df.idxmax()
expected = Series([1, 0, 2], index=range(1, 4))
tm.assert_series_equal(result, expected)
result = df.idxmin()
expected = Series([0, 2, 0], index=range(1, 4))
tm.assert_series_equal(result, expected)
# with NaTs
df.loc[0, 3] = pd.NaT
result = df.idxmax()
expected = Series([1, 0, 2], index=range(1, 4))
tm.assert_series_equal(result, expected)
result = df.idxmin()
expected = Series([0, 2, 1], index=range(1, 4))
tm.assert_series_equal(result, expected)
# with multi-column dt64 block
df[4] = dti[::-1]
df._consolidate_inplace()
result = df.idxmax()
expected = Series([1, 0, 2, 0], index=range(1, 5))
tm.assert_series_equal(result, expected)
result = df.idxmin()
expected = Series([0, 2, 1, 2], index=range(1, 5))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected_value",
[("idxmax", [0, 4]), ("idxmin", [0, 5])],
)
def test_idxmax_idxmin_convert_dtypes(self, op, expected_value):
# GH 40346
df = DataFrame(
{
"ID": [100, 100, 100, 200, 200, 200],
"value": [0, 0, 0, 1, 2, 0],
},
dtype="Int64",
)
df = df.groupby("ID")
result = getattr(df, op)()
expected = DataFrame(
{"value": expected_value},
index=Index([100, 200], name="ID", dtype="Int64"),
)
tm.assert_frame_equal(result, expected)
def test_idxmax_dt64_multicolumn_axis1(self):
dti = date_range("2016-01-01", periods=3)
df = DataFrame({3: dti, 4: dti[::-1]}, copy=True)
df.iloc[0, 0] = pd.NaT
df._consolidate_inplace()
result = df.idxmax(axis=1)
expected = Series([4, 3, 3])
tm.assert_series_equal(result, expected)
result = df.idxmin(axis=1)
expected = Series([4, 3, 4])
tm.assert_series_equal(result, expected)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("bool_only", [False, True])
def test_any_all_mixed_float(
self, all_boolean_reductions, axis, bool_only, float_string_frame
):
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed["_bool_"] = np.random.default_rng(2).standard_normal(len(mixed)) > 0.5
getattr(mixed, all_boolean_reductions)(axis=axis, bool_only=bool_only)
@pytest.mark.parametrize("axis", [0, 1])
def test_any_all_bool_with_na(
self, all_boolean_reductions, axis, bool_frame_with_na
):
getattr(bool_frame_with_na, all_boolean_reductions)(axis=axis, bool_only=False)
    def test_any_all_bool_frame(self, all_boolean_reductions, bool_frame_with_na):
        # GH#12863: numpy gives back non-boolean data for object type
        # so fill NaNs to compare with pandas behavior
        frame = bool_frame_with_na.fillna(True)
        alternative = getattr(np, all_boolean_reductions)
        f = getattr(frame, all_boolean_reductions)

        # Reference implementations: skipna_wrapper drops NAs before
        # reducing; wrapper reduces the raw values as-is.
        def skipna_wrapper(x):
            nona = x.dropna().values
            return alternative(nona)

        def wrapper(x):
            return alternative(x.values)

        # skipna=False path must match the raw numpy reduction.
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(result0, frame.apply(wrapper))
        tm.assert_series_equal(result1, frame.apply(wrapper, axis=1))
        # Default (skipna=True) path must match the NA-dropping reduction.
        result0 = f(axis=0)
        result1 = f(axis=1)
        tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
        tm.assert_series_equal(
            result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False
        )
        # bad axis
        with pytest.raises(ValueError, match="No axis named 2"):
            f(axis=2)
        # all NA case
        all_na = frame * np.nan
        r0 = getattr(all_na, all_boolean_reductions)(axis=0)
        r1 = getattr(all_na, all_boolean_reductions)(axis=1)
        if all_boolean_reductions == "any":
            # any over all-NA (skipna default) is vacuously False ...
            assert not r0.any()
            assert not r1.any()
        else:
            # ... while all over all-NA is vacuously True.
            assert r0.all()
            assert r1.all()
def test_any_all_extra(self):
df = DataFrame(
{
"A": [True, False, False],
"B": [True, True, False],
"C": [True, True, True],
},
index=["a", "b", "c"],
)
result = df[["A", "B"]].any(axis=1)
expected = Series([True, True, False], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
result = df[["A", "B"]].any(axis=1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(axis=1)
expected = Series([True, False, False], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
result = df.all(axis=1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[["C"]].all(axis=None).item()
assert result is True
@pytest.mark.parametrize("axis", [0, 1])
def test_any_all_object_dtype(self, axis, all_boolean_reductions, skipna):
# GH#35450
df = DataFrame(
data=[
[1, np.nan, np.nan, True],
[np.nan, 2, np.nan, True],
[np.nan, np.nan, np.nan, True],
[np.nan, np.nan, "5", np.nan],
]
)
result = getattr(df, all_boolean_reductions)(axis=axis, skipna=skipna)
expected = Series([True, True, True, True])
tm.assert_series_equal(result, expected)
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [
Timestamp("1960-02-15"),
Timestamp("1960-02-16"),
pd.NaT,
pd.NaT,
]
df = DataFrame({"A": float_data, "B": datetime_data})
msg = "datetime64 type does not support operation 'any'"
with pytest.raises(TypeError, match=msg):
df.any(axis=1)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame(
{"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]},
columns=Index(["col1", "col2", "col3"], dtype=object),
)
result = df.all(bool_only=True)
expected = Series(dtype=np.bool_, index=[])
tm.assert_series_equal(result, expected)
df = DataFrame(
{
"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True],
}
)
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "func, data, expected",
        [
            (np.any, {}, False),
            (np.all, {}, True),
            (np.any, {"A": []}, False),
            (np.all, {"A": []}, True),
            (np.any, {"A": [False, False]}, False),
            (np.all, {"A": [False, False]}, False),
            (np.any, {"A": [True, False]}, True),
            (np.all, {"A": [True, False]}, False),
            (np.any, {"A": [True, True]}, True),
            (np.all, {"A": [True, True]}, True),
            (np.any, {"A": [False], "B": [False]}, False),
            (np.all, {"A": [False], "B": [False]}, False),
            (np.any, {"A": [False, False], "B": [False, True]}, True),
            (np.all, {"A": [False, False], "B": [False, True]}, False),
            # other types
            (np.all, {"A": Series([0.0, 1.0], dtype="float")}, False),
            (np.any, {"A": Series([0.0, 1.0], dtype="float")}, True),
            (np.all, {"A": Series([0, 1], dtype=int)}, False),
            (np.any, {"A": Series([0, 1], dtype=int)}, True),
            pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns]")}, False),
            pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, False),
            pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns]")}, True),
            pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, True),
            pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns]")}, True),
            pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),
            pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns]")}, True),
            pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),
            pytest.param(np.all, {"A": Series([0, 1], dtype="m8[ns]")}, False),
            pytest.param(np.any, {"A": Series([0, 1], dtype="m8[ns]")}, True),
            pytest.param(np.all, {"A": Series([1, 2], dtype="m8[ns]")}, True),
            pytest.param(np.any, {"A": Series([1, 2], dtype="m8[ns]")}, True),
            # np.all on Categorical raises, so the reduction drops the
            # column, so all is being done on an empty Series, so is True
            (np.all, {"A": Series([0, 1], dtype="category")}, True),
            (np.any, {"A": Series([0, 1], dtype="category")}, False),
            (np.all, {"A": Series([1, 2], dtype="category")}, True),
            (np.any, {"A": Series([1, 2], dtype="category")}, False),
            # Mix GH#21484
            pytest.param(
                np.all,
                {
                    "A": Series([10, 20], dtype="M8[ns]"),
                    "B": Series([10, 20], dtype="m8[ns]"),
                },
                True,
            ),
        ],
    )
    def test_any_all_np_func(self, func, data, expected):
        # GH 19976
        # Exercise both the ufunc entry point (np.any/np.all on the frame)
        # and the bound-method equivalent with axis=None; both must agree
        # with `expected`, or raise for dtypes that reject the reduction.
        data = DataFrame(data)

        if any(isinstance(x, CategoricalDtype) for x in data.dtypes):
            with pytest.raises(
                TypeError, match=".* dtype category does not support operation"
            ):
                func(data)

            # method version
            with pytest.raises(
                TypeError, match=".* dtype category does not support operation"
            ):
                getattr(DataFrame(data), func.__name__)(axis=None)
        if data.dtypes.apply(lambda x: x.kind == "M").any():
            # GH#34479
            msg = "datetime64 type does not support operation '(any|all)'"
            with pytest.raises(TypeError, match=msg):
                func(data)

            # method version
            with pytest.raises(TypeError, match=msg):
                getattr(DataFrame(data), func.__name__)(axis=None)
        elif data.dtypes.apply(lambda x: x != "category").any():
            # Non-categorical, non-datetime dtypes: the reduction succeeds
            # and returns a numpy boolean scalar.
            result = func(data)
            assert isinstance(result, np.bool_)
            assert result.item() is expected

            # method version
            result = getattr(DataFrame(data), func.__name__)(axis=None)
            assert isinstance(result, np.bool_)
            assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=["a", "b"])).item()
assert result is True
result = np.any(DataFrame(columns=["a", "b"])).item()
assert result is False
def test_any_all_object_bool_only(self):
df = DataFrame({"A": ["foo", 2], "B": [True, False]}).astype(object)
df._consolidate_inplace()
df["C"] = Series([True, True])
# Categorical of bools is _not_ considered booly
df["D"] = df["C"].astype("category")
# The underlying bug is in DataFrame._get_bool_data, so we check
# that while we're here
res = df._get_bool_data()
expected = df[["C"]]
tm.assert_frame_equal(res, expected)
res = df.all(bool_only=True, axis=0)
expected = Series([True], index=["C"])
tm.assert_series_equal(res, expected)
# operating on a subset of columns should not produce a _larger_ Series
res = df[["B", "C"]].all(bool_only=True, axis=0)
tm.assert_series_equal(res, expected)
assert df.all(bool_only=True, axis=None)
res = df.any(bool_only=True, axis=0)
expected = Series([True], index=["C"])
tm.assert_series_equal(res, expected)
# operating on a subset of columns should not produce a _larger_ Series
res = df[["C"]].any(bool_only=True, axis=0)
tm.assert_series_equal(res, expected)
assert df.any(bool_only=True, axis=None)
# ---------------------------------------------------------------------
# Unsorted
def test_series_broadcasting(self):
# smoke test for numpy warnings
# GH 16378, GH 16306
df = DataFrame([1.0, 1.0, 1.0])
df_nan = DataFrame({"A": [np.nan, 2.0, np.nan]})
s = Series([1, 1, 1])
s_nan = Series([np.nan, np.nan, 1])
with tm.assert_produces_warning(None):
df_nan.clip(lower=s, axis=0)
for op in ["lt", "le", "gt", "ge", "eq", "ne"]:
getattr(df, op)(s_nan, axis=0)
| TestDataFrameAnalytics |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/tokens.py | {
"start": 8147,
"end": 11015
class ____(TokenBase):
    """Bearer token (RFC 6750) creation and validation helper.

    NOTE(review): the class name was masked to ``____`` in this extract;
    callers elsewhere refer to it by its real (unmasked) name.
    """
    __slots__ = (
        'request_validator', 'token_generator',
        'refresh_token_generator', 'expires_in'
    )

    def __init__(self, request_validator=None, token_generator=None,
                 expires_in=None, refresh_token_generator=None):
        # The access-token generator defaults to random_token_generator; the
        # refresh-token generator defaults to whatever generates access tokens.
        self.request_validator = request_validator
        self.token_generator = token_generator or random_token_generator
        self.refresh_token_generator = (
            refresh_token_generator or self.token_generator
        )
        # Default token lifetime: one hour.
        self.expires_in = expires_in or 3600

    def create_token(self, request, refresh_token=False, **kwargs):
        """
        Create a BearerToken, by default without refresh token.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        :param refresh_token: Whether to also include a refresh token.
        """
        if "save_token" in kwargs:
            warnings.warn("`save_token` has been deprecated, it was not called internally."
                          "If you do, call `request_validator.save_token()` instead.",
                          DeprecationWarning)

        # expires_in may be a callable taking the request (per-client
        # lifetimes) or a plain number of seconds.
        expires_in = self.expires_in(request) if callable(self.expires_in) else self.expires_in

        request.expires_in = expires_in

        token = {
            'access_token': self.token_generator(request),
            'expires_in': expires_in,
            'token_type': 'Bearer',
        }

        # If provided, include - this is optional in some cases https://tools.ietf.org/html/rfc6749#section-3.3 but
        # there is currently no mechanism to coordinate issuing a token for only a subset of the requested scopes so
        # all tokens issued are for the entire set of requested scopes.
        if request.scopes is not None:
            token['scope'] = ' '.join(request.scopes)

        if refresh_token:
            # Reuse the presented refresh token unless the validator asks
            # for rotation; otherwise mint a fresh one.
            if (request.refresh_token and
                    not self.request_validator.rotate_refresh_token(request)):
                token['refresh_token'] = request.refresh_token
            else:
                token['refresh_token'] = self.refresh_token_generator(request)

        token.update(request.extra_credentials or {})
        return OAuth2Token(token)

    def validate_request(self, request):
        """
        Validate the bearer token carried by *request*.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        """
        token = get_token_from_header(request)
        return self.request_validator.validate_bearer_token(
            token, request.scopes, request)

    def estimate_type(self, request):
        """
        Estimate how likely *request* carries a bearer token.

        :param request: OAuthlib request.
        :type request: oauthlib.common.Request
        """
        # 9: explicit "Bearer" Authorization header; 5: an access token was
        # supplied some other way; 0: no sign of a bearer token.
        if request.headers.get('Authorization', '').split(' ')[0].lower() == 'bearer':
            return 9
        elif request.access_token is not None:
            return 5
        else:
            return 0
| BearerToken |
python | huggingface__transformers | src/transformers/models/conditional_detr/configuration_conditional_detr.py | {
"start": 908,
"end": 12713
class ____(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate
    a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Conditional DETR
    [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        use_timm_backbone (`bool`, *optional*, defaults to `True`):
            Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
            API.
        backbone_config (`PreTrainedConfig` or `dict`, *optional*):
            The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
            case it will default to `ResNetConfig()`.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries, i.e. detection slots. This is the maximal number of objects
            [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 100 queries.
        d_model (`int`, *optional*, defaults to 256):
            This parameter is a general dimension parameter, defining dimensions for components such as the encoder layer and projection parameters in the decoder layer, among others.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        backbone (`str`, *optional*, defaults to `"resnet50"`):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
            is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
            Whether to use pretrained weights for the backbone.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        dilation (`bool`, *optional*, defaults to `False`):
            Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
            `use_timm_backbone` = `True`.
        class_cost (`float`, *optional*, defaults to 2):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        cls_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the classification loss in the object detection loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.

    Examples:

    ```python
    >>> from transformers import ConditionalDetrConfig, ConditionalDetrModel

    >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration
    >>> configuration = ConditionalDetrConfig()

    >>> # Initializing a model (with random weights) from the microsoft/conditional-detr-resnet-50 style configuration
    >>> model = ConditionalDetrModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "conditional_detr"
    sub_configs = {"backbone_config": AutoConfig}
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        backbone_kwargs=None,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # We default to values which were previously hard-coded in the model. This enables configurability of the config
        # while keeping the default behavior the same.
        if use_timm_backbone and backbone_kwargs is None:
            backbone_kwargs = {}
            if dilation:
                backbone_kwargs["output_stride"] = 16
            backbone_kwargs["out_indices"] = [1, 2, 3, 4]
            backbone_kwargs["in_chans"] = num_channels
        # Backwards compatibility
        elif not use_timm_backbone and backbone in (None, "resnet50"):
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # A plain dict was passed; rebuild the proper config class
                # from its recorded model_type.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        verify_backbone_config_arguments(
            use_timm_backbone=use_timm_backbone,
            use_pretrained_backbone=use_pretrained_backbone,
            backbone=backbone,
            backbone_config=backbone_config,
            backbone_kwargs=backbone_kwargs,
        )

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.backbone_kwargs = backbone_kwargs
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
__all__ = ["ConditionalDetrConfig"]
| ConditionalDetrConfig |
python | readthedocs__readthedocs.org | readthedocs/projects/models.py | {
"start": 56923,
"end": 57351
class ____(ImportedFile):
    """
    Imported HTML file Proxy model.

    This tracks only the HTML files for indexing to search.

    NOTE(review): the class name was masked to ``____`` in this extract.
    """

    class Meta:
        # Proxy model: shares the ImportedFile table, adds behavior only.
        proxy = True

    objects = HTMLFileManager()

    def get_processed_json(self):
        # Parse this file's on-disk HTML into the structure used for search
        # indexing.
        parser = GenericParser(self.version)
        return parser.parse(self.path)

    @cached_property
    def processed_json(self):
        # Cached accessor so the (parsing) work happens at most once per
        # instance.
        return self.get_processed_json()
| HTMLFile |
python | astropy__astropy | astropy/coordinates/tests/test_transformations.py | {
"start": 2020,
"end": 2050
# Minimal ICRS subclass with no extra behavior, used as a distinct node in
# coordinate-transformation tests.
# NOTE(review): the class name was masked to ``____`` in this extract.
class ____(ICRS):
    pass
| TCoo1 |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dag_run.py | {
"start": 61130,
"end": 77508
} | class ____:
def _dags_for_trigger_tests(self, session=None):
inactive_dag = DagModel(
dag_id="inactive",
bundle_name="testing",
fileloc="/tmp/dag_del_1.py",
timetable_summary="2 2 * * *",
is_stale=True,
is_paused=True,
owners="test_owner,another_test_owner",
next_dagrun=datetime(2021, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
)
import_errors_dag = DagModel(
dag_id="import_errors",
bundle_name="testing",
fileloc="/tmp/dag_del_2.py",
timetable_summary="2 2 * * *",
is_stale=False,
owners="test_owner,another_test_owner",
next_dagrun=datetime(2021, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
)
import_errors_dag.has_import_errors = True
session.add(inactive_dag)
session.add(import_errors_dag)
session.commit()
    @time_machine.travel(timezone.utcnow(), tick=False)
    @pytest.mark.parametrize(
        ("dag_run_id", "note", "data_interval_start", "data_interval_end", "note_data"),
        [
            ("dag_run_5", "test-note", None, None, {"user_id": "test", "content": "test-note"}),
            (
                "dag_run_6",
                "test-note",
                "2024-01-03T00:00:00+00:00",
                "2024-01-04T05:00:00+00:00",
                {"user_id": "test", "content": "test-note"},
            ),
            (None, None, None, None, None),
        ],
    )
    @pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
    def test_should_respond_200(
        self, test_client, dag_run_id, note, data_interval_start, data_interval_end, note_data, session
    ):
        # Clock is frozen by time_machine, so queued_at / run_after /
        # logical_date all resolve to the same instant.
        fixed_now = timezone.utcnow().isoformat()
        # Build the POST payload, only sending the optional fields this
        # parametrization supplies.
        request_json = {"note": note, "logical_date": fixed_now}
        if dag_run_id is not None:
            request_json["dag_run_id"] = dag_run_id
        if data_interval_start is not None:
            request_json["data_interval_start"] = data_interval_start
        if data_interval_end is not None:
            request_json["data_interval_end"] = data_interval_end
        request_json["logical_date"] = fixed_now
        response = test_client.post(
            f"/dags/{DAG1_ID}/dagRuns",
            json=request_json,
        )
        assert response.status_code == 200
        # Without an explicit run id, the API derives one from the run type
        # plus the logical date.
        if dag_run_id is None:
            expected_dag_run_id = f"manual__{fixed_now}"
        else:
            expected_dag_run_id = dag_run_id
        # The API serializes datetimes with a trailing "Z" rather than "+00:00".
        expected_data_interval_start = fixed_now.replace("+00:00", "Z")
        expected_data_interval_end = fixed_now.replace("+00:00", "Z")
        if data_interval_start is not None and data_interval_end is not None:
            expected_data_interval_start = data_interval_start.replace("+00:00", "Z")
            expected_data_interval_end = data_interval_end.replace("+00:00", "Z")
        expected_logical_date = fixed_now.replace("+00:00", "Z")
        # Fetch the persisted run to obtain its dag_versions for comparison.
        run = (
            session.query(DagRun).where(DagRun.dag_id == DAG1_ID, DagRun.run_id == expected_dag_run_id).one()
        )
        expected_response_json = {
            "bundle_version": None,
            "conf": {},
            "dag_display_name": DAG1_DISPLAY_NAME,
            "dag_id": DAG1_ID,
            "dag_run_id": expected_dag_run_id,
            "dag_versions": get_dag_versions_dict(run.dag_versions),
            "end_date": None,
            "logical_date": expected_logical_date,
            "run_after": fixed_now.replace("+00:00", "Z"),
            "start_date": None,
            "duration": None,
            "run_type": "manual",
            "state": "queued",
            "data_interval_end": expected_data_interval_end,
            "data_interval_start": expected_data_interval_start,
            "queued_at": fixed_now.replace("+00:00", "Z"),
            "last_scheduling_decision": None,
            "note": note,
            "triggered_by": "rest_api",
            "triggering_user_name": "test",
            "partition_key": None,
        }
        assert response.json() == expected_response_json
        # Side effects: the run note and the audit-log entry are persisted.
        _check_dag_run_note(session, expected_dag_run_id, note_data)
        _check_last_log(session, dag_id=DAG1_ID, event="trigger_dag_run", logical_date=None)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.post(
f"/dags/{DAG1_ID}/dagRuns",
json={},
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.post(
f"/dags/{DAG1_ID}/dagRuns",
json={},
)
assert response.status_code == 403
    @pytest.mark.parametrize(
        ("post_body", "expected_detail"),
        [
            (
                {"executiondate": "2020-11-10T08:25:56Z"},
                {
                    "detail": [
                        {
                            "input": "2020-11-10T08:25:56Z",
                            "loc": ["body", "executiondate"],
                            "msg": "Extra inputs are not permitted",
                            "type": "extra_forbidden",
                        }
                    ]
                },
            ),
            (
                {"data_interval_start": "2020-11-10T08:25:56"},
                {
                    "detail": [
                        {
                            "input": "2020-11-10T08:25:56",
                            "loc": ["body", "data_interval_start"],
                            "msg": "Input should have timezone info",
                            "type": "timezone_aware",
                        }
                    ]
                },
            ),
            (
                {"data_interval_end": "2020-11-10T08:25:56"},
                {
                    "detail": [
                        {
                            "input": "2020-11-10T08:25:56",
                            "loc": ["body", "data_interval_end"],
                            "msg": "Input should have timezone info",
                            "type": "timezone_aware",
                        }
                    ]
                },
            ),
            (
                {"dag_run_id": 20},
                {
                    "detail": [
                        {
                            "input": 20,
                            "loc": ["body", "dag_run_id"],
                            "msg": "Input should be a valid string",
                            "type": "string_type",
                        }
                    ]
                },
            ),
            (
                {"note": 20},
                {
                    "detail": [
                        {
                            "input": 20,
                            "loc": ["body", "note"],
                            "msg": "Input should be a valid string",
                            "type": "string_type",
                        }
                    ]
                },
            ),
            (
                {"conf": 20},
                {
                    "detail": [
                        {
                            "input": 20,
                            "loc": ["body", "conf"],
                            "msg": "Input should be a valid dictionary",
                            "type": "dict_type",
                        }
                    ]
                },
            ),
        ],
    )
    def test_invalid_data(self, test_client, post_body, expected_detail):
        # Each malformed payload must be rejected with a 422 and the exact
        # pydantic validation detail listed in the parametrization above.
        now = timezone.utcnow().isoformat()
        post_body["logical_date"] = now
        response = test_client.post(f"/dags/{DAG1_ID}/dagRuns", json=post_body)
        assert response.status_code == 422
        assert response.json() == expected_detail
def test_post_dag_runs_with_empty_payload(self, test_client):
response = test_client.post(
f"/dags/{DAG1_ID}/dagRuns", data={}, headers={"Content-Type": "application/json"}
)
assert response.status_code == 422
body = response.json()
assert body["detail"] == [
{
"input": None,
"loc": ["body"],
"msg": "Field required",
"type": "missing",
},
]
@mock.patch("airflow.serialization.serialized_objects.SerializedDAG.create_dagrun")
def test_dagrun_creation_exception_is_handled(self, mock_create_dagrun, test_client):
now = timezone.utcnow().isoformat()
error_message = "Encountered Error"
mock_create_dagrun.side_effect = ValueError(error_message)
response = test_client.post(f"/dags/{DAG1_ID}/dagRuns", json={"logical_date": now})
assert response.status_code == 400
assert response.json() == {"detail": error_message}
def test_should_respond_404_if_a_dag_is_inactive(self, test_client, session, testing_dag_bundle):
now = timezone.utcnow().isoformat()
self._dags_for_trigger_tests(session)
response = test_client.post("/dags/inactive/dagRuns", json={"logical_date": now})
assert response.status_code == 404
assert response.json()["detail"] == "DAG with dag_id: 'inactive' not found"
def test_should_respond_400_if_a_dag_has_import_errors(self, test_client, session, testing_dag_bundle):
now = timezone.utcnow().isoformat()
self._dags_for_trigger_tests(session)
response = test_client.post("/dags/import_errors/dagRuns", json={"logical_date": now})
assert response.status_code == 400
assert (
response.json()["detail"]
== "DAG with dag_id: 'import_errors' has import errors and cannot be triggered"
)
    @time_machine.travel(timezone.utcnow(), tick=False)
    @pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
    def test_should_response_409_for_duplicate_logical_date(self, test_client):
        # Two manual runs of the same DAG may not share a logical date: the
        # first POST succeeds, the second must be rejected with 409.
        RUN_ID_1 = "random_1"
        RUN_ID_2 = "random_2"
        now = timezone.utcnow().isoformat().replace("+00:00", "Z")
        note = "duplicate logical date test"
        response_1 = test_client.post(
            f"/dags/{DAG1_ID}/dagRuns",
            json={"dag_run_id": RUN_ID_1, "note": note, "logical_date": now},
        )
        response_2 = test_client.post(
            f"/dags/{DAG1_ID}/dagRuns",
            json={"dag_run_id": RUN_ID_2, "note": note, "logical_date": now},
        )
        assert response_1.status_code == 200
        assert response_1.json() == {
            "bundle_version": None,
            "dag_display_name": DAG1_DISPLAY_NAME,
            "dag_run_id": RUN_ID_1,
            "dag_id": DAG1_ID,
            "dag_versions": mock.ANY,
            "logical_date": now,
            "queued_at": now,
            "start_date": None,
            "end_date": None,
            "duration": None,
            "run_after": now,
            "data_interval_start": now,
            "data_interval_end": now,
            "last_scheduling_decision": None,
            "run_type": "manual",
            "state": "queued",
            "triggered_by": "rest_api",
            "triggering_user_name": "test",
            "conf": {},
            "note": note,
            "partition_key": None,
        }
        assert response_2.status_code == 409
@pytest.mark.parametrize(
("data_interval_start", "data_interval_end"),
[
(
LOGICAL_DATE1.isoformat(),
None,
),
(
None,
LOGICAL_DATE1.isoformat(),
),
],
)
def test_should_response_422_for_missing_start_date_or_end_date(
self, test_client, data_interval_start, data_interval_end
):
now = timezone.utcnow().isoformat()
response = test_client.post(
f"/dags/{DAG1_ID}/dagRuns",
json={
"data_interval_start": data_interval_start,
"data_interval_end": data_interval_end,
"logical_date": now,
},
)
assert response.status_code == 422
assert (
response.json()["detail"][0]["msg"]
== "Value error, Either both data_interval_start and data_interval_end must be provided or both must be None"
)
def test_raises_validation_error_for_invalid_params(self, test_client):
now = timezone.utcnow().isoformat()
response = test_client.post(
f"/dags/{DAG2_ID}/dagRuns",
json={"conf": {"validated_number": 5000}, "logical_date": now},
)
assert response.status_code == 400
assert "Invalid input for param validated_number" in response.json()["detail"]
def test_response_404(self, test_client):
now = timezone.utcnow().isoformat()
response = test_client.post("/dags/randoms/dagRuns", json={"logical_date": now})
assert response.status_code == 404
assert response.json()["detail"] == "DAG with dag_id: 'randoms' not found"
def test_response_409(self, test_client):
now = timezone.utcnow().isoformat()
response = test_client.post(
f"/dags/{DAG1_ID}/dagRuns", json={"dag_run_id": DAG1_RUN1_ID, "logical_date": now}
)
assert response.status_code == 409
response_json = response.json()
assert "detail" in response_json
assert list(response_json["detail"].keys()) == ["reason", "statement", "orig_error", "message"]
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_should_respond_200_with_null_logical_date(self, test_client):
response = test_client.post(
f"/dags/{DAG1_ID}/dagRuns",
json={"logical_date": None},
)
assert response.status_code == 200
assert response.json() == {
"bundle_version": None,
"dag_display_name": DAG1_DISPLAY_NAME,
"dag_run_id": mock.ANY,
"dag_id": DAG1_ID,
"dag_versions": mock.ANY,
"logical_date": None,
"queued_at": mock.ANY,
"run_after": mock.ANY,
"start_date": None,
"end_date": None,
"duration": None,
"data_interval_start": mock.ANY,
"data_interval_end": mock.ANY,
"last_scheduling_decision": None,
"run_type": "manual",
"state": "queued",
"triggered_by": "rest_api",
"triggering_user_name": "test",
"conf": {},
"note": None,
"partition_key": None,
}
@time_machine.travel("2025-10-02 12:00:00", tick=False)
@pytest.mark.usefixtures("custom_timetable_plugin")
def test_custom_timetable_generate_run_id_for_manual_trigger(self, dag_maker, test_client, session):
"""Test that custom timetable's generate_run_id is used for manual triggers (issue #55908)."""
custom_dag_id = "test_custom_timetable_dag"
with dag_maker(
dag_id=custom_dag_id,
schedule=CustomTimetable("0 0 * * *", timezone="UTC"),
session=session,
serialized=True,
):
EmptyOperator(task_id="test_task")
session.commit()
logical_date = datetime(2025, 10, 1, 0, 0, 0, tzinfo=timezone.utc)
response = test_client.post(
f"/dags/{custom_dag_id}/dagRuns",
json={"logical_date": logical_date.isoformat()},
)
assert response.status_code == 200
run_id_with_logical_date = response.json()["dag_run_id"]
assert run_id_with_logical_date.startswith("custom_")
run = session.query(DagRun).filter(DagRun.run_id == run_id_with_logical_date).one()
assert run.dag_id == custom_dag_id
response = test_client.post(
f"/dags/{custom_dag_id}/dagRuns",
json={"logical_date": None},
)
assert response.status_code == 200
run_id_without_logical_date = response.json()["dag_run_id"]
assert run_id_without_logical_date.startswith("custom_manual_")
run = session.query(DagRun).filter(DagRun.run_id == run_id_without_logical_date).one()
assert run.dag_id == custom_dag_id
| TestTriggerDagRun |
python | pytorch__pytorch | test/quantization/fx/test_model_report_fx.py | {
"start": 50792,
"end": 62605
} | class ____(QuantizationTestCase):
class SimpleConv(torch.nn.Module):
def __init__(self, con_dims):
super().__init__()
self.relu = torch.nn.ReLU()
self.conv = torch.nn.Conv2d(con_dims[0], con_dims[1], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
class TwoBlockComplexNet(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.block1 = TestFxDetectInputWeightEqualization.SimpleConv((3, 32))
self.block2 = TestFxDetectInputWeightEqualization.SimpleConv((3, 3))
self.conv = torch.nn.Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), padding=(1, 1), bias=False)
self.linear = torch.nn.Linear(768, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.block1(x)
x = self.conv(x)
y = self.block2(x)
y = y.repeat(1, 1, 2, 2)
z = x + y
z = z.flatten(start_dim=1)
z = self.linear(z)
z = self.relu(z)
return z
def get_fusion_modules(self):
return [['conv', 'relu']]
def get_example_inputs(self):
return (torch.randn((1, 3, 28, 28)),)
class ReluOnly(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(x)
return x
def get_example_inputs(self):
return (torch.arange(27).reshape((1, 3, 3, 3)),)
def _get_prepped_for_calibration_model(self, model, detector_set, fused=False):
r"""Returns a model that has been prepared for calibration and corresponding model_report"""
# pass in necessary inputs to helper
example_input = model.get_example_inputs()[0]
return _get_prepped_for_calibration_model_helper(model, detector_set, example_input, fused)
@skipIfNoFBGEMM
def test_input_weight_equalization_determine_points(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
detector_set = {InputWeightEqualizationDetector(0.5)}
# get tst model and calibrate
non_fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set)
fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set, fused=True)
# reporter should still give same counts even for fused model
for prepared_for_callibrate_model, _mod_report in [non_fused, fused]:
# supported modules to check
mods_to_check = {nn.Linear, nn.Conv2d}
# get the set of all nodes in the graph their fqns
node_fqns = {node.target for node in prepared_for_callibrate_model.graph.nodes}
# there should be 4 node fqns that have the observer inserted
correct_number_of_obs_inserted = 4
number_of_obs_found = 0
obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME
for node in prepared_for_callibrate_model.graph.nodes:
# if the obs name is inside the target, we found an observer
if obs_name_to_find in str(node.target):
number_of_obs_found += 1
self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)
# assert that each of the desired modules have the observers inserted
for module in prepared_for_callibrate_model.modules():
# check if module is a supported module
is_in_include_list = sum(isinstance(module, x) for x in mods_to_check) > 0
if is_in_include_list:
# make sure it has the observer attribute
self.assertTrue(hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
else:
# if it's not a supported type, it shouldn't have observer attached
self.assertTrue(not hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = {test_input_weight_detector}
model = self.TwoBlockComplexNet()
# prepare the model for calibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 5 layers looked at since 4 conv / linear layers
self.assertEqual(len(input_weight_dict), 4)
# we can validate that the max and min values of the detector were recorded properly for the first one
# this is because no data has been processed yet, so it should be values from original input
example_input = example_input.reshape((3, 28, 28)) # reshape input
for module_fqn in input_weight_dict:
# look for the first linear
if "block1.linear" in module_fqn:
block_1_lin_recs = input_weight_dict[module_fqn]
# get input range info and the channel axis
ch_axis = block_1_lin_recs[InputWeightEqualizationDetector.CHANNEL_KEY]
# ensure that the min and max values extracted match properly
example_min, example_max = torch.aminmax(example_input, dim=ch_axis)
dimension_min = torch.amin(example_min, dim=ch_axis)
dimension_max = torch.amax(example_max, dim=ch_axis)
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
self.assertEqual(per_channel_min, dimension_min)
self.assertEqual(per_channel_max, dimension_max)
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
# make sure the global min and max were correctly recorded and presented
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
self.assertEqual(global_min, min(dimension_min))
self.assertEqual(global_max, max(dimension_max))
input_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# ensure comparison stat passed back is sqrt of range ratios
# need to get the weight ratios first
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
# get weight per channel and global info
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
weight_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# also get comp stat for this specific layer
comp_stat = block_1_lin_recs[InputWeightEqualizationDetector.COMP_METRIC_KEY]
weight_to_input_ratio = weight_ratio / input_ratio
self.assertEqual(comp_stat, weight_to_input_ratio)
# only looking at the first example so can break
break
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen_empty(self):
# tests report gen on a model that doesn't have any layers
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = {test_input_weight_detector}
model = self.ReluOnly()
# prepare the model for calibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(model, detector_set)
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 0 layers since there is only a Relu
self.assertEqual(len(input_weight_dict), 0)
# make sure that the string only has two lines, as should be if no suggestions
self.assertEqual(input_weight_str.count("\n"), 2)
| TestFxDetectInputWeightEqualization |
python | doocs__leetcode | solution/0600-0699/0642.Design Search Autocomplete System/Solution.py | {
"start": 659,
"end": 1647
} | class ____:
def __init__(self, sentences: List[str], times: List[int]):
self.trie = Trie()
for a, b in zip(sentences, times):
self.trie.insert(a, b)
self.t = []
def input(self, c: str) -> List[str]:
def dfs(node):
if node is None:
return
if node.v:
res.append((node.v, node.w))
for nxt in node.children:
dfs(nxt)
if c == '#':
s = ''.join(self.t)
self.trie.insert(s, 1)
self.t = []
return []
res = []
self.t.append(c)
node = self.trie.search(''.join(self.t))
if node is None:
return res
dfs(node)
res.sort(key=lambda x: (-x[0], x[1]))
return [v[1] for v in res[:3]]
# Your AutocompleteSystem object will be instantiated and called as such:
# obj = AutocompleteSystem(sentences, times)
# param_1 = obj.input(c)
| AutocompleteSystem |
python | great-expectations__great_expectations | great_expectations/data_context/types/base.py | {
"start": 50799,
"end": 52566
} | class ____(DictDot):
"""
Define base defaults for platform specific StoreBackendDefaults.
StoreBackendDefaults define defaults for specific cases of often used configurations.
For example, if you plan to store expectations, validations, and data_docs in s3 use the S3StoreBackendDefaults and you may be able to specify less parameters.
""" # noqa: E501 # FIXME CoP
def __init__( # noqa: PLR0913 # FIXME CoP
self,
expectations_store_name: str = DataContextConfigDefaults.DEFAULT_EXPECTATIONS_STORE_NAME.value, # noqa: E501 # FIXME CoP
validation_results_store_name: str = DataContextConfigDefaults.DEFAULT_VALIDATIONS_STORE_NAME.value, # noqa: E501 # FIXME CoP
checkpoint_store_name: str = DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_NAME.value,
data_docs_site_name: str = DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITE_NAME.value,
stores: Optional[dict] = None,
data_docs_sites: Optional[dict] = None,
) -> None:
self.expectations_store_name = expectations_store_name
self.validation_results_store_name = validation_results_store_name
self.checkpoint_store_name = checkpoint_store_name
self.validation_definition_store_name = (
DataContextConfigDefaults.DEFAULT_VALIDATION_DEFINITION_STORE_NAME.value
)
if stores is None:
stores = copy.deepcopy(DataContextConfigDefaults.DEFAULT_STORES.value)
self.stores = stores
if data_docs_sites is None:
data_docs_sites = copy.deepcopy(DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITES.value)
self.data_docs_sites = data_docs_sites
self.data_docs_site_name = data_docs_site_name
@public_api
| BaseStoreBackendDefaults |
python | doocs__leetcode | lcof2/剑指 Offer II 066. 单词之和/Solution.py | {
"start": 0,
"end": 570
} | class ____:
def __init__(self):
"""
Initialize your data structure here.
"""
self.data = defaultdict(int)
self.t = defaultdict(int)
def insert(self, key: str, val: int) -> None:
old = self.t[key]
self.t[key] = val
for i in range(1, len(key) + 1):
self.data[key[:i]] += val - old
def sum(self, prefix: str) -> int:
return self.data[prefix]
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
| MapSum |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 113147,
"end": 113533
} | class ____(CReferenceBaseType):
is_rvalue_reference = 1
def __str__(self):
return "%s &&" % self.ref_base_type
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return self.ref_base_type.declaration_code(
"&&%s" % entity_code,
for_display, dll_linkage, pyrex)
| CppRvalueReferenceType |
python | kamyu104__LeetCode-Solutions | Python/missing-number-in-arithmetic-progression.py | {
"start": 32,
"end": 554
} | class ____(object):
def missingNumber(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
def check(arr, d, x):
return arr[x] != arr[0] + d*x
d = (arr[-1]-arr[0])//len(arr)
left, right = 0, len(arr)-1
while left <= right:
mid = left + (right-left)//2
if check(arr, d, mid):
right = mid-1
else:
left = mid+1
return arr[0] + d*left
# Time: O(n)
# Space: O(1)
| Solution |
python | django__django | tests/generic_views/views.py | {
"start": 6231,
"end": 6469
} | class ____(generic.list.MultipleObjectMixin, generic.View):
queryset = [
{"name": "John"},
{"name": "Yoko"},
]
def get(self, request):
self.object_list = self.get_queryset()
| CustomMultipleObjectMixinView |
python | pytorch__pytorch | .ci/lumen_cli/tests/test_cli_helper.py | {
"start": 244,
"end": 378
} | class ____(BaseRunner):
"""Foo description from docstring."""
def run(self) -> None: # replaced by mock
pass
| FooRunner |
python | pypa__setuptools | setuptools/_distutils/tests/support.py | {
"start": 300,
"end": 1370
} | class ____:
"""
Mix-in class that handles temporary directories for test cases.
"""
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
pathlib.Path(*always_iterable(path)).write_text(content, encoding='utf-8')
def create_dist(self, pkg_name='foo', **kw):
"""Will generate a test environment.
This function creates:
- a Distribution instance using keywords
- a temporary directory with a package structure
It returns the package directory and the distribution
instance.
"""
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, pkg_name)
os.mkdir(pkg_dir)
dist = Distribution(attrs=kw)
return pkg_dir, dist
| TempdirManager |
python | getsentry__sentry-python | tests/conftest.py | {
"start": 16739,
"end": 18873
} | class ____(BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
# Process an HTTP GET request and return a response.
# If the path ends with /status/<number>, return status code <number>.
# Otherwise return a 200 response.
code = 200
if "/status/" in self.path:
code = int(self.path[-3:])
self.send_response(code)
self.end_headers()
return
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(("localhost", 0))
_, port = s.getsockname()
s.close()
return port
def create_mock_http_server():
# Start a mock server to test outgoing http requests
mock_server_port = get_free_port()
mock_server = HTTPServer(("localhost", mock_server_port), MockServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.daemon = True
mock_server_thread.start()
return mock_server_port
def unpack_werkzeug_response(response):
# werkzeug < 2.1 returns a tuple as client response, newer versions return
# an object
try:
return response.get_data(), response.status, response.headers
except AttributeError:
content, status, headers = response
return b"".join(content), status, headers
def werkzeug_set_cookie(client, servername, key, value):
# client.set_cookie has a different signature in different werkzeug versions
try:
client.set_cookie(servername, key, value)
except TypeError:
client.set_cookie(key, value)
@contextmanager
def patch_start_tracing_child(fake_transaction_is_none=False):
# type: (bool) -> Iterator[Optional[mock.MagicMock]]
if not fake_transaction_is_none:
fake_transaction = mock.MagicMock()
fake_start_child = mock.MagicMock()
fake_transaction.start_child = fake_start_child
else:
fake_transaction = None
fake_start_child = None
with mock.patch(
"sentry_sdk.tracing_utils.get_current_span", return_value=fake_transaction
):
yield fake_start_child
| MockServerRequestHandler |
python | qdrant__qdrant-client | qdrant_client/http/api/collections_api.py | {
"start": 7616,
"end": 9691
} | class ____(_CollectionsApi):
def collection_exists(
self,
collection_name: str,
) -> m.InlineResponse2006:
"""
Returns \"true\" if the given collection name exists, and \"false\" otherwise
"""
return self._build_for_collection_exists(
collection_name=collection_name,
)
def create_collection(
self,
collection_name: str,
timeout: int = None,
create_collection: m.CreateCollection = None,
) -> m.InlineResponse200:
"""
Create new collection with given parameters
"""
return self._build_for_create_collection(
collection_name=collection_name,
timeout=timeout,
create_collection=create_collection,
)
def delete_collection(
self,
collection_name: str,
timeout: int = None,
) -> m.InlineResponse200:
"""
Drop collection and all associated data
"""
return self._build_for_delete_collection(
collection_name=collection_name,
timeout=timeout,
)
def get_collection(
self,
collection_name: str,
) -> m.InlineResponse2004:
"""
Get detailed information about specified existing collection
"""
return self._build_for_get_collection(
collection_name=collection_name,
)
def get_collections(
self,
) -> m.InlineResponse2003:
"""
Get list name of all existing collections
"""
return self._build_for_get_collections()
def update_collection(
self,
collection_name: str,
timeout: int = None,
update_collection: m.UpdateCollection = None,
) -> m.InlineResponse200:
"""
Update parameters of the existing collection
"""
return self._build_for_update_collection(
collection_name=collection_name,
timeout=timeout,
update_collection=update_collection,
)
| SyncCollectionsApi |
python | networkx__networkx | networkx/generators/tests/test_geometric.py | {
"start": 11030,
"end": 18087
} | class ____:
"""Unit tests for :func:`~networkx.thresholded_random_geometric_graph`"""
def test_number_of_nodes(self):
G = nx.thresholded_random_geometric_graph(50, 0.2, 0.1, seed=42)
assert len(G) == 50
G = nx.thresholded_random_geometric_graph(range(50), 0.2, 0.1, seed=42)
assert len(G) == 50
def test_distances(self):
"""Tests that pairs of vertices adjacent if and only if they are
within the prescribed radius.
"""
# Use the Euclidean metric, the default according to the
# documentation.
G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1, seed=42)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
def test_p(self):
"""Tests for providing an alternate distance metric to the generator."""
# Use the L1 metric.
def dist(x, y):
return sum(abs(a - b) for a, b in zip(x, y))
G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1, p=1, seed=42)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
def test_node_names(self):
"""Tests using values other than sequential numbers as node IDs."""
import string
nodes = list(string.ascii_lowercase)
G = nx.thresholded_random_geometric_graph(nodes, 0.25, 0.1, seed=42)
assert len(G) == len(nodes)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
def test_theta(self):
"""Tests that pairs of vertices adjacent if and only if their sum
weights exceeds the threshold parameter theta.
"""
G = nx.thresholded_random_geometric_graph(50, 0.25, 0.1, seed=42)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert (G.nodes[u]["weight"] + G.nodes[v]["weight"]) >= 0.1
def test_pos_name(self):
trgg = nx.thresholded_random_geometric_graph
G = trgg(50, 0.25, 0.1, seed=42, pos_name="p", weight_name="wt")
assert all(len(d["p"]) == 2 for n, d in G.nodes.items())
assert all(d["wt"] > 0 for n, d in G.nodes.items())
def test_geometric_edges_pos_attribute():
G = nx.Graph()
G.add_nodes_from(
[
(0, {"position": (0, 0)}),
(1, {"position": (0, 1)}),
(2, {"position": (1, 0)}),
]
)
expected_edges = [(0, 1), (0, 2)]
assert expected_edges == nx.geometric_edges(G, radius=1, pos_name="position")
def test_geometric_edges_raises_no_pos():
G = nx.path_graph(3)
msg = "all nodes. must have a '"
with pytest.raises(nx.NetworkXError, match=msg):
nx.geometric_edges(G, radius=1)
def test_number_of_nodes_S1():
G = nx.geometric_soft_configuration_graph(
beta=1.5, n=100, gamma=2.7, mean_degree=10, seed=42
)
assert len(G) == 100
def test_set_attributes_S1():
G = nx.geometric_soft_configuration_graph(
beta=1.5, n=100, gamma=2.7, mean_degree=10, seed=42
)
kappas = nx.get_node_attributes(G, "kappa")
assert len(kappas) == 100
thetas = nx.get_node_attributes(G, "theta")
assert len(thetas) == 100
radii = nx.get_node_attributes(G, "radius")
assert len(radii) == 100
def test_mean_kappas_mean_degree_S1():
G = nx.geometric_soft_configuration_graph(
beta=2.5, n=50, gamma=2.7, mean_degree=10, seed=8023
)
kappas = nx.get_node_attributes(G, "kappa")
mean_kappas = sum(kappas.values()) / len(kappas)
assert math.fabs(mean_kappas - 10) < 0.5
degrees = dict(G.degree())
mean_degree = sum(degrees.values()) / len(degrees)
assert math.fabs(mean_degree - 10) < 1
def test_dict_kappas_S1():
kappas = {i: 10 for i in range(1000)}
G = nx.geometric_soft_configuration_graph(beta=1, kappas=kappas)
assert len(G) == 1000
kappas = nx.get_node_attributes(G, "kappa")
assert all(kappa == 10 for kappa in kappas.values())
def test_beta_clustering_S1():
G1 = nx.geometric_soft_configuration_graph(
beta=1.5, n=100, gamma=3.5, mean_degree=10, seed=42
)
G2 = nx.geometric_soft_configuration_graph(
beta=3.0, n=100, gamma=3.5, mean_degree=10, seed=42
)
assert nx.average_clustering(G1) < nx.average_clustering(G2)
def test_wrong_parameters_S1():
with pytest.raises(
nx.NetworkXError,
match="Please provide either kappas, or all 3 of: n, gamma and mean_degree.",
):
G = nx.geometric_soft_configuration_graph(
beta=1.5, gamma=3.5, mean_degree=10, seed=42
)
with pytest.raises(
nx.NetworkXError,
match="When kappas is input, n, gamma and mean_degree must not be.",
):
kappas = {i: 10 for i in range(1000)}
G = nx.geometric_soft_configuration_graph(
beta=1.5, kappas=kappas, gamma=2.3, seed=42
)
with pytest.raises(
nx.NetworkXError,
match="Please provide either kappas, or all 3 of: n, gamma and mean_degree.",
):
G = nx.geometric_soft_configuration_graph(beta=1.5, seed=42)
def test_negative_beta_S1():
with pytest.raises(
nx.NetworkXError, match="The parameter beta cannot be smaller or equal to 0."
):
G = nx.geometric_soft_configuration_graph(
beta=-1, n=100, gamma=2.3, mean_degree=10, seed=42
)
def test_non_zero_clustering_beta_lower_one_S1():
G = nx.geometric_soft_configuration_graph(
beta=0.5, n=100, gamma=3.5, mean_degree=10, seed=42
)
assert nx.average_clustering(G) > 0
def test_mean_degree_influence_on_connectivity_S1():
low_mean_degree = 2
high_mean_degree = 20
G_low = nx.geometric_soft_configuration_graph(
beta=1.2, n=100, gamma=2.7, mean_degree=low_mean_degree, seed=42
)
G_high = nx.geometric_soft_configuration_graph(
beta=1.2, n=100, gamma=2.7, mean_degree=high_mean_degree, seed=42
)
assert nx.number_connected_components(G_low) > nx.number_connected_components(
G_high
)
def test_compare_mean_kappas_different_gammas_S1():
G1 = nx.geometric_soft_configuration_graph(
beta=1.5, n=20, gamma=2.7, mean_degree=5, seed=42
)
G2 = nx.geometric_soft_configuration_graph(
beta=1.5, n=20, gamma=3.5, mean_degree=5, seed=42
)
kappas1 = nx.get_node_attributes(G1, "kappa")
mean_kappas1 = sum(kappas1.values()) / len(kappas1)
kappas2 = nx.get_node_attributes(G2, "kappa")
mean_kappas2 = sum(kappas2.values()) / len(kappas2)
assert math.fabs(mean_kappas1 - mean_kappas2) < 1
| TestThresholdedRandomGeometricGraph |
python | huggingface__transformers | src/transformers/models/mgp_str/processing_mgp_str.py | {
"start": 940,
"end": 1167
} | class ____(ExplicitEnum):
CHARACTER = "char"
BPE = "bpe"
WORDPIECE = "wp"
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
@requires(backends=("sentencepiece",))
| DecodeType |
python | joke2k__faker | faker/providers/person/tr_TR/__init__.py | {
"start": 44,
"end": 30455
} | class ____(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{first_name_female}} {{last_name}} {{last_name}}",
"{{first_name_female}} {{last_name}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{first_name_female}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{first_name_female}} {{last_name}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}} {{last_name}}",
)
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{first_name_male}} {{last_name}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{first_name_male}} {{last_name}}",
)
formats = formats_male + formats_female
first_names_female = (
"Abiye",
"Acarkan",
"Adal",
"Adila",
"Adviye",
"Afife",
"Ahter",
"Akay",
"Akgüneş",
"Akise",
"Akmaral",
"Aksoy",
"Akyıldız",
"Alabezek",
"Alaz",
"Algış",
"Alize",
"Almast",
"Alsoy",
"Altınbike",
"Altınçiçek",
"Alışık",
"Amre",
"Anargül",
"Anka",
"Aral",
"Armahan",
"Arziye",
"Arıpınar",
"Asiman",
"Asliye",
"Asu",
"Atanur",
"Atiyye",
"Avunç",
"Ayasun",
"Aybet",
"Aycagül",
"Aydar",
"Ayduru",
"Aygönenç",
"Ayhan",
"Aykut",
"Aylil",
"Aynilhayat",
"Aynımah",
"Aysema",
"Aysevim",
"Aysuna",
"Ayten",
"Aytöz",
"Ayyaruk",
"Ayçan",
"Ayülker",
"Ayşeana",
"Ayşenur",
"Azade",
"Azize",
"Açılay",
"Ağbegim",
"Aşhan",
"Badegül",
"Bahtinur",
"Balca",
"Ballı",
"Banü",
"Basriye",
"Bağdat",
"Bediriye",
"Begim",
"Behiza",
"Belgizar",
"Belkize",
"Benek",
"Benice",
"Beray",
"Bergen",
"Beriye",
"Berrin",
"Besey",
"Beste",
"Beyhatun",
"Bezek",
"Bidayet",
"Bilay",
"Bilginur",
"Bilkay",
"Binay",
"Birben",
"Birgül",
"Birsan",
"Bitül",
"Burcuhan",
"Buşra",
"Büreyre",
"Büşranur",
"Canan",
"Canfeza",
"Cannur",
"Canseven",
"Canur",
"Cedide",
"Cemiyle",
"Cevale",
"Ceyhun",
"Cihan",
"Cuheyna",
"Damlanur",
"Deha",
"Deniz",
"Deryanur",
"Değer",
"Dilara",
"Dilcan",
"Dilfeza",
"Dilhuş",
"Dilsitan",
"Dilşat",
"Divan",
"Doğannur",
"Duha",
"Durgadin",
"Dursadiye",
"Duyguhan",
"Döner",
"Dürrüşehvar",
"Ecegül",
"Edaviye",
"Efil",
"Egenur",
"Elamiye",
"Elgin",
"Elifnur",
"Elvan",
"Emal",
"Emine.",
"Emiş",
"Enfes",
"Erbay",
"Erem",
"Ergül",
"Eriş",
"Ervaniye",
"Esengün",
"Esmanperi",
"Esna",
"Evde",
"Evrim",
"Ezgin",
"Eşim",
"Fadile",
"Fadıla",
"Faize",
"Fatigül",
"Fatinur",
"Fatmanur",
"Fayize",
"Feden",
"Fehmiye",
"Ferahdiba",
"Feraye",
"Ferhan",
"Ferinaz",
"Fermuta",
"Feryas",
"Feyha",
"Feyzin",
"Fidaye",
"Fildan",
"Firdevis",
"Fitnat",
"Fügen",
"Gabel",
"Ganiye",
"Gelengül",
"Gilman",
"Goncafer",
"Gök",
"Gökperi",
"Gökçe",
"Göli",
"Görsev",
"Gözem",
"Gül",
"Gülay",
"Gülbani",
"Gülbeyan",
"Gülbiye",
"Gülcegün",
"Güldam",
"Gülder",
"Güldünya",
"Gülenay",
"Güler",
"Gülev",
"Gülfari",
"Gülfeza",
"Gülgen",
"Gülgüzel",
"Gülhisar",
"Gülinaz",
"Gülkadın",
"Güllühan",
"Gülmisal",
"Gülnaziye",
"Gülper",
"Gülsalın",
"Gülselin",
"Gülseren",
"Gülsevil",
"Gülsiye",
"Gülsü",
"Gülter",
"Gülzadiye",
"Gülçe",
"Gülözge",
"Gülüs",
"Gülşa",
"Gülşeref",
"Günar",
"Günebakan",
"Güngören",
"Günsel",
"Günver",
"Gürcüye",
"Gürten",
"Güverçin",
"Güzey",
"Habibe",
"Hacile",
"Hadrey",
"Hafıza",
"Halenur",
"Haliye",
"Hamiyet",
"Hanbiken",
"Hanim",
"Hansultan",
"Harbinaz",
"Hasgül",
"Hasret",
"Hatin",
"Havali",
"Havse",
"Hayel",
"Hayrünnisa",
"Hazine",
"Hekime",
"Henife",
"Heva",
"Hezniye",
"Hilayda",
"Hinet",
"Hoşkadem",
"Huban",
"Hurican",
"Hurşide",
"Hüda",
"Hümeyra",
"Hürmet",
"Hürüyet",
"Hüsnühâl",
"Ildız",
"Irıs",
"Işin",
"Işın",
"Jaruthip",
"Kader",
"Kadınana",
"Kandef",
"Kardelen",
"Kaver",
"Kefser",
"Kerime",
"Kezban",
"Kifaye",
"Kitan",
"Koncagül",
"Kumral",
"Kutgün",
"Kutun",
"Kâzime",
"Kübran",
"Kısmet",
"Laika",
"Laze",
"Lerze",
"Leyli",
"Lezize",
"Limon",
"Lâle",
"Lüfen",
"Macide",
"Mahigül",
"Mahnaz",
"Mahter",
"Maksüde",
"Masume",
"Maynur",
"Maşide",
"Mecide",
"Mefharet",
"Mehdiye",
"Mehrigül",
"Melaha",
"Meleknur",
"Melikkan",
"Melûl",
"Menfeat",
"Menişan",
"Merba",
"Merim",
"Merva",
"Meryeme",
"Mesude",
"Meveddet",
"Mevlüdiye",
"Meyhanim",
"Mezide",
"Mihrab",
"Mihriye",
"Minibe",
"Miray",
"Misra",
"Miyesser",
"Muarra",
"Mufide",
"Muhiye",
"Mujde",
"Mukbile",
"Musaffa",
"Muvahhide",
"Mübetcel",
"Mücevher",
"Müferrih",
"Müjde",
"Mükrüme",
"Mümtaze",
"Münezzer",
"Müret",
"Müsemma",
"Müveddet",
"Müğber",
"Müşüre",
"Nades",
"Nafile",
"Naide",
"Nalân",
"Narhanim",
"Nasiba",
"Natalia",
"Naz",
"Nazende",
"Nazi",
"Nazimet",
"Nazlihan",
"Nazıdil",
"Nebiha",
"Necilal",
"Necva",
"Nefaret",
"Nefiye",
"Nejdet",
"Neptün",
"Neriban",
"Nesfe",
"Neslinur",
"Neval",
"Nevgin",
"Nevise",
"Nevsale",
"Neyran",
"Nezengül",
"Nezize",
"Neşrin",
"Nihan",
"Nilcan",
"Nili",
"Nirgül",
"Niğmet",
"Nura",
"Nurbanu",
"Nurda",
"Nurdeniz",
"Nurey",
"Nurgil",
"Nurhayet",
"Nuriyet",
"Nurmelek",
"Nurseda",
"Nurser",
"Nursim",
"Nurtaç",
"Nurveren",
"Nurşan",
"Nüdret",
"Nürice",
"Oguş",
"Oluş",
"Orçin",
"Paksu",
"Paye",
"Pekkan",
"Pembesin",
"Peren",
"Perinur",
"Permun",
"Pesent",
"Piran",
"Pürçek",
"Rabbiye",
"Rafia",
"Rahiye",
"Rakide",
"Rana",
"Rayla",
"Rebihat",
"Refet",
"Rehime",
"Rengül",
"Revza",
"Rezin",
"Risalet",
"Rojnu",
"Ruhide",
"Ruhugül",
"Rumeysa",
"Rümeysa",
"Rıfkıye",
"Sabihe",
"Sabır",
"Sadeti",
"Sadiye",
"Safinaz",
"Safura",
"Sahil",
"Saire",
"Salimet",
"Samahat",
"Sanavber",
"Sanur",
"Sarya",
"Satıa",
"Saygın",
"Saçı",
"Sebigül",
"Seblâ",
"Sedife",
"Sefer",
"Sehel",
"Sejda",
"Selcen",
"Selime",
"Selmin",
"Selvi",
"Selçuk",
"Semat",
"Semine",
"Semrin",
"Seniha",
"Serda",
"Serfinaz",
"Serma",
"Sernur",
"Servinaz",
"Sevcan",
"Sevdinar",
"Sevgen",
"Sevginur",
"Sevican",
"Sevim",
"Sevla",
"Sevsevil",
"Seyhan",
"Seyyide",
"Sezen",
"Seçgül",
"Sidar",
"Silanur",
"Simber",
"Simten",
"Sirap",
"Siti",
"Solma",
"Sonnur",
"Soykan",
"Subutiye",
"Sultane",
"Sunay",
"Susam",
"Söyler",
"Süheyda",
"Süleyla",
"Sümerya",
"Süner",
"Süsen",
"Süzer",
"Sırriye",
"Tagangül",
"Talibe",
"Tan",
"Tangül",
"Tanses",
"Tanyu",
"Tasvir",
"Tayyibe",
"Taçnur",
"Teknaz",
"Temime",
"Tercan",
"Teybet",
"Ticen",
"Tomurcuk",
"Tule",
"Turcein",
"Tutkucan",
"Tuğçe",
"Tülin",
"Türcan",
"Türknur",
"Tüzenur",
"Ufukay",
"Ummahani",
"Umuşan",
"Uyanser",
"Uğur",
"Vacibe",
"Varlık",
"Vecide",
"Vefia",
"Verde",
"Vezrife",
"Vildane",
"Yahşi",
"Yalın",
"Yasemen",
"Yazgül",
"Yaşar",
"Yekbun",
"Yepelek",
"Yeşil",
"Yosma",
"Yurdaser",
"Yurtseven",
"Yücel",
"Yıldız",
"Zahfer",
"Zaliha",
"Zebirce",
"Zehranur",
"Zelha",
"Zemzem",
"Zerafet",
"Zeride",
"Zevlüde",
"Zeyno",
"Zilfa",
"Zinnure",
"Zubeyde",
"Zöhrehan",
"Züheyla",
"Zülbiye",
"Zülfüye",
"Zümre",
"Âlemşah",
"Çalım",
"Çağlar",
"Çevregül",
"Çilga",
"Çisem",
"Çolpan",
"Ömriye",
"Öncel",
"Örfiye",
"Övün",
"Özay",
"Özbilge",
"Özdeş",
"Özge",
"Özgün",
"Özlem",
"Özpetek",
"Özyurt",
"Üge",
"Ülke",
"Ülküm",
"Ümmahan",
"Ümmühan",
"Ümray",
"Ünal",
"Ünsever",
"Ürper",
"Üçgül",
"İde",
"İhsan",
"İklim",
"İldeş",
"İlkbahar",
"İlklima",
"İlper",
"İmge",
"İmrihan",
"İncifir",
"İnsaf",
"İrfan",
"İslime",
"İsra",
"İzel",
"İçimbike",
"Şadıman",
"Şahdiye",
"Şahinder",
"Şahnuray",
"Şahıgül",
"Şamiha",
"Şayan",
"Şazime",
"Şefiye",
"Şehreban",
"Şehza",
"Şelâle",
"Şemsinisa",
"Şendoğan",
"Şennur",
"Şeref",
"Şerman",
"Şevketfeza",
"Şeyda",
"Şilan",
"Şirivan",
"Şöhret",
"Şüküfe",
)
first_names_male = (
"Abdiş",
"Abdulbekir",
"Abdulgazi",
"Abdulkadir",
"Abdulmenaf",
"Abdulsemet",
"Abdurrahman",
"Abdülahat",
"Abdülcemal",
"Abdülhadi",
"Abdülkerim",
"Abdülsamed",
"Abdürreşit",
"Abid",
"Abuzar",
"Acar",
"Aclan",
"Adak",
"Adasal",
"Adlan",
"Adıgün",
"Afer",
"Ahat",
"Ahsen",
"Akalan",
"Akar",
"Akatay",
"Akbaş",
"Akboğa",
"Akcivan",
"Akdora",
"Akdurmuş",
"Akgöl",
"Akif",
"Akkerman",
"Akmaner",
"Aksay",
"Aksöğüt",
"Aktemür",
"Akver",
"Akçabay",
"Akçasu",
"Aköz",
"Akınal",
"Alaaddin",
"Alaeddin",
"Alanalp",
"Alattin",
"Alcan",
"Alexandru",
"Aliabbas",
"Aliihsan",
"Aliseydi",
"Alkor",
"Almus",
"Alparslan",
"Alpcan",
"Alpin",
"Alpsü",
"Alsoy",
"Altoğan",
"Altuğ",
"Altınkaya",
"Altınışın",
"Amaç",
"Andiç",
"Annak",
"Apaydın",
"Aran",
"Arcan",
"Argu",
"Arifcan",
"Arkut",
"Arpağ",
"Artan",
"Aru",
"Arıel",
"Arıkol",
"Arısoy",
"Asalet",
"Aslanhan",
"Asım",
"Atagün",
"Atalay",
"Atasagun",
"Atatöre",
"Atgun",
"Atilhan",
"Atnan",
"Atılgan",
"Avşin",
"Ayaydın",
"Aybora",
"Aydinç",
"Aydınbey",
"Aygutalp",
"Aykutalp",
"Aypar",
"Aysoy",
"Aytek",
"Aytuna",
"Ayvas",
"Ayşan",
"Azettin",
"Açıkel",
"Ağakişi",
"Ağmur",
"Aşir",
"Baba",
"Bahaddin",
"Bahittin",
"Baki",
"Balatekin",
"Bali",
"Baltaş",
"Barak",
"Bariş",
"Barsen",
"Barışcan",
"Basım",
"Baturay",
"Batırhan",
"Baydu",
"Baykan",
"Bayman",
"Bayruhan",
"Baytal",
"Bayzettin",
"Bağdaş",
"Başay",
"Başhan",
"Başok",
"Bedi",
"Bedri",
"Behrem",
"Bekbay",
"Bektaşi",
"Bellisan",
"Bengibay",
"Benol",
"Beren",
"Berkal",
"Berki",
"Berksay",
"Berran",
"Besin",
"Beyda",
"Beyler",
"Beyzade",
"Bican",
"Bilender",
"Bilgen",
"Bilgütay",
"Biltaş",
"Binbaşar",
"Binışık",
"Birgit",
"Birsen",
"Bişar",
"Borahan",
"Borataş",
"Boynak",
"Bozbağ",
"Bozerk",
"Boztaş",
"Boğatimur",
"Buhari",
"Bulunç",
"Burakhan",
"Burç",
"Buyrukhan",
"Börteçin",
"Büget",
"Bünyamün",
"Cabir",
"Canal",
"Canberk",
"Candeniz",
"Cangür",
"Cannur",
"Cansin",
"Cantez",
"Cavit",
"Cebesoy",
"Celilay",
"Cemalettin",
"Cenan",
"Cercis",
"Cevheri",
"Cezayir",
"Cihandide",
"Cindoruk",
"Coşkun",
"Cuman",
"Cüneyit",
"Dalan",
"Dalkılıç",
"Danış",
"Dayar",
"Dağistan",
"Delil",
"Demirbüken",
"Demiriz",
"Demirok",
"Demiryürek",
"Denizalp",
"Denkel",
"Derkay",
"Deviner",
"Değmeer",
"Diken",
"Dilder",
"Dincer",
"Dinçkol",
"Dinçsü",
"Dirican",
"Dirlik",
"Dolun",
"Dorukhan",
"Doğanalp",
"Doğanşah",
"Doğuhan",
"Duracan",
"Durdali",
"Durmuşali",
"Duruk",
"Duruöz",
"Dölensoy",
"Dündaralp",
"Eba",
"Ebuakil",
"Ecemiş",
"Edgübay",
"Efe",
"Eflâtun",
"Efser",
"Ekber",
"Ekmel",
"Elhan",
"Elnur",
"Elöve",
"Emin",
"Emirşan",
"Emrullah",
"Enes",
"Enginiz",
"Ensari",
"Eral",
"Eraycan",
"Erbil",
"Ercihan",
"Erdemer",
"Erdibay",
"Erdogan",
"Erdursun",
"Erenalp",
"Erensoy",
"Ergener",
"Erginel",
"Ergönül",
"Ergün",
"Erik",
"Erinçer",
"Erkan",
"Erkinel",
"Erksoy",
"Erkılıç",
"Ermutlu",
"Eroğul",
"Ersel",
"Erseç",
"Ertan",
"Erten",
"Ertuncay",
"Ertün",
"Eryıldız",
"Eröz",
"Erşat",
"Esenbay",
"Esentürk",
"Eskinalp",
"Evcimen",
"Evrimer",
"Eyyüp",
"Ezgütekin",
"Eşref",
"Fahrullah",
"Fami",
"Fatih",
"Fazul",
"Fehim",
"Fenni",
"Ferat",
"Feremez",
"Ferihan",
"Fersan",
"Ferzi",
"Fetullah",
"Feyruz",
"Feza",
"Filit",
"Fuzuli",
"Galip",
"Gazel",
"Gencaslan",
"Gençay",
"Geray",
"Ginyas",
"Giz",
"Gökay",
"Gökbudun",
"Göken",
"Göknur",
"Göksev",
"Gökten",
"Gökçebalan",
"Gökçil",
"Gönen",
"Görgünay",
"Görklü",
"Gözel",
"Gücal",
"Gülağa",
"Gülel",
"Güleğen",
"Gülşahin",
"Gümüştekin",
"Günaydin",
"Günden",
"Gündüzalp",
"Güneri",
"Güngördü",
"Günkurt",
"Günser",
"Günver",
"Günşen",
"Gürarda",
"Gürelcem",
"Gürsal",
"Güçal",
"Güçlüer",
"Güçyeter",
"Haciali",
"Hakikat",
"Halidun",
"Haluk",
"Hami",
"Hanedan",
"Hariz",
"Hasbek",
"Hatem",
"Hayali",
"Hayret",
"Hazrat",
"Hekmet",
"Heyvetullah",
"Hidir",
"Hindal",
"Hiçsönmez",
"Hudavent",
"Hunalp",
"Huzuri",
"Hükümdar",
"Hürdoğan",
"Hüryaşar",
"Hüsmen",
"Hıfzullah",
"Idık",
"Ilgı",
"Ismık",
"Işıkay",
"Işıman",
"Jankat",
"Kader",
"Kahir",
"Kalgay",
"Kamar",
"Kanak",
"Kanpulat",
"Kapagan",
"Karabaş",
"Karaca",
"Karaer",
"Karakucak",
"Karanbay",
"Karataş",
"Karcan",
"Karlukhan",
"Kasim",
"Kavurt",
"Kayagün",
"Kaygusuz",
"Kayrabay",
"Kayıt",
"Kaşif",
"Kelâmi",
"Kenter",
"Kerman",
"Kete",
"Kibar",
"Kiramettin",
"Kiyasi",
"Kocabaş",
"Koldan",
"Konguralp",
"Kopan",
"Koray",
"Korkmazalp",
"Korugan",
"Kotuz",
"Koçak",
"Koçkan",
"Koşukhan",
"Kuddusi",
"Kutay",
"Kâmil",
"Köker",
"Köktaş",
"Kösemen",
"Kürşad",
"Kılıçbay",
"Kınel",
"Kırat",
"Kırgız",
"Kıvılcım",
"Kızıl",
"Kızıltunç",
"Ledün",
"Lutfi",
"Lütfi",
"Mahir",
"Mahsun",
"Maksur",
"Mansurali",
"Masar",
"Mazlum",
"Mecit",
"Mefarettin",
"Mehmed",
"Mehmetzahir",
"Melihcan",
"Memili",
"Mengi",
"Mengüç",
"Merdi",
"Mertel",
"Merzuk",
"Mestur",
"Metinkaya",
"Mevlüt",
"Meşhur",
"Mihin",
"Milay",
"Mirbadin",
"Mishat",
"Monis",
"Mucahit",
"Muhammet",
"Muhip",
"Muhyettin",
"Muktedir",
"Muratcan",
"Musafet",
"Mutasım",
"Mutluhan",
"Muvaffak",
"Möhsim",
"Mücellib",
"Müfit",
"Mükramin",
"Mülâyim",
"Münif",
"Mürit",
"Müslum",
"Müzekker",
"Nabil",
"Nafii",
"Nakip",
"Nas",
"Nasuf",
"Nayil",
"Nazlim",
"Nebih",
"Necdat",
"Necmettin",
"Nehip",
"Nerim",
"Nesip",
"Nevsal",
"Nezihi",
"Nihai",
"Niyazi",
"Noman",
"Nural",
"Nurcan",
"Nuretdin",
"Nurkan",
"Nurullah",
"Nuyan",
"N˜zamett˜n",
"Odkanlı",
"Oganer",
"Okanay",
"Okbay",
"Okgüçlü",
"Okseven",
"Oktüremiş",
"Okyalaz",
"Olca",
"Oldağ",
"Oliver",
"Omaca",
"Onat",
"Ongay",
"Onuker",
"Onurcan",
"Onursu",
"Oranlı",
"Orgün",
"Ortak",
"Oruç",
"Otay",
"Oymak",
"Ozansü",
"Oğulbaş",
"Oğurata",
"Oğuzman",
"Paker",
"Pehlil",
"Pirahmet",
"Rabih",
"Rafih",
"Rahmet",
"Ramadan",
"Rasul",
"Razı",
"Recepali",
"Refik",
"Remazan",
"Resulcan",
"Rezzak",
"Risalet",
"Rohat",
"Ruhsat",
"Rüknettin",
"Rüşen",
"Saba",
"Sabih",
"Sadat",
"Sadittin",
"Safet",
"Sahir",
"Sakip",
"Salami",
"Salkın",
"Salurbay",
"Sami",
"Samurtay",
"Sancak",
"Sançar",
"Sargın",
"Sarpkın",
"Sarıcabay",
"Satrettin",
"Savak",
"Savni",
"Saydam",
"Sayin",
"Sayrak",
"Sayın",
"Sağcan",
"Sağıt",
"Sebattin",
"Seda",
"Seha",
"Selaheddin",
"Selatin",
"Seler",
"Selvi",
"Selâtin",
"Semender",
"Sencar",
"Seracettin",
"Serda",
"Serezli",
"Serhatmehmet",
"Serol",
"Server",
"Sevdi",
"Sevindik",
"Seydo",
"Seyfullah",
"Seyithan",
"Sezal",
"Sezginbaş",
"Seçme",
"Sidki",
"Siper",
"Sittik",
"Sonad",
"Songurkan",
"Soydaner",
"Soykut",
"Soyselçuk",
"Suat",
"Sudi",
"Sulhi",
"Sunel",
"Suphi",
"Sökmen",
"Sözer",
"Sücaettin",
"Süha",
"Sümeyye",
"Süvari",
"Sıla",
"Sıylıhan",
"Taciddin",
"Tahir",
"Talayer",
"Tali",
"Tamaydın",
"Tanak",
"Tanbay",
"Tandoğdu",
"Tanhan",
"Tanpınar",
"Tansev",
"Tansığ",
"Tanyolaç",
"Tanır",
"Tarancı",
"Tartış",
"Tatu",
"Tayaydın",
"Taygan",
"Taylak",
"Tayyip",
"Taşar",
"Taşkan",
"Teber",
"Tecimer",
"Tekbay",
"Tekecan",
"Tekiner",
"Teksoy",
"Telim",
"Temirhan",
"Temizkal",
"Temuçin",
"Tenvir",
"Terlan",
"Tevs",
"Tezcan",
"Tezol",
"Timurtaş",
"Tiğin",
"Toker",
"Toktuğ",
"Toköz",
"Tolonbay",
"Tonguç",
"Topuz",
"Torhan",
"Toy",
"Toğan",
"Tulun",
"Tunahan",
"Tunguç",
"Tunçboğa",
"Tunçkılıç",
"Turabi",
"Turgut",
"Tutkun",
"Tuyuğ",
"Tuğcan",
"Tuğrulhan",
"Tuğtaş",
"Törel",
"Tükelalp",
"Tümer",
"Tümkurt",
"Türabi",
"Türkalp",
"Türkmen",
"Tüzeer",
"Tınal",
"Ufukay",
"Ulakbey",
"Ulu",
"Uludağ",
"Uluman",
"Ulutay",
"Uluğbey",
"Umman",
"Umutcan",
"Uraltay",
"Urhan",
"Us",
"Ushan",
"Utkucan",
"Uygun",
"Uzbay",
"Uzsoy",
"Uçan",
"Uçbeyi",
"Uğan",
"Uğurkan",
"Uğurtan",
"Vafir",
"Vahittin",
"Vargın",
"Vaysal",
"Vedat",
"Veis",
"Velitdin",
"Verim",
"Vezat",
"Vâlâ",
"Yadigar",
"Yahşikan",
"Yalazabay",
"Yalgın",
"Yaltırak",
"Yalın",
"Yamin",
"Yankı",
"Yargı",
"Yasan",
"Yavuz",
"Yayak",
"Yazganalp",
"Yağın",
"Yağızkurt",
"Yaşattin",
"Yekda",
"Yelesen",
"Yeneral",
"Yertan",
"Yetişal",
"Yigit",
"Yilmaz",
"Yolal",
"Yoruç",
"Yunt",
"Yurdanur",
"Yurtgüven",
"Yurttaş",
"Yönetmen",
"Yücelen",
"Yümun",
"Yıldır",
"Yılma",
"Zahid",
"Zamir",
"Zekayi",
"Zennun",
"Zeynelabidin",
"Zihni",
"Ziyaettin",
"Zoral",
"Züfer",
"Zülgarni",
"Âdem",
"Çakar",
"Çakırca",
"Çaltı",
"Çamok",
"Çapkan",
"Çavuldur",
"Çağa",
"Çağdan",
"Çağlasın",
"Çağveren",
"Çelem",
"Çelikkan",
"Çelikyürek",
"Çerçi",
"Çetinsu",
"Çeviköz",
"Çinerk",
"Çokan",
"Çopur",
"Çoğay",
"Çıdal",
"Çıvgın",
"Öge",
"Ökkaş",
"Öktürk",
"Ömür",
"Öncel",
"Önel",
"Öngen",
"Önsal",
"Örik",
"Öryürek",
"Över",
"Özakan",
"Özalpsan",
"Özaslan",
"Özbay",
"Özbilek",
"Özdal",
"Özdil",
"Özdoğdu",
"Özel",
"Özerdinç",
"Özertem",
"Özger",
"Özgür",
"Özinal",
"Özkent",
"Özkutlu",
"Özlü",
"Özokçu",
"Özpınar",
"Özsözlü",
"Öztek",
"Öztürk",
"Özçam",
"Özüdoğru",
"Öğet",
"Übeydullah",
"Ülfet",
"Ülküdeş",
"Ümmet",
"Ünek",
"Ünlen",
"Ünsever",
"Ünübol",
"Ürfettin",
"Üsame",
"Üstün",
"Üzer",
"Ğanim",
"İbrahim",
"İdiris",
"İkiz",
"İlalmış",
"İlbek",
"İldem",
"İlgi",
"İlim",
"İlkim",
"İlmafer",
"İlsu",
"İlteriş",
"İmam",
"İmren",
"İnançlı",
"İntihap",
"İsak",
"İsmet",
"İvecen",
"İyiyürek",
"İzgü",
"İşcan",
"Şabettin",
"Şafii",
"Şahat",
"Şahinbey",
"Şahmettin",
"Şali",
"Şanlı",
"Şavki",
"Şefi",
"Şehamet",
"Şekim",
"Şemsettin",
"Şendoğan",
"Şenkal",
"Şerafeddin",
"Şevket",
"Şide",
"Şinasi",
"Şuayp",
"Şükri",
)
first_names = first_names_male + first_names_female
last_names = (
"Yılmaz",
"Yıldırım",
"Türk",
"Yorulmaz",
"Erdoğan",
"Çorlu",
"Sakarya",
"Demir",
"Yaman",
"Manço",
"Aksu",
"Akçay",
"Akar",
"Bilir",
"Bilgin",
"Yüksel",
"Eraslan",
"Aslan",
"Arslan",
"Hançer",
"Hayrioğlu",
"Şama",
"Ergül",
"Arsoy",
"Kısakürek",
"Gülen",
"Seven",
"Şafak",
"Dumanlı",
"Ertaş",
"Güçlü",
"Soylu",
"Zorlu",
"Fırat",
"Duran",
"Durmuş",
"Durdu",
"Zengin",
"Tevetoğlu",
"Mansız",
"Şener",
"Şensoy",
"Ülker",
"Tarhan",
"Sezer",
"Demirel",
"Gül",
"Korutürk",
"İnönü",
"Öcalan",
"Çetin",
"Sezgin",
"Alemdar",
"Karadeniz",
"Akdeniz",
"Bilge",
"Akgündüz",
"Akçay",
"Çamurcuoğlu",
"İhsanoğlu",
"Akça",
)
prefixes_female = (
"Bayan",
"Dr.",
"Uz.",
"Öğr.",
"Çev.",
"Okt.",
"Öğr.",
"Öğr.",
"Arş. Gör.",
"Yrd. Doç.",
"Dr.",
"Doç.",
"Prof.",
"Av.",
)
prefixes_male = (
"Bay",
"Dr.",
"Uz.",
"Öğr.",
"Çev.",
"Okt.",
"Öğr.",
"Öğr.",
"Arş. Gör.",
"Yrd. Doç.",
"Dr.",
"Doç.",
"Prof.",
"Av.",
)
prefixes = prefixes_female + prefixes_male
| Provider |
python | kamyu104__LeetCode-Solutions | Python/count-of-range-sum.py | {
"start": 1528,
"end": 3020
} | class ____(object):
def countRangeSum(self, nums, lower, upper):
"""
:type nums: List[int]
:type lower: int
:type upper: int
:rtype: int
"""
def countAndMergeSort(sums, start, end, lower, upper):
if end - start <= 0: # The size of range [start, end] less than 2 is always with count 0.
return 0
mid = start + (end - start) / 2
count = countAndMergeSort(sums, start, mid, lower, upper) + \
countAndMergeSort(sums, mid + 1, end, lower, upper)
j, k, r = mid + 1, mid + 1, mid + 1
tmp = []
for i in xrange(start, mid + 1):
# Count the number of range sums that lie in [lower, upper].
while k <= end and sums[k] - sums[i] < lower:
k += 1
while j <= end and sums[j] - sums[i] <= upper:
j += 1
count += j - k
# Merge the two sorted arrays into tmp.
while r <= end and sums[r] < sums[i]:
tmp.append(sums[r])
r += 1
tmp.append(sums[i])
# Copy tmp back to sums
sums[start:start+len(tmp)] = tmp
return count
sums = [0] * (len(nums) + 1)
for i in xrange(len(nums)):
sums[i + 1] = sums[i] + nums[i]
return countAndMergeSort(sums, 0, len(sums) - 1, lower, upper)
| Solution2 |
python | keras-team__keras | keras/src/trainers/trainer_test.py | {
"start": 99954,
"end": 101746
} | class ____(test_case.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("single_device", False),
("distributed", True),
)
def test_jit_fit_with_out_shardings_logic(self, distributed):
if keras.backend.backend() != "jax":
self.skipTest("This test requires the JAX backend.")
x = np.random.rand(64, 8).astype("float32")
y = np.random.rand(64, 1).astype("float32")
distribution = None
if distributed:
if len(jax.local_devices()) < 2:
self.skipTest(
"Distributed test requires at least 2 JAX devices."
)
devices = jax.local_devices()
mesh = DeviceMesh(
shape=(len(devices),), axis_names=("batch",), devices=devices
)
distribution = DataParallel(mesh)
scope = distribution.scope() if distribution else mock.MagicMock()
with scope:
model = models.Sequential(
[
layers.Dense(4, activation="relu", input_shape=(8,)),
layers.Dense(1),
]
)
model.compile(optimizer="adam", loss="mse", jit_compile=True)
if distribution:
expected_shardings = [
v.value.sharding for v in model.trainable_variables
]
self.assertNotEqual(len(set(expected_shardings)), 1)
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
if distribution:
actual_shardings = [
v.value.sharding for v in model.trainable_variables
]
self.assertListEqual(actual_shardings, expected_shardings)
| JAXTrainerCorrectnessTest |
python | getsentry__sentry | src/sentry/lang/native/applecrashreport.py | {
"start": 467,
"end": 11956
} | class ____:
def __init__(
self, threads=None, context=None, debug_images=None, symbolicated=False, exceptions=None
):
"""
Create an Apple crash report from the provided data.
This constructor can modify the passed structures in place.
"""
self.threads = threads if threads else []
self.context = context
self.symbolicated = symbolicated
self.exceptions = exceptions if exceptions else []
self.image_addrs_to_vmaddrs = {}
# Remove frames that don't have an `instruction_addr` and convert
# sundry addresses into numbers.
ts = self.exceptions + self.threads
frame_keys = ["instruction_addr", "image_addr", "symbol_addr"]
for t in ts:
if stacktrace := t.get("stacktrace"):
if frames := stacktrace.pop("frames", []):
new_frames = []
for frame in frames:
if frame.get("instruction_addr", None) is None:
continue
new_frame = {
key: parse_addr(frame[key]) if key in frame_keys else value
for key, value in frame.items()
}
new_frames.append(new_frame)
stacktrace["frames"] = new_frames
# Remove debug images that don't have an `image_addr` and convert
# `image_addr` and `image_vmaddr` to numbers.
self.debug_images = []
image_keys = ["image_addr", "image_vmaddr"]
for image in debug_images or []:
if image.get("image_addr", None) is None:
continue
new_image = {
key: parse_addr(image[key]) if key in image_keys else value
for key, value in image.items()
}
self.debug_images.append(new_image)
# If the image has an `image_vmaddr`, save the mapping from
# `image_addr` to `image_vmaddr`. This will be used in
# `_get_slide_value`.
if new_image.get("image_vmaddr") is not None:
self.image_addrs_to_vmaddrs[new_image["image_addr"]] = new_image["image_vmaddr"]
@sentry_sdk.trace
def __str__(self) -> str:
rv = []
rv.append(self._get_meta_header())
rv.append(self._get_exception_info())
rv.append(self.get_threads_apple_string())
rv.append(self._get_crashed_thread_registers())
rv.append(self.get_binary_images_apple_string())
return "\n\n".join(rv) + "\n\nEOF"
@sentry_sdk.trace
def _get_meta_header(self):
return "OS Version: {} {} ({})\nReport Version: {}".format(
get_path(self.context, "os", "name"),
get_path(self.context, "os", "version"),
get_path(self.context, "os", "build"),
REPORT_VERSION,
)
def _get_register_index(self, register: str, register_map: Mapping[str, int]) -> int:
return register_map.get(register[1:] if register.startswith("$") else register, -1)
def _get_sorted_registers(
self, registers: Mapping[str, str | None], register_map: Mapping[str, int]
) -> list[tuple[str, str | None]]:
return [
(register_name, registers.get(register_name))
for register_name in sorted(
registers.keys(), key=lambda name: self._get_register_index(name, register_map)
)
]
def _get_register_map_for_arch(self) -> tuple[str, bool, Mapping[str, int]]:
arch = get_path(self.context, "device", "arch")
if not isinstance(arch, str):
return (NATIVE_UNKNOWN_STRING, False, {})
if arch.startswith("x86_64"):
return ("x86", True, REGISTERS_X86_64)
if arch.startswith("x86"):
return ("x86", False, REGISTERS_X86)
if arch.startswith("arm64"):
return ("ARM", True, REGISTERS_ARM64)
if arch.startswith("arm"):
return ("ARM", False, REGISTERS_ARM)
return (arch, False, {})
def _get_padded_hex_value(self, value: str) -> str:
try:
num_value = int(value, 16)
padded_hex_value = f"{num_value:x}".rjust(16, "0")
return "0x" + padded_hex_value
except Exception:
return value
@sentry_sdk.trace
def _get_crashed_thread_registers(self):
rv = []
exception = get_path(self.exceptions, 0)
if not exception:
return ""
thread_id = exception.get("thread_id")
crashed_thread_info = next(
filter(lambda t: t.get("id") == thread_id, self.threads or []), None
)
crashed_thread_registers = get_path(crashed_thread_info, "stacktrace", "registers")
if not isinstance(crashed_thread_registers, Mapping):
return ""
arch_label, is_64_bit, register_map = self._get_register_map_for_arch()
rv.append(
"Thread {} crashed with {} Thread State ({}-bit):".format(
thread_id, arch_label, "64" if is_64_bit else "32"
)
)
line = " "
for i, register in enumerate(
self._get_sorted_registers(crashed_thread_registers, register_map)
):
if i != 0 and (i % 4 == 0):
rv.append(line)
line = " "
register_name, register_value = register
line += "{}: {}".format(
register_name.rjust(5), self._get_padded_hex_value(register_value or "0x0")
)
if line != " ":
rv.append(line)
return "\n".join(rv)
@sentry_sdk.trace
def _get_exception_info(self):
rv = []
# We only have one exception at a time
exception = get_path(self.exceptions, 0)
if not exception:
return ""
mechanism = upgrade_legacy_mechanism(exception.get("mechanism")) or {}
mechanism_meta = get_path(mechanism, "meta", default={})
signal = get_path(mechanism_meta, "signal", "name")
name = get_path(mechanism_meta, "mach_exception", "name")
if name or signal:
rv.append(
"Exception Type: {}{}".format(
name or "Unknown", signal and (" (%s)" % signal) or ""
)
)
exc_name = get_path(mechanism_meta, "signal", "code_name")
exc_addr = get_path(mechanism, "data", "relevant_address")
if exc_name:
rv.append(
"Exception Codes: %s%s"
% (exc_name, exc_addr is not None and (" at %s" % exc_addr) or "")
)
if exception.get("thread_id") is not None:
rv.append("Crashed Thread: %s" % exception["thread_id"])
if exception.get("value"):
rv.append("\nApplication Specific Information:\n%s" % exception["value"])
return "\n".join(rv)
@sentry_sdk.trace
def get_threads_apple_string(self):
rv = []
exception = self.exceptions or []
threads = self.threads or []
for thread_info in exception + threads:
thread_string = self.get_thread_apple_string(thread_info)
if thread_string is not None:
rv.append(thread_string)
return "\n\n".join(rv)
def get_thread_apple_string(self, thread_info):
rv = []
stacktrace = get_path(thread_info, "stacktrace")
if stacktrace is None:
return None
if stacktrace:
frames = get_path(stacktrace, "frames", filter=True)
if frames:
for i, frame in enumerate(reversed(frames)):
frame_string = self._convert_frame_to_apple_string(
frame=frame,
next=frames[len(frames) - i - 2] if i < len(frames) - 1 else None,
number=i,
)
if frame_string is not None:
rv.append(frame_string)
if len(rv) == 0:
return None # No frames in thread, so we remove thread
is_exception = bool(thread_info.get("mechanism"))
thread_id = thread_info.get("id") or thread_info.get("thread_id") or "0"
thread_name = thread_info.get("name")
thread_name_string = " name: %s" % (thread_name) if thread_name else ""
thread_crashed = thread_info.get("crashed") or is_exception
thread_crashed_thread = " Crashed:" if thread_crashed else ""
thread_string = f"Thread {thread_id}{thread_name_string}{thread_crashed_thread}\n"
return thread_string + "\n".join(rv)
def _convert_frame_to_apple_string(self, frame, next=None, number=0):
frame_instruction_addr = frame["instruction_addr"]
frame_image_addr = frame.get("image_addr", 0)
slide_value = self._get_slide_value(frame_image_addr)
instruction_addr = slide_value + frame_instruction_addr
image_addr = slide_value + frame_image_addr
offset = ""
if frame.get("image_addr") is not None and (
not self.symbolicated
or (frame.get("function") or NATIVE_UNKNOWN_STRING) == NATIVE_UNKNOWN_STRING
):
offset_value = instruction_addr - slide_value - frame.get("symbol_addr", 0)
offset = f" + {offset_value}"
symbol = hex(image_addr)
if self.symbolicated:
file = ""
if frame.get("filename") and frame.get("lineno"):
file = " ({}:{})".format(
posixpath.basename(frame.get("filename") or NATIVE_UNKNOWN_STRING),
frame["lineno"],
)
symbol = "{}{}".format(frame.get("function") or NATIVE_UNKNOWN_STRING, file)
if next and frame_instruction_addr == next.get("instruction_addr", 0):
symbol = "[inlined] " + symbol
return "{}{}{}{}{}".format(
str(number).ljust(4, " "),
image_name(frame.get("package") or NATIVE_UNKNOWN_STRING).ljust(32, " "),
hex(instruction_addr).ljust(20, " "),
symbol,
offset,
)
def _get_slide_value(self, image_addr):
return self.image_addrs_to_vmaddrs.get(image_addr, 0)
@sentry_sdk.trace
def get_binary_images_apple_string(self):
# We don't need binary images on symbolicated crashreport
if self.symbolicated or not self.debug_images:
return ""
binary_images = map(
lambda i: self._convert_debug_meta_to_binary_image_row(debug_image=i),
sorted(
self.debug_images,
key=lambda i: i["image_addr"],
),
)
return "Binary Images:\n" + "\n".join(binary_images)
def _convert_debug_meta_to_binary_image_row(self, debug_image):
slide_value = debug_image.get("image_vmaddr", 0)
image_addr = debug_image["image_addr"] + slide_value
return "{} - {} {} {} <{}> {}".format(
hex(image_addr),
hex(image_addr + debug_image["image_size"] - 1),
image_name(debug_image.get("code_file") or NATIVE_UNKNOWN_STRING),
get_path(self.context, "device", "arch") or NATIVE_UNKNOWN_STRING,
debug_image.get("debug_id").replace("-", "").lower(),
debug_image.get("code_file") or NATIVE_UNKNOWN_STRING,
)
| AppleCrashReport |
python | pytorch__pytorch | torch/fx/experimental/accelerator_partitioner.py | {
"start": 569,
"end": 1261
} | class ____:
"""DAGNode class maintains useful information for a partition (submodule),
and its input submodules and output submodules.
"""
def __init__(
self,
submodule_node: Node,
input_nodes: list[Node],
output_nodes: list[Node],
logical_device_ids: list[int],
size_bytes: int,
) -> None:
self.submodule_node: Node = submodule_node
self.input_nodes: list[Node] = input_nodes
self.output_nodes: list[Node] = output_nodes
self.logical_device_ids: list[int] = logical_device_ids
self.size_bytes = size_bytes
def __str__(self) -> str:
return str(self.submodule_node)
| DAGNode |
python | django__django | tests/admin_default_site/sites.py | {
"start": 35,
"end": 84
} | class ____(admin.AdminSite):
pass
| CustomAdminSite |
python | ray-project__ray | python/ray/_private/prometheus_exporter.py | {
"start": 633,
"end": 2001
} | class ____(object):
"""Options contains options for configuring the exporter.
The address can be empty as the prometheus client will
assume it's localhost
:type namespace: str
:param namespace: The prometheus namespace to be used. Defaults to ''.
:type port: int
:param port: The Prometheus port to be used. Defaults to 8000.
:type address: str
:param address: The Prometheus address to be used. Defaults to ''.
:type registry: registry
:param registry: The Prometheus address to be used. Defaults to ''.
:type registry: :class:`~prometheus_client.core.CollectorRegistry`
:param registry: A Prometheus collector registry instance.
"""
def __init__(self, namespace="", port=8000, address="", registry=REGISTRY):
self._namespace = namespace
self._registry = registry
self._port = int(port)
self._address = address
@property
def registry(self):
"""Prometheus Collector Registry instance"""
return self._registry
@property
def namespace(self):
"""Prefix to be used with view name"""
return self._namespace
@property
def port(self):
"""Port number to listen"""
return self._port
@property
def address(self):
"""Endpoint address (default is localhost)"""
return self._address
| Options |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess21.py | {
"start": 667,
"end": 923
} | class ____:
field1: ClassVar = Descriptor[str]()
field2: ClassVar = ""
def reset(self) -> None:
self.field1 = ""
# This should generate an error because field2 isn't
# a descriptor object.
self.field2 = ""
| Example |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/models.py | {
"start": 971,
"end": 1065
} | class ____(models.Field):
def db_type(self, connection):
return "char(1)"
| CharmField |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-that-sum-of-the-prices-is-less-than-or-equal-to-k.py | {
"start": 3190,
"end": 4078
} | class ____(object):
def findMaximumNumber(self, k, x):
"""
:type k: int
:type x: int
:rtype: int
"""
def binary_search_right(left, right, check):
while left <= right:
mid = left+(right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
def count(v):
cnt = i = 0
while 1<<(i+x-1) <= v:
q, r = divmod(v+1, 1<<((i+x-1)+1))
cnt += q*1*(1<<(i+x-1))+max(r-1*(1<<(i+x-1)), 0)
i += x
return cnt
return binary_search_right(1, max(k<<2, 1<<x), lambda v: count(v) <= k) # right bound is verified by checking all possible (k, v) values, or just set right = solution.findMaximumNumber(10**15, 8) <= 10**15
| Solution4 |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-nomic/llama_index/embeddings/nomic/base.py | {
"start": 5021,
"end": 8699
} | class ____(HuggingFaceEmbedding):
tokenizer_name: str = Field(description="Tokenizer name from HuggingFace.")
max_length: int = Field(
default=DEFAULT_HUGGINGFACE_LENGTH, description="Maximum length of input.", gt=0
)
pooling: Pooling = Field(default=Pooling.MEAN, description="Pooling strategy.")
normalize: bool = Field(default=True, description="Normalize embeddings or not.")
query_instruction: Optional[str] = Field(
description="Instruction to prepend to query text."
)
text_instruction: Optional[str] = Field(
description="Instruction to prepend to text."
)
cache_folder: Optional[str] = Field(
description="Cache folder for huggingface files."
)
dimensionality: Optional[int] = Field(description="Dimensionality of embedding")
_model: Any = PrivateAttr()
_tokenizer: Any = PrivateAttr()
_device: str = PrivateAttr()
def __init__(
self,
model_name: Optional[str] = None,
tokenizer_name: Optional[str] = None,
pooling: Union[str, Pooling] = "cls",
max_length: Optional[int] = None,
query_instruction: Optional[str] = None,
text_instruction: Optional[str] = None,
normalize: bool = True,
model: Optional[Any] = None,
tokenizer: Optional[Any] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
cache_folder: Optional[str] = None,
trust_remote_code: bool = False,
device: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
dimensionality: int = 768,
):
super().__init__(
model_name=model_name,
tokenizer_name=tokenizer_name,
pooling=pooling,
max_length=max_length,
query_instruction=query_instruction,
text_instruction=text_instruction,
normalize=normalize,
model=model,
tokenizer=tokenizer,
embed_batch_size=embed_batch_size,
cache_folder=cache_folder,
trust_remote_code=trust_remote_code,
device=device,
callback_manager=callback_manager,
)
self.dimensionality = dimensionality
self._model.eval()
def _embed(self, sentences: List[str]) -> List[List[float]]:
"""Embed sentences."""
encoded_input = self._tokenizer(
sentences,
padding=True,
max_length=self.max_length,
truncation=True,
return_tensors="pt",
)
# pop token_type_ids
encoded_input.pop("token_type_ids", None)
# move tokenizer inputs to device
encoded_input = {
key: val.to(self._device) for key, val in encoded_input.items()
}
with torch.no_grad():
model_output = self._model(**encoded_input)
if self.pooling == Pooling.CLS:
context_layer: "torch.Tensor" = model_output[0]
embeddings = self.pooling.cls_pooling(context_layer)
else:
embeddings = self._mean_pooling(
token_embeddings=model_output[0],
attention_mask=encoded_input["attention_mask"],
)
if self.normalize:
import torch.nn.functional as F
if self.model_name == "nomic-ai/nomic-embed-text-v1.5":
emb_ln = F.layer_norm(
embeddings, normalized_shape=(embeddings.shape[1],)
)
embeddings = emb_ln[:, : self.dimensionality]
embeddings = F.normalize(embeddings, p=2, dim=1)
return embeddings.tolist()
| NomicHFEmbedding |
python | ipython__ipython | IPython/core/interactiveshell.py | {
"start": 10612,
"end": 159736
} | class ____(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
_user_ns: dict
_sys_modules_keys: set[str]
inspector: oinspect.Inspector
ast_transformers: List[ast.NodeTransformer] = List(
[],
help="""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
""",
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
enable_tip = Bool(
True,
help="""
Set to show a tip when IPython starts.""",
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(
1000,
help="""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
""",
).tag(config=True)
debug = Bool(False).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
inspector_class = Type(
oinspect.Inspector, help="Class to use to instantiate the shell inspector"
).tag(config=True)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir = Unicode("").tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post: List = List(
[],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
warn_venv = Bool(
True,
help="Warn if running in a virtual environment with no IPython installed (so IPython from the global environment is used).",
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(
("Context", "Plain", "Verbose", "Minimal", "Docs"),
default_value="Context",
help="Switch modes for the IPython exception handlers.",
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance("IPython.core.alias.AliasManager", allow_none=True)
prefilter_manager = Instance(
"IPython.core.prefilter.PrefilterManager", allow_none=True
)
builtin_trap = Instance("IPython.core.builtin_trap.BuiltinTrap")
display_trap = Instance("IPython.core.display_trap.DisplayTrap")
extension_manager = Instance(
"IPython.core.extensions.ExtensionManager", allow_none=True
)
payload_manager = Instance("IPython.core.payload.PayloadManager", allow_none=True)
history_manager = Instance(
"IPython.core.history.HistoryAccessorBase", allow_none=True
)
magics_manager = Instance("IPython.core.magic.MagicsManager")
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
last_execution_succeeded = Bool(True, help='Did last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.raw_input_original = input
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
# The trio runner is used for running Trio in the foreground thread. It
# is different from `_trio_runner(async_fn)` in `async_helpers.py`
# which calls `trio.run()` for every cell. This runner runs all cells
# inside a single Trio event loop. If used, it is set from
# `ipykernel.kernelapp`.
self.trio_runner = None
self.showing_traceback = False
@property
def user_ns(self):
return self._user_ns
@user_ns.setter
def user_ns(self, ns: dict):
assert hasattr(ns, "clear")
assert isinstance(ns, dict)
self._user_ns = ns
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
def set_trio_runner(self, tr):
self.trio_runner = tr
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir = ProfileDir.create_profile_dir_by_name(
self.ipython_dir, "default"
)
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = self.compiler_class()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
# The files here are stored with Path from Pathlib
self.tempfiles = []
self.tempdirs = []
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
# Get system encoding at startup time. Certain terminals (like Emacs
# under Win32 have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
colors = Unicode(
"neutral", help="Set the color scheme (nocolor, neutral, linux, lightbg)."
).tag(config=True)
@validate("colors")
def _check_colors(self, proposal):
new = proposal["value"]
if not new == new.lower():
warn(
f"`TerminalInteractiveShell.colors` is now lowercase: `{new.lower()}`,"
" non lowercase, may be invalid in the future.",
DeprecationWarning,
stacklevel=2,
)
return new.lower()
@observe("colors")
def init_syntax_highlighting(self, changes=None):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser(theme_name=self.colors).format
self.pycolorize = lambda src: pyformat(src, "str")
if not hasattr(self, "inspector"):
self.inspector = self.inspector_class(
theme_name=self.colors,
str_detail_level=self.object_info_string_level,
parent=self,
)
try:
# Deprecation in 9.0, colors should always be lower
self.inspector.set_theme_name(self.colors.lower())
except Exception:
warn(
"Error changing object inspector color schemes.\n%s"
% (sys.exc_info()[1]),
stacklevel=2,
)
if hasattr(self, "InteractiveTB"):
self.InteractiveTB.set_theme_name(self.colors)
if hasattr(self, "SyntaxTB"):
self.SyntaxTB.set_theme_name(self.colors)
self.refresh_style()
def refresh_style(self):
# No-op here, used in subclass
pass
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self) -> None:
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self) -> None:
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.run_line_magic("logstart", f"{self.logappend} append")
elif self.logfile:
self.run_line_magic("logstart", self.logfile)
elif self.logstart:
self.run_line_magic("logstart", "")
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
def init_io(self):
# implemented in subclasses, TerminalInteractiveShell does call
# colorama.init().
pass
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self, shell=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
# This is a context manager that installs/removes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
@staticmethod
def get_path_links(p: Path):
"""Gets path links including all symlinks
Examples
--------
In [1]: from IPython.core.interactiveshell import InteractiveShell
In [2]: import sys, pathlib
In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable))
In [4]: len(paths) == len(set(paths))
Out[4]: True
In [5]: bool(paths)
Out[5]: True
"""
paths = [p]
while p.is_symlink():
new_path = Path(os.readlink(p))
if not new_path.is_absolute():
new_path = p.parent / new_path
p = new_path
paths.append(p)
return paths
def init_virtualenv(self):
"""Add the current virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
elif os.environ["VIRTUAL_ENV"] == "":
warn("Virtual env path set to '', please check if this is intended.")
return
p = Path(sys.executable)
p_venv = Path(os.environ["VIRTUAL_ENV"]).resolve()
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
paths = self.get_path_links(p)
# In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
if len(p_venv.parts) > 2 and p_venv.parts[1] == "cygdrive":
drive_name = p_venv.parts[2]
p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
if any(p_venv == p.parents[1].resolve() for p in paths):
# Our exe is inside or has access to the virtualenv, don't need to do anything.
return
if sys.platform == "win32":
virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
else:
virtual_env_path = Path(
os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
)
p_ver = sys.version_info[:2]
# Predict version from py[thon]-x.x in the $VIRTUAL_ENV
re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
if re_m:
predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
if predicted_path.exists():
p_ver = re_m.groups()
virtual_env = str(virtual_env_path).format(*p_ver)
if self.warn_venv:
warn(
"Attempting to work in a virtualenv. If you encounter problems, "
"please install IPython inside the virtualenv."
)
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
# Reset what what done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
elif self.enable_tip:
banner += "Tip: {tip}\n".format(tip=pick_tip())
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
print(banner, end="")
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name, getattr(hooks, hook_name), 100)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
behavior to call at runtime your own routines."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
    # True while IPython itself created the user namespaces; flipped to False
    # in init_create_namespaces() when an embedding application supplies its
    # own module and/or namespace dict.
    default_user_namespaces = True

    def init_create_namespaces(self, user_module=None, user_ns=None):
        """Create user_module/user_ns, the hidden-variable table, the %run
        module cache and the namespace lookup table.

        Parameters
        ----------
        user_module : module, optional
            Module whose ``__dict__`` serves as the user globals; created
            automatically when None (see :meth:`prepare_user_module`).
        user_ns : dict, optional
            Mapping used as the user locals.
        """
        # Create the namespace where the user will operate. user_ns is
        # normally the only one used, and it is passed to the exec calls as
        # the locals argument. But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument, This is useful in embedding
        # situations where the ipython shell opens in a context where the
        # distinction between locals and globals is meaningful. For
        # non-embedded contexts, it is just the same object as the user_ns dict.

        # FIXME. For some strange reason, __builtins__ is showing up at user
        # level as a dict instead of a module. This is a manual fix, but I
        # should really track down where the problem is coming from. Alex
        # Schmolck reported this problem first.

        # A useful post by Alex Martelli on this topic:
        # Re: inconsistent value from __builtins__
        # Von: Alex Martelli <aleaxit@yahoo.com>
        # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
        # Gruppen: comp.lang.python

        # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
        # > >>> print type(builtin_check.get_global_binding('__builtins__'))
        # > <type 'dict'>
        # > >>> print type(__builtins__)
        # > <type 'module'>
        # > Is this difference in return value intentional?

        # Well, it's documented that '__builtins__' can be either a dictionary
        # or a module, and it's been that way for a long time. Whether it's
        # intentional (or sensible), I don't know. In any case, the idea is
        # that if you need to access the built-in namespace directly, you
        # should start with "import __builtin__" (note, no 's') which will
        # definitely give you a module. Yeah, it's somewhat confusing:-(.

        # These routines return a properly built module and dict as needed by
        # the rest of the code, and can also be used by extension writers to
        # generate properly initialized namespaces.
        if (user_ns is not None) or (user_module is not None):
            self.default_user_namespaces = False
        self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)

        # A record of hidden variables we have added to the user namespace, so
        # we can list later only variables defined in actual interactive use.
        self.user_ns_hidden = {}

        # Now that FakeModule produces a real module, we've run into a nasty
        # problem: after script execution (via %run), the module where the user
        # code ran is deleted. Now that this object is a true module (needed
        # so doctest and other tools work correctly), the Python module
        # teardown mechanism runs over it, and sets to None every variable
        # present in that module. Top-level references to objects from the
        # script survive, because the user_ns is updated with them. However,
        # calling functions defined in the script that use other things from
        # the script will fail, because the function's closure had references
        # to the original objects, which are now all None. So we must protect
        # these modules from deletion by keeping a cache.
        #
        # To avoid keeping stale modules around (we only need the one from the
        # last run), we use a dict keyed with the full path to the script, so
        # only the last version of the module is held in the cache. Note,
        # however, that we must cache the module *namespace contents* (their
        # __dict__). Because if we try to cache the actual modules, old ones
        # (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived)>
        #
        # The %reset command will flush this cache. See the cache_main_mod()
        # and clear_main_mod_cache() methods for details on use.

        # This is the cache used for 'main' namespaces
        self._main_mod_cache = {}

        # A table holding all the namespaces IPython deals with, so that
        # introspection facilities can search easily.
        self.ns_table = {'user_global':self.user_module.__dict__,
                         'user_local':self.user_ns,
                         'builtin':builtin_mod.__dict__
                         }
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = make_main_module_type(user_ns)()
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
# This function works in two parts: first we put a few things in
# user_ns, and we sync that contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
if self.history_manager is not None:
ns["_ih"] = self.history_manager.input_hist_parsed
ns["_oh"] = self.history_manager.output_hist
ns["_dh"] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns["In"] = self.history_manager.input_hist_parsed
ns["Out"] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
ns["open"] = _modified_open
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
    def reset(self, new_session=True, aggressive=False):
        """Clear all internal namespaces, and attempt to release references to
        user objects.

        If new_session is True, a new history session will be opened.

        Parameters
        ----------
        new_session : bool
            Also open a fresh history session and restart the execution
            counter.
        aggressive : bool
            Additionally delete modules imported since the
            ``_sys_modules_keys`` snapshot was taken (multiprocessing
            modules are spared).  Prints a warning when no snapshot exists.
        """
        # Clear histories
        if self.history_manager is not None:
            self.history_manager.reset(new_session)
        # Reset counter used to index all histories
        if new_session:
            self.execution_count = 1
        # Reset last execution result
        self.last_execution_succeeded = True
        self.last_execution_result = None
        # Flush cached output items
        if self.displayhook.do_full_cache:
            self.displayhook.flush()
        # The main execution namespaces must be cleared very carefully,
        # skipping the deletion of the builtin-related keys, because doing so
        # would cause errors in many object's __del__ methods.
        if self.user_ns is not self.user_global_ns:
            self.user_ns.clear()
        ns = self.user_global_ns
        drop_keys = set(ns.keys())
        drop_keys.discard('__builtin__')
        drop_keys.discard('__builtins__')
        drop_keys.discard('__name__')
        for k in drop_keys:
            del ns[k]
        self.user_ns_hidden.clear()
        # Restore the user namespaces to minimal usability
        self.init_user_ns()
        if aggressive and not hasattr(self, "_sys_modules_keys"):
            print("Cannot restore sys.module, no snapshot")
        elif aggressive:
            print("culling sys module...")
            current_keys = set(sys.modules.keys())
            for k in current_keys - self._sys_modules_keys:
                # Removing multiprocessing's support modules would break live
                # worker pools, so leave them installed.
                if k.startswith("multiprocessing"):
                    continue
                del sys.modules[k]
        # Restore the default and user aliases
        self.alias_manager.clear_aliases()
        self.alias_manager.init_aliases()
        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontend
        if os.name == 'posix':
            for cmd in ('clear', 'more', 'less', 'man'):
                if cmd not in self.magics_manager.magics['line']:
                    self.alias_manager.soft_define_alias(cmd, cmd)
        # Flush the private list of module references kept for script
        # execution protection
        self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError as e:
raise NameError("name '%s' is not defined" % varname) from e
# Also check in output history
assert self.history_manager is not None
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError as e:
raise TypeError('regex must be a string or compiled pattern') from e
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
for var in ns:
if m.search(var):
del ns[var]
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
give (list/tuple/str) then the variable values looked up in the
callers frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = list(variables)
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
    @staticmethod
    def _find_parts(oname: str) -> Tuple[bool, ListType[str]]:
        """
        Given an object name, return a list of parts of this object name.

        Basically split on dots when using attribute access,
        and extract the value when using square bracket.

        For example foo.bar[3].baz[x] -> foo, bar, 3, baz, x

        Returns
        -------
        parts_ok: bool
            whether we were properly able to parse parts.
        parts: list of str
            extracted parts
        """
        raw_parts = oname.split(".")
        parts = []
        parts_ok = True
        for p in raw_parts:
            if p.endswith("]"):
                # Subscripted segment, e.g. "bar[3]" -> var "bar", index "3]".
                var, *indices = p.split("[")
                if not var.isidentifier():
                    parts_ok = False
                    break
                parts.append(var)
                for ind in indices:
                    # NOTE(review): the `and` accepts any index that ends with
                    # "]" even when it is not an integer string (e.g. foo[x]);
                    # presumably intentional so _ofind can try getattr later —
                    # confirm before changing to `or`.
                    if ind[-1] != "]" and not is_integer_string(ind[:-1]):
                        parts_ok = False
                        break
                    parts.append(ind[:-1])
                continue
            if not p.isidentifier():
                # Mark the failure but still record the part; callers see
                # parts_ok=False and bail out.
                parts_ok = False
            parts.append(p)
        return parts_ok, parts
    def _ofind(
        self, oname: str, namespaces: Optional[Sequence[Tuple[str, AnyType]]] = None
    ) -> OInfo:
        """Find an object in the available namespaces.

        Parameters
        ----------
        oname : str
            Dotted/bracketed name to resolve, possibly prefixed with the
            magic escape characters.
        namespaces : sequence of (label, dict), optional
            Namespaces to search; defaults to user locals, user globals and
            the Python builtins, in that order.

        Returns
        -------
        OInfo with fields:
          - ismagic
          - isalias
          - found
          - obj
          - namespace
          - parent

        Has special code to detect magic functions.
        """
        oname = oname.strip()
        parts_ok, parts = self._find_parts(oname)
        # Unparseable names can still be magics (e.g. "%run?"), so only bail
        # out when no magic escape is present either.
        if (
            not oname.startswith(ESC_MAGIC)
            and not oname.startswith(ESC_MAGIC2)
            and not parts_ok
        ):
            return OInfo(
                ismagic=False,
                isalias=False,
                found=False,
                obj=None,
                namespace=None,
                parent=None,
            )
        if namespaces is None:
            # Namespaces to search in:
            # Put them in a list. The order is important so that we
            # find things in the same order that Python finds them.
            namespaces = [ ('Interactive', self.user_ns),
                           ('Interactive (global)', self.user_global_ns),
                           ('Python builtin', builtin_mod.__dict__),
                           ]
        ismagic = False
        isalias = False
        found = False
        ospace = None
        parent = None
        obj = None
        # Look for the given name by splitting it in parts. If the head is
        # found, then we look for all the remaining parts as members, and only
        # declare success if we can find them all.
        oname_parts = parts
        oname_head, oname_rest = oname_parts[0],oname_parts[1:]
        for nsname,ns in namespaces:
            try:
                obj = ns[oname_head]
            except KeyError:
                continue
            else:
                for idx, part in enumerate(oname_rest):
                    try:
                        parent = obj
                        # The last part is looked up in a special way to avoid
                        # descriptor invocation as it may raise or have side
                        # effects.
                        if idx == len(oname_rest) - 1:
                            obj = self._getattr_property(obj, part)
                        else:
                            if is_integer_string(part):
                                obj = obj[int(part)]
                            else:
                                obj = getattr(obj, part)
                    except:
                        # Blanket except b/c some badly implemented objects
                        # allow __getattr__ to raise exceptions other than
                        # AttributeError, which then crashes IPython.
                        break
                else:
                    # If we finish the for loop (no break), we got all members
                    found = True
                    ospace = nsname
                    break  # namespace loop
        # Try to see if it's magic
        if not found:
            obj = None
            if oname.startswith(ESC_MAGIC2):
                oname = oname.lstrip(ESC_MAGIC2)
                obj = self.find_cell_magic(oname)
            elif oname.startswith(ESC_MAGIC):
                oname = oname.lstrip(ESC_MAGIC)
                obj = self.find_line_magic(oname)
            else:
                # search without prefix, so run? will find %run?
                obj = self.find_line_magic(oname)
                if obj is None:
                    obj = self.find_cell_magic(oname)
            if obj is not None:
                found = True
                ospace = 'IPython internal'
                ismagic = True
                isalias = isinstance(obj, Alias)
        # Last try: special-case some literals like '', [], {}, etc:
        if not found and oname_head in ["''",'""','[]','{}','()']:
            obj = eval(oname_head)
            found = True
            ospace = 'Interactive'
        return OInfo(
            obj=obj,
            found=found,
            parent=parent,
            ismagic=ismagic,
            isalias=isalias,
            namespace=ospace,
        )
    @staticmethod
    def _getattr_property(obj, attrname):
        """Property-aware getattr to use in object finding.

        If attrname represents a property, return it unevaluated (in case it
        has side effects or raises an error.

        Parameters
        ----------
        obj : object
            The instance (or class) to look *attrname* up on.
        attrname : str
            Attribute name; an integer string triggers index access instead.
        """
        if not isinstance(obj, type):
            try:
                # `getattr(type(obj), attrname)` is not guaranteed to return
                # `obj`, but does so for property:
                #
                #  property.__get__(self, None, cls) -> self
                #
                # The universal alternative is to traverse the mro manually
                # searching for attrname in class dicts.
                if is_integer_string(attrname):
                    return obj[int(attrname)]
                else:
                    attr = getattr(type(obj), attrname)
            except AttributeError:
                # Not defined on the class; fall through to plain getattr,
                # which will also consult the instance __dict__.
                pass
            else:
                # This relies on the fact that data descriptors (with both
                # __get__ & __set__ magic methods) take precedence over
                # instance-level attributes:
                #
                #    class A(object):
                #        @property
                #        def foobar(self): return 123
                #    a = A()
                #    a.__dict__['foobar'] = 345
                #    a.foobar  # == 123
                #
                # So, a property may be returned right away.
                if isinstance(attr, property):
                    return attr
        # Nothing helped, fall back.
        return getattr(obj, attrname)
def _object_find(self, oname, namespaces=None) -> OInfo:
"""Find an object and return a struct with info about it."""
return self._ofind(oname, namespaces)
    def _inspect(self, meth, oname: str, namespaces=None, **kw):
        """Generic interface to the inspector system.

        This function is meant to be called by pdef, pdoc & friends.

        Parameters
        ----------
        meth : str
            Name of the inspector method to dispatch to ('pdef', 'pdoc',
            'pinfo', ...).
        oname : str
            Name of the object to inspect.
        namespaces : optional
            Extra namespaces to search, forwarded to ``_object_find``.
        **kw
            Extra keyword arguments, forwarded only for 'pinfo'.

        Returns
        -------
        The string 'not found' when the object cannot be located (so callers
        can take other action); otherwise None.
        """
        info: OInfo = self._object_find(oname, namespaces)
        if self.sphinxify_docstring:
            if sphinxify is None:
                raise ImportError("Module ``docrepr`` required but missing")
            docformat = sphinxify(self.object_inspect(oname))
        else:
            docformat = None
        if info.found or hasattr(info.parent, oinspect.HOOK_NAME):
            pmethod = getattr(self.inspector, meth)
            # TODO: only apply format_screen to the plain/text repr of the mime
            # bundle.
            formatter = format_screen if info.ismagic else docformat
            if meth == 'pdoc':
                pmethod(info.obj, oname, formatter)
            elif meth == 'pinfo':
                pmethod(
                    info.obj,
                    oname,
                    formatter,
                    info,
                    enable_html_pager=self.enable_html_pager,
                    **kw,
                )
            else:
                pmethod(info.obj, oname)
        else:
            print('Object `%s` not found.' % oname)
            return 'not found'  # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
if self.sphinxify_docstring:
if sphinxify is None:
raise ImportError("Module ``docrepr`` required but missing")
docformat = sphinxify(self.object_inspect(oname))
else:
docformat = None
return self.inspector._get_info(
info.obj,
oname,
info=info,
detail_level=detail_level,
formatter=docformat,
omit_sections=omit_sections,
)
else:
raise KeyError(oname)
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
    # Debugger class the traceback handlers instantiate; subclasses may
    # substitute their own (e.g. a terminal-aware pdb variant).
    debugger_cls = InterruptiblePdb

    def init_traceback_handlers(self, custom_exceptions) -> None:
        """Create the syntax-error and interactive traceback handlers.

        Parameters
        ----------
        custom_exceptions : tuple
            ``(exc_tuple, handler)`` pair forwarded to
            :meth:`set_custom_exc`.
        """
        # Syntax error handler.
        self.SyntaxTB = ultratb.SyntaxTB(theme_name=self.colors)
        # The interactive one is initialized with an offset, meaning we always
        # want to remove the topmost item in the traceback, which is our own
        # internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
        self.InteractiveTB = ultratb.AutoFormattedTB(
            mode=self.xmode,
            theme_name=self.colors,
            tb_offset=1,
            debugger_cls=self.debugger_cls,
        )
        # The instance will store a pointer to the system-wide exception hook,
        # so that runtime code (such as magics) can access it. This is because
        # during the read-eval loop, it may get temporarily overwritten.
        self.sys_excepthook = sys.excepthook
        # and add any custom exception handlers the user may have specified
        self.set_custom_exc(*custom_exceptions)
        # Set the exception mode
        self.InteractiveTB.set_mode(mode=self.xmode)
    def set_custom_exc(self, exc_tuple, handler):
        """set_custom_exc(exc_tuple, handler)

        Set a custom exception handler, which will be called if any of the
        exceptions in exc_tuple occur in the mainloop (specifically, in the
        run_code() method).

        Parameters
        ----------
        exc_tuple : tuple of exception classes
            A *tuple* of exception classes, for which to call the defined
            handler. It is very important that you use a tuple, and NOT A
            LIST here, because of the way Python's except statement works. If
            you only want to trap a single exception, use a singleton tuple::

                exc_tuple == (MyCustomException,)

        handler : callable
            handler must have the following signature::

                def my_handler(self, etype, value, tb, tb_offset=None):
                    ...
                    return structured_traceback

            Your handler must return a structured traceback (a list of strings),
            or None.

            This will be made into an instance method (via types.MethodType)
            of IPython itself, and it will be called if any of the exceptions
            listed in the exc_tuple are caught. If the handler is None, an
            internal basic one is used, which just prints basic info.

            To protect IPython from crashes, if your handler ever raises an
            exception or returns an invalid result, it will be immediately
            disabled.

        Notes
        -----
        WARNING: by putting in your own exception handler into IPython's main
        execution loop, you run a very good chance of nasty crashes. This
        facility should only be used if you really know what you are doing.
        """
        if not isinstance(exc_tuple, tuple):
            raise TypeError("The custom exceptions must be given as a tuple.")

        def dummy_handler(self, etype, value, tb, tb_offset=None):
            # Fallback used when handler is None: just report the basics.
            print('*** Simple custom exception handler ***')
            print('Exception type :', etype)
            print('Exception value:', value)
            print('Traceback :', tb)

        def validate_stb(stb):
            """validate structured traceback return type

            return type of CustomTB *should* be a list of strings, but allow
            single strings or None, which are harmless.

            This function will *always* return a list of strings,
            and will raise a TypeError if stb is inappropriate.
            """
            msg = "CustomTB must return list of strings, not %r" % stb
            if stb is None:
                return []
            elif isinstance(stb, str):
                return [stb]
            elif not isinstance(stb, list):
                raise TypeError(msg)
            # it's a list
            for line in stb:
                # check every element
                if not isinstance(line, str):
                    raise TypeError(msg)
            return stb

        if handler is None:
            wrapped = dummy_handler
        else:
            def wrapped(self,etype,value,tb,tb_offset=None):
                """wrap CustomTB handler, to protect IPython from user code

                This makes it harder (but not impossible) for custom exception
                handlers to crash IPython.
                """
                try:
                    stb = handler(self,etype,value,tb,tb_offset=tb_offset)
                    return validate_stb(stb)
                except:
                    # clear custom handler immediately, so a handler that
                    # raises cannot keep firing and wedge the shell
                    self.set_custom_exc((), None)
                    print("Custom TB Handler failed, unregistering", file=sys.stderr)
                    # show the exception in handler first
                    stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
                    print(self.InteractiveTB.stb2text(stb))
                    print("The original exception:")
                    stb = self.InteractiveTB.structured_traceback(
                        etype, value, tb, tb_offset=tb_offset
                    )
                return stb

        # Bind the (possibly wrapped) handler as a method of the shell and
        # remember which exception types trigger it.
        self.CustomTB = types.MethodType(wrapped,self)
        self.custom_exceptions = exc_tuple
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
if sys.version_info >= (3, 12):
sys.last_exc = value
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
    def showtraceback(
        self,
        exc_tuple: tuple[type[BaseException], BaseException, Any] | None = None,
        filename: str | None = None,
        tb_offset: int | None = None,
        exception_only: bool = False,
        running_compiled_code: bool = False,
    ) -> None:
        """Display the exception that just occurred.

        If nothing is known about the exception, this is the method which
        should be used throughout the code for presenting user tracebacks,
        rather than directly invoking the InteractiveTB object.

        A specific showsyntaxerror() also exists, but this method can take
        care of calling it if needed, so unless you are explicitly catching a
        SyntaxError exception, don't try to analyze the stack manually and
        simply call this method."""
        try:
            try:
                etype, value, tb = self._get_exc_info(exc_tuple)
            except ValueError:
                print('No traceback available to show.', file=sys.stderr)
                return
            if issubclass(etype, SyntaxError):
                # Though this won't be called by syntax errors in the input
                # line, there may be SyntaxError cases with imported code.
                self.showsyntaxerror(filename, running_compiled_code)
            elif etype is UsageError:
                self.show_usage_error(value)
            else:
                if exception_only:
                    stb = ['An exception has occurred, use %tb to see '
                           'the full traceback.\n']
                    stb.extend(self.InteractiveTB.get_exception_only(etype,
                                                                     value))
                else:
                    def contains_exceptiongroup(val):
                        # Walk the __context__ chain looking for an
                        # exception group anywhere in it.
                        if val is None:
                            return False
                        return isinstance(
                            val, BaseExceptionGroup
                        ) or contains_exceptiongroup(val.__context__)
                    if contains_exceptiongroup(value):
                        # fall back to native exception formatting until ultratb
                        # supports exception groups
                        # NOTE(review): this path leaves `stb` unbound before
                        # the final _showtraceback below — confirm it is
                        # unreachable or returns earlier in practice.
                        traceback.print_exc()
                    else:
                        try:
                            # Exception classes can customise their traceback - we
                            # use this in IPython.parallel for exceptions occurring
                            # in the engines. This should return a list of strings.
                            if hasattr(value, "_render_traceback_"):
                                stb = value._render_traceback_()
                            else:
                                stb = self.InteractiveTB.structured_traceback(
                                    etype, value, tb, tb_offset=tb_offset
                                )
                        except Exception:
                            print(
                                "Unexpected exception formatting exception. Falling back to standard exception"
                            )
                            traceback.print_exc()
                            return None
                        self._showtraceback(etype, value, stb)
                        if self.call_pdb:
                            # drop into debugger
                            self.debugger(force=True)
                        return
                # Actually show the traceback
                self._showtraceback(etype, value, stb)
        except KeyboardInterrupt:
            print('\n' + self.get_exception_only(), file=sys.stderr)
def _showtraceback(self, etype, evalue, stb: list[str]):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
val = self.InteractiveTB.stb2text(stb)
self.showing_traceback = True
try:
print(val)
except UnicodeEncodeError:
print(val.encode("utf-8", "backslashreplace").decode())
self.showing_traceback = False
def showsyntaxerror(self, filename=None, running_compiled_code=False):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
If the syntax error occurred when running a compiled code (i.e. running_compile_code=True),
longer stack trace will be displayed.
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
# If the error occurred when executing compiled code, we should provide full stacktrace.
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
    def showindentationerror(self):
        """Called by _run_cell when there's an IndentationError in code entered
        at the prompt.

        This is overridden in TerminalInteractiveShell to show a message about
        the %paste magic."""
        # IndentationError is a subclass of SyntaxError, so the syntax-error
        # display path handles it directly.
        self.showsyntaxerror()
    @skip_doctest
    def set_next_input(self, s, replace=False):
        """ Sets the 'default' input string for the next command line.

        Example::

            In [1]: _ip.set_next_input("Hello Word")
            In [2]: Hello Word_  # cursor is here
        """
        # NOTE(review): ``replace`` is accepted but unused here — presumably
        # honored by frontends/subclasses; confirm before removing.
        self.rl_next_input = s
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
    def init_completer(self):
        """Initialize the completion machinery.

        This creates completion machinery that can be used by client code,
        either interactively in-process (typically triggered by the readline
        library), programmatically (such as in test suites) or out-of-process
        (typically over the network by remote frontends).
        """
        from IPython.core.completer import IPCompleter
        from IPython.core.completerlib import (
            cd_completer,
            magic_run_completer,
            module_completer,
            reset_completer,
        )

        self.Completer = IPCompleter(shell=self,
                                     namespace=self.user_ns,
                                     global_namespace=self.user_global_ns,
                                     parent=self,
                                     )
        self.configurables.append(self.Completer)

        # Add custom completers to the basic ones built into IPCompleter
        sdisp = self.strdispatchers.get('complete_command', StrDispatch())
        self.strdispatchers['complete_command'] = sdisp
        self.Completer.custom_completers = sdisp

        # Per-command completers: module names for the import machinery, and
        # specialised argument completers for %run, %cd and %reset.
        self.set_hook('complete_command', module_completer, str_key = 'import')
        self.set_hook('complete_command', module_completer, str_key = 'from')
        self.set_hook('complete_command', module_completer, str_key = '%aimport')
        self.set_hook('complete_command', magic_run_completer, str_key = '%run')
        self.set_hook('complete_command', cd_completer, str_key = '%cd')
        self.set_hook('complete_command', reset_completer, str_key = '%reset')
    @skip_doctest
    def complete(self, text, line=None, cursor_pos=None):
        """Return the completed text and a list of completions.

        Parameters
        ----------
        text : string
            A string of text to be completed on.  It can be given as empty and
            instead a line/position pair are given.  In this case, the
            completer itself will split the line like readline does.
        line : string, optional
            The complete line that text is part of.
        cursor_pos : int, optional
            The position of the cursor on the input line.

        Returns
        -------
        text : string
            The actual text that was completed.
        matches : list
            A sorted list with all possible completions.

        Notes
        -----
        The optional arguments allow the completion to take more context into
        account, and are part of the low-level completion API.

        This is a wrapper around the completion mechanism, similar to what
        readline does at the command line when the TAB key is hit.  By
        exposing it as a method, it can be used by other non-readline
        environments (such as GUIs) for text completion.

        Examples
        --------
        In [1]: x = 'hello'

        In [2]: _ip.complete('x.l')
        Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
        """
        # Inject names into __builtin__ so we can complete on the added names.
        with self.builtin_trap:
            return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted.
`completer` should have the following signature::
def completion(self: Completer, text: string) -> List[str]:
raise NotImplementedError
It will be bound to the current Completer instance and pass some text
and return a list with current completions to suggest to the user.
"""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
    def init_magics(self):
        """Create the MagicsManager, register the built-in magic classes,
        magic aliases, and apply the configured color scheme."""
        from IPython.core import magics as m

        self.magics_manager = magic.MagicsManager(shell=self,
                                                  parent=self,
                                                  user_magics=m.UserMagics(self))
        self.configurables.append(self.magics_manager)

        # Expose as public API from the magics manager
        self.register_magics = self.magics_manager.register

        self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
            m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
            m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
            m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
            m.PylabMagics, m.ScriptMagics,
        )
        self.register_magics(m.AsyncMagics)

        # Register Magic Aliases
        mman = self.magics_manager
        # FIXME: magic aliases should be defined by the Magics classes
        # or in MagicsManager, not here
        mman.register_alias('ed', 'edit')
        mman.register_alias('hist', 'history')
        mman.register_alias('rep', 'recall')
        mman.register_alias('SVG', 'svg', 'cell')
        mman.register_alias('HTML', 'html', 'cell')
        mman.register_alias('file', 'writefile', 'cell')

        # FIXME: Move the color initialization to the DisplayHook, which
        # should be split into a prompt manager and displayhook. We probably
        # even need a centralize colors management object.
        self.run_line_magic('colors', self.colors)
    # Defined here so that it's included in the documentation
    @functools.wraps(magic.MagicsManager.register_function)
    def register_magic_function(self, func, magic_kind='line', magic_name=None):
        # Thin delegation: the signature and docstring come from
        # MagicsManager.register_function via functools.wraps.
        self.magics_manager.register_function(
            func, magic_kind=magic_kind, magic_name=magic_name
        )
def _find_with_lazy_load(self, /, type_, magic_name: str):
"""
Try to find a magic potentially lazy-loading it.
Parameters
----------
type_: "line"|"cell"
the type of magics we are trying to find/lazy load.
magic_name: str
The name of the magic we are trying to find/lazy load
Note that this may have any side effects
"""
finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
fn = finder(magic_name)
if fn is not None:
return fn
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy is None:
return None
self.run_line_magic("load_ext", lazy)
res = finder(magic_name)
return res
    def run_line_magic(self, magic_name: str, line: str, _stack_depth=1):
        """Execute the given line magic.

        Parameters
        ----------
        magic_name : str
            Name of the desired magic function, without '%' prefix.
        line : str
            The rest of the input line as a single string.
        _stack_depth : int
            If run_line_magic() is called from magic() then _stack_depth=2.
            This is added to ensure backward compatibility for use of 'get_ipython().magic()'

        Raises
        ------
        UsageError
            If no line magic (lazy or registered) with that name exists.
        """
        fn = self._find_with_lazy_load("line", magic_name)
        if fn is None:
            # NOTE(review): _find_with_lazy_load already attempted this
            # lazy-load fallback, so this retry looks redundant — confirm
            # before removing.
            lazy = self.magics_manager.lazy_magics.get(magic_name)
            if lazy:
                self.run_line_magic("load_ext", lazy)
                fn = self.find_line_magic(magic_name)
        if fn is None:
            # Suggest the cell-magic spelling if only that form exists.
            cm = self.find_cell_magic(magic_name)
            etpl = "Line magic function `%%%s` not found%s."
            extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
                                           'did you mean that instead?)' % magic_name )
            raise UsageError(etpl % (magic_name, extra))
        else:
            # Note: this is the distance in the stack to the user's frame.
            # This will need to be updated if the internal calling logic gets
            # refactored, or else we'll be expanding the wrong variables.

            # Determine stack_depth depending on where run_line_magic() has been called
            stack_depth = _stack_depth
            if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
                # magic has opted out of var_expand
                magic_arg_s = line
            else:
                magic_arg_s = self.var_expand(line, stack_depth)
            # Put magic args in a list so we can call with f(*a) syntax
            args = [magic_arg_s]
            kwargs = {}
            # Grab local namespace if we need it:
            if getattr(fn, "needs_local_scope", False):
                kwargs['local_ns'] = self.get_local_scope(stack_depth)
            with self.builtin_trap:
                result = fn(*args, **kwargs)

            # The code below prevents the output from being displayed
            # when using magics with decorator @output_can_be_silenced
            # when the last Python token in the expression is a ';'.
            if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
                if DisplayHook.semicolon_at_end_of_expression(magic_arg_s):
                    return None

            return result
def get_local_scope(self, stack_depth):
"""Get local scope at given stack depth.
Parameters
----------
stack_depth : int
Depth relative to calling frame
"""
return sys._getframe(stack_depth + 1).f_locals
    def run_cell_magic(self, magic_name, line, cell):
        """Execute the given cell magic.

        Parameters
        ----------
        magic_name : str
            Name of the desired magic function, without '%' prefix.
        line : str
            The rest of the first input line as a single string.
        cell : str
            The body of the cell as a (possibly multiline) string.

        Raises
        ------
        UsageError
            If the magic cannot be found, or the cell body is empty.
        """
        fn = self._find_with_lazy_load("cell", magic_name)
        if fn is None:
            # Help users who typed %%foo when only a line magic %foo exists.
            lm = self.find_line_magic(magic_name)
            etpl = "Cell magic `%%{0}` not found{1}."
            extra = '' if lm is None else (' (But line magic `%{0}` exists, '
                                           'did you mean that instead?)'.format(magic_name))
            raise UsageError(etpl.format(magic_name, extra))
        elif cell == '':
            message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
            if self.find_line_magic(magic_name) is not None:
                message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
            raise UsageError(message)
        else:
            # Note: this is the distance in the stack to the user's frame.
            # This will need to be updated if the internal calling logic gets
            # refactored, or else we'll be expanding the wrong variables.
            stack_depth = 2
            if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
                # magic has opted out of var_expand
                magic_arg_s = line
            else:
                magic_arg_s = self.var_expand(line, stack_depth)
            kwargs = {}
            if getattr(fn, "needs_local_scope", False):
                kwargs['local_ns'] = self.user_ns

            with self.builtin_trap:
                args = (magic_arg_s, cell)
                result = fn(*args, **kwargs)

            # The code below prevents the output from being displayed
            # when using magics with decorator @output_can_be_silenced
            # when the last Python token in the expression is a ';'.
            if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
                if DisplayHook.semicolon_at_end_of_expression(cell):
                    return None

            return result
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported. Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
    def system_raw(self, cmd):
        """Call the given cmd in a subprocess using os.system on Windows or
        subprocess.call using the system shell on other platforms.

        Parameters
        ----------
        cmd : str
            Command to execute.
        """
        cmd = self.var_expand(cmd, depth=1)
        # warn if there is an IPython magic alternative.
        if cmd == "":
            main_cmd = ""
        else:
            main_cmd = cmd.split()[0]
        has_magic_alternatives = ("pip", "conda", "cd")

        if main_cmd in has_magic_alternatives:
            warnings.warn(
                (
                    "You executed the system command !{0} which may not work "
                    "as expected. Try the IPython magic %{0} instead."
                ).format(main_cmd)
            )

        # protect os.system from UNC paths on Windows, which it can't handle:
        if sys.platform == 'win32':
            from IPython.utils._process_win32 import AvoidUNCPath

            with AvoidUNCPath() as path:
                if path is not None:
                    # Run from a safe drive-letter path instead of the UNC path.
                    cmd = '"pushd %s &&"%s' % (path, cmd)
                try:
                    ec = os.system(cmd)
                except KeyboardInterrupt:
                    print('\n' + self.get_exception_only(), file=sys.stderr)
                    ec = -2
        else:
            # For posix the result of the subprocess.call() below is an exit
            # code, which by convention is zero for success, positive for
            # program failure.  Exit codes above 128 are reserved for signals,
            # and the formula for converting a signal to an exit code is usually
            # signal_number+128.  To more easily differentiate between exit
            # codes and signals, ipython uses negative numbers.  For instance
            # since control-c is signal 2 but exit code 130, ipython's
            # _exit_code variable will read -2.  Note that some shells like
            # csh and fish don't follow sh/bash conventions for exit codes.
            executable = os.environ.get('SHELL', None)
            try:
                # Use env shell instead of default /bin/sh
                ec = subprocess.call(cmd, shell=True, executable=executable)
            except KeyboardInterrupt:
                # intercept control-C; a long traceback is not useful here
                print('\n' + self.get_exception_only(), file=sys.stderr)
                ec = 130
            if ec > 128:
                # Map signal-style exit codes (signal+128) to -signal.
                ec = -(ec - 128)

        # We explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns.  Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGNIT,
        # but raising SystemExit(_exit_code) will give status 254!
        self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported.
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
    def init_alias(self):
        """Create the alias manager and register it as a configurable."""
        self.alias_manager = AliasManager(shell=self, parent=self)
        self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
    def init_extension_manager(self):
        """Create the extension manager and register it as a configurable."""
        self.extension_manager = ExtensionManager(shell=self, parent=self)
        self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
    def init_payload(self):
        """Create the payload manager and register it as a configurable."""
        self.payload_manager = PayloadManager(parent=self)
        self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
    def init_prefilter(self):
        """Create the prefilter manager and expose the legacy entry point."""
        self.prefilter_manager = PrefilterManager(shell=self, parent=self)
        self.configurables.append(self.prefilter_manager)
        # Ultimately this will be refactored in the new interpreter code, but
        # for now, we should expose the main prefilter method (there's legacy
        # code out there that may rely on this).
        self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
"status": "error",
"traceback": stb,
"ename": etype.__name__,
"evalue": py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
    def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
        """A safe version of the builtin execfile().

        This version will never throw an exception, but instead print
        helpful error messages to the screen.  This only works on pure
        Python files with the .py extension.

        Parameters
        ----------
        fname : string
            The name of the file to be executed.
        *where : tuple
            One or two namespaces, passed to execfile() as (globals,locals).
            If only one is given, it is passed as both.
        exit_ignore : bool (False)
            If True, then silence SystemExit for non-zero status (it is always
            silenced for zero status, as it is so common).
        raise_exceptions : bool (False)
            If True raise exceptions everywhere. Meant for testing.
        shell_futures : bool (False)
            If True, the code will share future statements with the interactive
            shell. It will both be affected by previous __future__ imports, and
            any __future__ imports in the code will affect the shell. If False,
            __future__ imports are not shared in either direction.
        """
        fname = Path(fname).expanduser().resolve()

        # Make sure we can open the file
        try:
            with fname.open("rb"):
                pass
        except:
            # Deliberately broad: this method promises never to raise.
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory. This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = str(fname.parent)

        with prepended_to_syspath(dname), self.builtin_trap:
            try:
                # Pad *where to (globals, locals); a single given namespace
                # serves as both, matching execfile() semantics.
                glob, loc = (where + (None, ))[:2]
                py3compat.execfile(
                    fname, glob, loc,
                    self.compile if shell_futures else None)
            except SystemExit as status:
                # If the call was made with 0 or None exit status (sys.exit(0)
                # or sys.exit() ), don't bother showing a traceback, as both of
                # these are considered normal by the OS:
                # > python -c'import sys;sys.exit(0)'; echo $?
                # 0
                # > python -c'import sys;sys.exit()'; echo $?
                # 0
                # For other exit status, we show the exception unless
                # explicitly silenced, but only in short form.
                if status.code:
                    if raise_exceptions:
                        raise
                    if not exit_ignore:
                        self.showtraceback(exception_only=True)
            except:
                if raise_exceptions:
                    raise
                # tb offset is 2 because we wrap execfile
                self.showtraceback(tb_offset=2)
    def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
        """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.

        Parameters
        ----------
        fname : str
            The name of the file to execute.  The filename must have a
            .ipy or .ipynb extension.
        shell_futures : bool (False)
            If True, the code will share future statements with the interactive
            shell. It will both be affected by previous __future__ imports, and
            any __future__ imports in the code will affect the shell. If False,
            __future__ imports are not shared in either direction.
        raise_exceptions : bool (False)
            If True raise exceptions everywhere.  Meant for testing.
        """
        fname = Path(fname).expanduser().resolve()

        # Make sure we can open the file
        try:
            with fname.open("rb"):
                pass
        except:
            # Deliberately broad: this method promises never to raise.
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory. This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = str(fname.parent)

        def get_cells():
            """generator for sequence of code blocks to run"""
            if fname.suffix == ".ipynb":
                from nbformat import read
                nb = read(fname, as_version=4)
                if not nb.cells:
                    return
                # Only code cells are executed; markdown/raw cells are skipped.
                for cell in nb.cells:
                    if cell.cell_type == 'code':
                        yield cell.source
            else:
                # Plain .ipy files run as a single block.
                yield fname.read_text(encoding="utf-8")

        with prepended_to_syspath(dname):
            try:
                for cell in get_cells():
                    result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
                    if raise_exceptions:
                        result.raise_error()
                    elif not result.success:
                        # Stop at the first failing cell, like notebook "Run All".
                        break
            except:
                if raise_exceptions:
                    raise
                self.showtraceback()
                warn('Unknown failure executing file: <%s>' % fname)
    def safe_run_module(self, mod_name, where):
        """A safe version of runpy.run_module().

        This version will never throw an exception, but instead print
        helpful error messages to the screen.

        `SystemExit` exceptions with status code 0 or None are ignored.

        Parameters
        ----------
        mod_name : string
            The name of the module to be executed.
        where : dict
            The globals namespace.
        """
        try:
            try:
                where.update(
                    runpy.run_module(str(mod_name), run_name="__main__",
                                     alter_sys=True)
                )
            except SystemExit as status:
                # sys.exit(0)/sys.exit() are normal termination; only non-zero
                # statuses are re-raised and reported by the outer handler.
                if status.code:
                    raise
        except:
            # Deliberately broad: this method promises never to raise.
            self.showtraceback()
            warn('Unknown failure executing module: <%s>' % mod_name)
@contextmanager
def _tee(self, channel: Literal["stdout", "stderr"]):
"""Capture output of a given standard stream and store it in history.
Uses patching of write method for maximal compatibility,
because ipykernel checks for instances of the stream class,
and stream classes in ipykernel implement more complex logic.
"""
stream = getattr(sys, channel)
original_write = stream.write
def write(data, *args, **kwargs):
"""Write data to both the original destination and the capture dictionary."""
result = original_write(data, *args, **kwargs)
if any(
[
self.display_pub.is_publishing,
self.displayhook.is_active,
self.showing_traceback,
]
):
return result
if not data:
return result
execution_count = self.execution_count
output_stream = None
outputs_by_counter = self.history_manager.outputs
output_type = "out_stream" if channel == "stdout" else "err_stream"
if execution_count in outputs_by_counter:
outputs = outputs_by_counter[execution_count]
if outputs[-1].output_type == output_type:
output_stream = outputs[-1]
if output_stream is None:
output_stream = HistoryOutput(
output_type=output_type, bundle={"stream": []}
)
outputs_by_counter[execution_count].append(output_stream)
output_stream.bundle["stream"].append(data) # Append to existing stream
return result
stream.write = write
yield
stream.write = original_write
    def run_cell(
        self,
        raw_cell,
        store_history=False,
        silent=False,
        shell_futures=True,
        cell_id=None,
    ):
        """Run a complete IPython cell.

        Parameters
        ----------
        raw_cell : str
            The code (including IPython code such as %magic functions) to run.
        store_history : bool
            If True, the raw and translated cell will be stored in IPython's
            history. For user code calling back into IPython's machinery, this
            should be set to False.
        silent : bool
            If True, avoid side-effects, such as implicit displayhooks and
            and logging.  silent=True forces store_history=False.
        shell_futures : bool
            If True, the code will share future statements with the interactive
            shell. It will both be affected by previous __future__ imports, and
            any __future__ imports in the code will affect the shell. If False,
            __future__ imports are not shared in either direction.
        cell_id : str, optional
            A unique identifier for the cell. This is used in the messaging system
            to match output with execution requests and for tracking cell execution
            history across kernel restarts. In notebook contexts, this is typically
            a UUID generated by the frontend. If None, the kernel may generate an
            internal identifier or proceed without cell tracking capabilities.

        Returns
        -------
        result : :class:`ExecutionResult`
        """
        result = None
        # Tee both streams so printed output is recorded in history while the
        # cell executes (see _tee).
        with self._tee(channel="stdout"), self._tee(channel="stderr"):
            try:
                result = self._run_cell(
                    raw_cell, store_history, silent, shell_futures, cell_id
                )
            finally:
                # post_execute always fires; post_run_cell only for non-silent
                # execution.  ``result`` may still be None if _run_cell raised.
                self.events.trigger("post_execute")
                if not silent:
                    self.events.trigger("post_run_cell", result)
        return result
    def _run_cell(
        self,
        raw_cell: str,
        store_history: bool,
        silent: bool,
        shell_futures: bool,
        cell_id: str,
    ) -> ExecutionResult:
        """Internal method to run a complete IPython cell.

        Transforms the cell once, picks an appropriate coroutine runner,
        and converts even runner-level failures into an ExecutionResult.
        """

        # we need to avoid calling self.transform_cell multiple time on the same thing
        # so we need to store some results:
        preprocessing_exc_tuple = None
        try:
            transformed_cell = self.transform_cell(raw_cell)
        except Exception:
            # Keep the raw cell so downstream code still has something to
            # log/store; the failure travels in preprocessing_exc_tuple.
            transformed_cell = raw_cell
            preprocessing_exc_tuple = sys.exc_info()

        assert transformed_cell is not None
        coro = self.run_cell_async(
            raw_cell,
            store_history=store_history,
            silent=silent,
            shell_futures=shell_futures,
            transformed_cell=transformed_cell,
            preprocessing_exc_tuple=preprocessing_exc_tuple,
            cell_id=cell_id,
        )

        # run_cell_async is async, but may not actually need an eventloop.
        # when this is the case, we want to run it using the pseudo_sync_runner
        # so that code can invoke eventloops (for example via the %run , and
        # `%paste` magic.
        if self.trio_runner:
            runner = self.trio_runner
        elif self.should_run_async(
            raw_cell,
            transformed_cell=transformed_cell,
            preprocessing_exc_tuple=preprocessing_exc_tuple,
        ):
            runner = self.loop_runner
        else:
            runner = _pseudo_sync_runner

        try:
            result = runner(coro)
        except BaseException as e:
            # Even fatal runner errors are folded into an ExecutionResult so
            # callers always receive a result object.
            try:
                info = ExecutionInfo(
                    raw_cell,
                    store_history,
                    silent,
                    shell_futures,
                    cell_id,
                    transformed_cell=transformed_cell,
                )
                result = ExecutionResult(info)
                result.error_in_exec = e
                self.showtraceback(running_compiled_code=True)
            except:
                # Best effort only; never let error reporting itself raise.
                pass
        return result
    def should_run_async(
        self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
    ) -> bool:
        """Return whether a cell should be run asynchronously via a coroutine runner

        Parameters
        ----------
        raw_cell : str
            The code to be executed

        Returns
        -------
        result: bool
            Whether the code needs to be run with a coroutine runner or not

        .. versionadded:: 7.0
        """
        if not self.autoawait:
            return False
        if preprocessing_exc_tuple is not None:
            # The transform already failed; the error surfaces at execution
            # time, so there is nothing async to run.
            return False
        # NOTE(review): trivially true after the early return above.
        assert preprocessing_exc_tuple is None
        if transformed_cell is None:
            warnings.warn(
                "`should_run_async` will not call `transform_cell`"
                " automatically in the future. Please pass the result to"
                " `transformed_cell` argument and any exception that happen"
                " during the"
                "transform in `preprocessing_exc_tuple` in"
                " IPython 7.17 and above.",
                DeprecationWarning,
                stacklevel=2,
            )
            try:
                cell = self.transform_cell(raw_cell)
            except Exception:
                # any exception during transform will be raised
                # prior to execution
                return False
        else:
            cell = transformed_cell
        return _should_be_async(cell)
async def run_cell_async(
self,
raw_cell: str,
store_history=False,
silent=False,
shell_futures=True,
*,
transformed_cell: Optional[str] = None,
preprocessing_exc_tuple: Optional[AnyType] = None,
cell_id=None,
) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
and logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
transformed_cell: str
cell that was passed through transformers
preprocessing_exc_tuple:
trace if the transformation failed.
Returns
-------
result : :class:`ExecutionResult`
.. versionadded:: 7.0
"""
info = ExecutionInfo(
raw_cell,
store_history,
silent,
shell_futures,
cell_id,
transformed_cell=transformed_cell,
)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
if self.history_manager:
# Store formatted traceback and error details
self.history_manager.exceptions[
self.execution_count
] = self._format_exception_for_storage(value)
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
if transformed_cell is None:
warnings.warn(
"`run_cell_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
else:
if preprocessing_exc_tuple is None:
cell = transformed_cell
else:
cell = raw_cell
# Do NOT store paste/cpaste magic history
if "get_ipython().run_line_magic(" in cell and "paste" in cell:
store_history = False
# Store raw and processed history
if store_history:
assert self.history_manager is not None
self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else self.compiler_class()
with self.builtin_trap:
cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
with self.display_trap:
# Compile to bytecode
try:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
assert self.history_manager is not None
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
exec_count = self.execution_count
if result.error_in_exec:
# Store formatted traceback and error details
self.history_manager.exceptions[
exec_count
] = self._format_exception_for_storage(result.error_in_exec)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def _format_exception_for_storage(
self, exception, filename=None, running_compiled_code=False
):
"""
Format an exception's traceback and details for storage, with special handling
for different types of errors.
"""
etype = type(exception)
evalue = exception
tb = exception.__traceback__
# Handle SyntaxError and IndentationError with specific formatting
if issubclass(etype, (SyntaxError, IndentationError)):
if filename and isinstance(evalue, SyntaxError):
try:
evalue.filename = filename
except:
pass # Keep the original filename if modification fails
# Extract traceback if the error happened during compiled code execution
elist = traceback.extract_tb(tb) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, evalue, elist)
# Handle UsageError with a simple message
elif etype is UsageError:
stb = [f"UsageError: {evalue}"]
else:
# Check if the exception (or its context) is an ExceptionGroup.
def contains_exceptiongroup(val):
if val is None:
return False
return isinstance(val, BaseExceptionGroup) or contains_exceptiongroup(
val.__context__
)
if contains_exceptiongroup(evalue):
# Fallback: use the standard library's formatting for exception groups.
stb = traceback.format_exception(etype, evalue, tb)
else:
try:
# If the exception has a custom traceback renderer, use it.
if hasattr(evalue, "_render_traceback_"):
stb = evalue._render_traceback_()
else:
# Otherwise, use InteractiveTB to format the traceback.
stb = self.InteractiveTB.structured_traceback(
etype, evalue, tb, tb_offset=1
)
except Exception:
# In case formatting fails, fallback to Python's built-in formatting.
stb = traceback.format_exception(etype, evalue, tb)
return {"ename": etype.__name__, "evalue": str(evalue), "traceback": stb}
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception as e:
warn(
"AST transformer %r threw an error. It will be unregistered. %s"
% (transformer, e)
)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
async def run_ast_nodes(
self,
nodelist: ListType[stmt],
cell_name: str,
interactivity="last_expr",
compiler=compile,
result=None,
):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
specifying which nodes should be run interactively (displaying output
from expressions). 'last_expr' will run the last node interactively
only if it is an expression (i.e. expressions in loops or other blocks
are not displayed) 'last_expr_or_assign' will run the last expression
or the last assignment. Other values for this parameter will raise a
ValueError.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
if not nodelist:
return
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
def compare(code):
is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
return is_async
# refactor that to just change the mod constructor.
to_run = []
for node in to_run_exec:
to_run.append((node, "exec"))
for node in to_run_interactive:
to_run.append((node, "single"))
for node, mode in to_run:
if mode == "exec":
mod = Module([node], [])
elif mode == "single":
mod = ast.Interactive([node])
with compiler.extra_flags(
getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
if self.autoawait
else 0x0
):
code = compiler(mod, cell_name, mode)
asy = compare(code)
if await self.run_code(code, result, async_=asy):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
async def run_code(self, code_obj, result=None, *, async_=False):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
async_ : Bool (Experimental)
Attempt to run top-level asynchronous code in a default loop.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# special value to say that anything above is IPython and should be
# hidden.
__tracebackhide__ = "__ipython_bottom__"
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = True # happens in more places, so it's easier as default
try:
try:
if async_:
await eval(code_obj, self.user_global_ns, self.user_ns)
else:
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
except bdb.BdbQuit:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
# the BdbQuit stops here
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback(running_compiled_code=True)
else:
outflag = False
return outflag
# For backwards compatibility
runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
code : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop: Optional[str] = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
"""Enable interactive matplotlib and inline figure support.
This takes the following steps:
1. select the appropriate eventloop and matplotlib backend
2. set up matplotlib for interactive use with that backend
3. configure formatters for inline figure display
4. enable the selected gui eventloop
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
"""
from .pylabtools import _matplotlib_manages_backends
if not _matplotlib_manages_backends() and gui in (None, "auto"):
# Early import of backend_inline required for its side effect of
# calling _enable_matplotlib_integration()
import matplotlib_inline.backend_inline
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != None:
# If we have our first gui selection, store it
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
# Otherwise if they are different
elif gui != self.pylab_gui_select:
print('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
from matplotlib_inline.backend_inline import configure_inline_support
configure_inline_support(self, backend)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
optionally selected with the optional ``gui`` argument.
This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp),
but it registers the created filename internally so ipython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dir_path = Path(tempfile.mkdtemp(prefix=prefix))
self.tempdirs.append(dir_path)
handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
os.close(handle) # On Windows, there can only be one open handle on a file
file_path = Path(filename)
self.tempfiles.append(file_path)
if data:
file_path.write_text(data, encoding="utf-8")
return filename
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : str
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
If empty string is given, returns history of current session
without the last input.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
text = "\n".join(x for _, _, x in lines)
# Skip the last line, as it's probably the magic that called this
if not range_str:
if "\n" not in text:
text = ""
else:
text = text[: text.rfind("\n")]
return text
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
If empty string is given, returns complete history of current
session, without the last line.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch python code, do not try alternative methods to decode file
if unicode fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
# Deferred import
from urllib.request import urlopen
response = urlopen(target)
return response.read().decode('latin1')
raise ValueError(("'%s' seem to be unreadable.") % target) from e
potential_target = [target]
try :
potential_target.insert(0,get_py_filename(target))
except IOError:
pass
for tgt in potential_target :
if os.path.isfile(tgt): # Read file
try :
return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
with io_open(tgt,'r', encoding='latin1') as f :
return f.read()
raise ValueError(("'%s' seem to be unreadable.") % target) from e
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
# Inspect namespace to load object source
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception as e:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target) from e
if isinstance(codeobj, str):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
def _atexit_once(self):
"""
At exist operation that need to be called at most once.
Second call to this function per instance will do nothing.
"""
if not getattr(self, "_atexit_once_called", False):
self._atexit_once_called = True
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
if self.history_manager is not None:
self.history_manager.end_session()
self.history_manager = None
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
code that has the appropriate information, rather than trying to
clutter
"""
self._atexit_once()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
tfile.unlink()
self.tempfiles.remove(tfile)
except FileNotFoundError:
pass
del self.tempfiles
for tdir in self.tempdirs:
try:
shutil.rmtree(tdir)
self.tempdirs.remove(tdir)
except FileNotFoundError:
pass
del self.tempdirs
# Restore user's cursor
if hasattr(self, "editing_mode") and self.editing_mode == "vi":
sys.stdout.write("\x1b[0 q")
sys.stdout.flush()
def cleanup(self):
self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
| InteractiveShell |
python | gevent__gevent | src/gevent/tests/test__makefile_ref.py | {
"start": 4821,
"end": 8653
} | class ____(Test):
def test_simple_close(self):
with Closing() as closer:
s = closer(self.make_open_socket())
fileno = s.fileno()
s.close()
self.assert_closed(s, fileno)
def test_makefile1(self):
with Closing() as closer:
s = closer(self.make_open_socket())
fileno = s.fileno()
f = closer(s.makefile())
self.assert_open(s, fileno)
# Under python 2, this closes socket wrapper object but not the file descriptor;
# under python 3, both stay open
s.close()
if PY3:
self.assert_open(s, fileno)
else:
self.assert_closed(s)
self.assert_open(fileno)
f.close()
self.assert_closed(s)
self.assert_closed(fileno)
def test_makefile2(self):
with Closing() as closer:
s = closer(self.make_open_socket())
fileno = s.fileno()
self.assert_open(s, fileno)
f = closer(s.makefile())
self.assert_open(s)
self.assert_open(s, fileno)
f.close()
# closing fileobject does not close the socket
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_server_simple(self):
with Closing() as closer:
listener = closer(tcp_listener(backlog=1))
port = listener.getsockname()[1]
connector = closer(socket.socket())
def connect():
connector.connect((DEFAULT_CONNECT, port))
closer.running_task(threading.Thread(target=connect))
client_socket = closer.accept(listener)
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket)
def test_server_makefile1(self):
with Closing() as closer:
listener = closer(tcp_listener(backlog=1))
port = listener.getsockname()[1]
connector = closer(socket.socket())
def connect():
connector.connect((DEFAULT_CONNECT, port))
closer.running_task(threading.Thread(target=connect))
client_socket = closer.accept(listener)
fileno = client_socket.fileno()
f = closer(client_socket.makefile())
self.assert_open(client_socket, fileno)
client_socket.close()
# Under python 2, this closes socket wrapper object but not the file descriptor;
# under python 3, both stay open
if PY3:
self.assert_open(client_socket, fileno)
else:
self.assert_closed(client_socket)
self.assert_open(fileno)
f.close()
self.assert_closed(client_socket, fileno)
def test_server_makefile2(self):
with Closing() as closer:
listener = closer(tcp_listener(backlog=1))
port = listener.getsockname()[1]
connector = closer(socket.socket())
def connect():
connector.connect((DEFAULT_CONNECT, port))
closer.running_task(threading.Thread(target=connect))
client_socket = closer.accept(listener)
fileno = client_socket.fileno()
f = closer(client_socket.makefile())
self.assert_open(client_socket, fileno)
# closing fileobject does not close the socket
f.close()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
@greentest.skipOnAppVeyor("This sometimes times out for no apparent reason.")
| TestSocket |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_base64.py | {
"start": 1647,
"end": 3893
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid base64 codes."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_base64": [
"NDI=",
"SGVsbG8gd29ybGQh",
"R3JlYXQgRXhwZWN0YXRpb25z",
"TWFueSBoYW5kcyBtYWtlIGxpZ2h0IHdvcmsu",
],
"malformed_base64": [
"NDI",
"=NDI",
"SGVsbG8gd29ybGQhabc",
"This is not a valid base64",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_base64"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_base64"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_base64"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidBase64().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidBase64 |
python | pypa__pip | src/pip/_internal/models/link.py | {
"start": 3012,
"end": 6634
} | class ____:
"""Information about a core metadata file associated with a distribution."""
hashes: dict[str, str] | None
def __post_init__(self) -> None:
if self.hashes is not None:
assert all(name in _SUPPORTED_HASHES for name in self.hashes)
def supported_hashes(hashes: dict[str, str] | None) -> dict[str, str] | None:
# Remove any unsupported hash types from the mapping. If this leaves no
# supported hashes, return None
if hashes is None:
return None
hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES}
if not hashes:
return None
return hashes
def _clean_url_path_part(part: str) -> str:
"""
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib.parse.quote(urllib.parse.unquote(part))
def _clean_file_url_path(part: str) -> str:
"""
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
ret = urllib.request.pathname2url(urllib.request.url2pathname(part))
if ret.startswith("///"):
# Remove any URL authority section, leaving only the URL path.
ret = ret.removeprefix("//")
return ret
# percent-encoded: /
_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
def _clean_url_path(path: str, is_local_path: bool) -> str:
"""
Clean the path portion of a URL.
"""
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
# Split on the reserved characters prior to cleaning so that
# revision strings in VCS URLs are properly preserved.
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
return "".join(cleaned_parts)
def _ensure_quoted_url(url: str) -> str:
    """
    Make sure a link is fully quoted.
    For example, if ' ' occurs in the URL, it will be replaced with "%20",
    and without double-quoting other characters.
    """
    # Break the URL into its `scheme://netloc/path?query#fragment` pieces.
    split = urllib.parse.urlsplit(url)
    # An empty netloc means the URL refers to a local filesystem path.
    path = _clean_url_path(split.path, is_local_path=not split.netloc)
    # Temporarily replace scheme with file to ensure the URL generated by
    # urlunsplit() contains an empty netloc (file://) as per RFC 1738.
    quoted = urllib.parse.urlunsplit(split._replace(scheme="file", path=path))
    # Restore the original scheme ("file" is exactly four characters).
    return split.scheme + quoted[4:]
def _absolute_link_url(base_url: str, url: str) -> str:
"""
A faster implementation of urllib.parse.urljoin with a shortcut
for absolute http/https URLs.
"""
if url.startswith(("https://", "http://")):
return url
else:
return urllib.parse.urljoin(base_url, url)
@functools.total_ordering
| MetadataFile |
python | spack__spack | lib/spack/spack/error.py | {
"start": 2946,
"end": 3122
} | class ____(SpackError):
"""Raised by packages when a platform is not supported"""
def __init__(self, message):
super().__init__(message)
| UnsupportedPlatformError |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 2093,
"end": 2420
} | class ____(BaseModel):
x: str = Field(alias='x_alias')
y: str = Field(validation_alias='y_alias')
z: str = Field(validation_alias='z_alias', alias='unused')
alias_model = AliasModel(x_alias='a', y_alias='a', z_alias='a')
assert alias_model.x == 'a'
assert alias_model.y == 'a'
assert alias_model.z == 'a'
| AliasModel |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 30199,
"end": 30379
} | class ____(torch.nn.Module):
torchdynamo_force_dynamic = True # forced to be a UnspecializedNNModule
def forward(self, x):
return torch.sin(x)
| UnspecInlinableModule |
python | apache__airflow | providers/atlassian/jira/src/airflow/providers/atlassian/jira/sensors/jira.py | {
"start": 1122,
"end": 2392
} | class ____(BaseSensorOperator):
"""
Monitors a jira ticket for any change.
:param jira_conn_id: reference to a pre-defined Jira Connection
:param method_name: method name from atlassian-python-api JIRA sdk to execute
:param method_params: parameters for the method method_name
:param result_processor: function that return boolean and act as a sensor response
"""
def __init__(
self,
*,
method_name: str,
jira_conn_id: str = "jira_default",
method_params: dict | None = None,
result_processor: Callable | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.jira_conn_id = jira_conn_id
self.result_processor = None
if result_processor is not None:
self.result_processor = result_processor
self.method_name = method_name
self.method_params = method_params
def poke(self, context: Context) -> Any:
hook = JiraHook(jira_conn_id=self.jira_conn_id)
resource = hook.get_conn()
jira_result = getattr(resource, self.method_name)(**self.method_params)
if self.result_processor is None:
return jira_result
return self.result_processor(jira_result)
| JiraSensor |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 94531,
"end": 94638
} | class ____(BaseModel):
class NestedModel(BaseModel):
b: Decimal
nested: NestedModel
| ModelTwo |
python | django-haystack__django-haystack | test_haystack/test_fields.py | {
"start": 21980,
"end": 22691
} | class ____(TestCase):
def test_init(self):
try:
foo = FacetDateTimeField(model_attr="foo")
foo_exact = FacetDateTimeField(facet_for="bar")
except:
self.fail()
self.assertEqual(foo.facet_for, None)
self.assertEqual(foo_exact.null, True)
self.assertEqual(foo_exact.facet_for, "bar")
def test_prepare(self):
mock = MockModel()
mock.user = "daniel"
mock.created = datetime.datetime(2010, 10, 30, 3, 14, 25)
created = FacetDateTimeField(model_attr="created")
self.assertEqual(
created.prepare(mock), datetime.datetime(2010, 10, 30, 3, 14, 25)
)
| FacetDateTimeFieldTestCase |
python | ray-project__ray | python/ray/tests/accelerators/mock_dpctl_1.py | {
"start": 0,
"end": 124
} | class ____:
def __init__(self, info):
pass
@property
def device_count(self):
return 6
| SyclContext |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_emr.py | {
"start": 8639,
"end": 9525
} | class ____:
def test_serialization(self):
application_id = "test_application_id"
waiter_delay = 30
waiter_max_attempts = 60
job_id = "job_id"
aws_conn_id = "aws_default"
trigger = EmrServerlessStartJobTrigger(
application_id=application_id,
waiter_delay=waiter_delay,
job_id=job_id,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.amazon.aws.triggers.emr.EmrServerlessStartJobTrigger"
assert kwargs == {
"application_id": "test_application_id",
"waiter_delay": 30,
"waiter_max_attempts": 60,
"job_id": "job_id",
"aws_conn_id": "aws_default",
}
| TestEmrServerlessStartJobTrigger |
python | pypa__warehouse | tests/unit/manage/views/test_oidc_publishers.py | {
"start": 782,
"end": 79297
} | class ____:
def test_initializes(self, metrics):
project = pretend.stub(organization=None)
request = pretend.stub(
find_service=pretend.call_recorder(lambda *a, **kw: metrics),
registry=pretend.stub(
settings={
"github.token": "fake-api-token",
},
),
POST=MultiDict(),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
assert view.project is project
assert view.request is request
assert view.metrics is metrics
assert view.request.find_service.calls == [
pretend.call(IMetricsService, context=None)
]
@pytest.mark.parametrize(
("ip_exceeded", "user_exceeded"),
[
(False, False),
(False, True),
(True, False),
],
)
def test_ratelimiting(self, metrics, ip_exceeded, user_exceeded):
project = pretend.stub(organization=None)
user_rate_limiter = pretend.stub(
hit=pretend.call_recorder(lambda *a, **kw: None),
test=pretend.call_recorder(lambda uid: not user_exceeded),
resets_in=pretend.call_recorder(lambda uid: pretend.stub()),
)
ip_rate_limiter = pretend.stub(
hit=pretend.call_recorder(lambda *a, **kw: None),
test=pretend.call_recorder(lambda ip: not ip_exceeded),
resets_in=pretend.call_recorder(lambda uid: pretend.stub()),
)
def find_service(iface, name=None, context=None):
if iface is IMetricsService:
return metrics
if name == "user_oidc.publisher.register":
return user_rate_limiter
else:
return ip_rate_limiter
request = pretend.stub(
find_service=pretend.call_recorder(find_service),
user=pretend.stub(id=pretend.stub()),
remote_addr=pretend.stub(),
registry=pretend.stub(
settings={
"github.token": "fake-api-token",
},
),
POST=MultiDict(),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
assert view._ratelimiters == {
"user.oidc": user_rate_limiter,
"ip.oidc": ip_rate_limiter,
}
assert request.find_service.calls == [
pretend.call(IMetricsService, context=None),
pretend.call(IRateLimiter, name="user_oidc.publisher.register"),
pretend.call(IRateLimiter, name="ip_oidc.publisher.register"),
]
view._hit_ratelimits()
assert user_rate_limiter.hit.calls == [
pretend.call(request.user.id),
]
assert ip_rate_limiter.hit.calls == [pretend.call(request.remote_addr)]
if user_exceeded or ip_exceeded:
with pytest.raises(TooManyOIDCRegistrations):
view._check_ratelimits()
else:
view._check_ratelimits()
def test_manage_project_oidc_publishers(self, monkeypatch):
project = pretend.stub(oidc_publishers=[], organization=None)
request = pretend.stub(
user=pretend.stub(),
registry=pretend.stub(
settings={
"github.token": "fake-api-token",
},
),
find_service=lambda *a, **kw: None,
flags=pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
),
POST=MultiDict(),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
assert view.manage_project_oidc_publishers() == {
"disabled": {
"GitHub": False,
"GitLab": False,
"Google": False,
"ActiveState": False,
},
"project": project,
"github_publisher_form": view.github_publisher_form,
"gitlab_publisher_form": view.gitlab_publisher_form,
"google_publisher_form": view.google_publisher_form,
"activestate_publisher_form": view.activestate_publisher_form,
"prefilled_provider": view.prefilled_provider,
}
assert request.flags.disallow_oidc.calls == [
pretend.call(),
pretend.call(AdminFlagValue.DISALLOW_GITHUB_OIDC),
pretend.call(AdminFlagValue.DISALLOW_GITLAB_OIDC),
pretend.call(AdminFlagValue.DISALLOW_GOOGLE_OIDC),
pretend.call(AdminFlagValue.DISALLOW_ACTIVESTATE_OIDC),
]
def test_manage_project_oidc_publishers_admin_disabled(
self, monkeypatch, pyramid_request
):
project = pretend.stub(oidc_publishers=[], organization=None)
pyramid_request.user = pretend.stub()
pyramid_request.registry = pretend.stub(
settings={
"github.token": "fake-api-token",
},
)
pyramid_request.find_service = lambda *a, **kw: None
pyramid_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: True)
)
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request.POST = MultiDict()
view = oidc_views.ManageOIDCPublisherViews(project, pyramid_request)
assert view.manage_project_oidc_publishers() == {
"disabled": {
"GitHub": True,
"GitLab": True,
"Google": True,
"ActiveState": True,
},
"project": project,
"github_publisher_form": view.github_publisher_form,
"gitlab_publisher_form": view.gitlab_publisher_form,
"google_publisher_form": view.google_publisher_form,
"activestate_publisher_form": view.activestate_publisher_form,
"prefilled_provider": view.prefilled_provider,
}
assert pyramid_request.flags.disallow_oidc.calls == [
pretend.call(),
pretend.call(AdminFlagValue.DISALLOW_GITHUB_OIDC),
pretend.call(AdminFlagValue.DISALLOW_GITLAB_OIDC),
pretend.call(AdminFlagValue.DISALLOW_GOOGLE_OIDC),
pretend.call(AdminFlagValue.DISALLOW_ACTIVESTATE_OIDC),
]
assert pyramid_request.session.flash.calls == [
pretend.call(
(
"Trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
]
@pytest.mark.parametrize(
("form_name", "prefilled_data"),
[
# All fields of GitHub provider
(
"github_publisher_form",
{
"provider": "github",
"owner": "owner",
"repository": "repo",
"workflow_filename": "file.yml",
"environment": "my_env",
},
),
# All fields of GitLab provider
(
"gitlab_publisher_form",
{
"provider": "gitlab",
"namespace": "owner",
"project": "repo",
"workflow_filepath": "file.yml",
"environment": "my_env",
"issuer_url": "https://gitlab.com",
},
),
# All fields of Google provider
(
"google_publisher_form",
{
"provider": "google",
"email": "email@example.com",
"sub": "my_subject",
},
),
# All fields of ActiveState provider
(
"activestate_publisher_form",
{
"provider": "activestate",
"organization": "my_org",
"project": "my_project",
"actor": "my_actor",
},
),
# All fields of GitHub provider, case-insensitive
(
"github_publisher_form",
{
"provider": "GitHub",
"owner": "owner",
"repository": "repo",
"workflow_filename": "file.yml",
"environment": "my_env",
},
),
],
)
def test_manage_project_oidc_publishers_prefill(
self, monkeypatch, form_name, prefilled_data
):
project = pretend.stub(oidc_publishers=[], organization=None)
request = pretend.stub(
user=pretend.stub(),
registry=pretend.stub(
settings={
"github.token": "fake-api-token",
},
),
find_service=lambda *a, **kw: None,
flags=pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
),
POST=MultiDict(),
params=MultiDict(prefilled_data),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
assert view.manage_project_oidc_publishers_prefill() == {
"disabled": {
"GitHub": False,
"GitLab": False,
"Google": False,
"ActiveState": False,
},
"project": project,
"github_publisher_form": view.github_publisher_form,
"gitlab_publisher_form": view.gitlab_publisher_form,
"google_publisher_form": view.google_publisher_form,
"activestate_publisher_form": view.activestate_publisher_form,
"prefilled_provider": prefilled_data["provider"].lower(),
}
# The form data does not contain the provider, so we'll remove it from
# the prefilled data before comparing them
del prefilled_data["provider"]
form = getattr(view, form_name)
assert form.data == prefilled_data
@pytest.mark.parametrize(
("missing_fields", "prefilled_data", "extra_fields"),
[
# Only some fields present
(
["repository", "environment"],
{
"provider": "github",
"owner": "owner",
"workflow_filename": "file.yml",
},
[],
),
# Extra fields present
(
[],
{
"provider": "github",
"owner": "owner",
"repository": "repo",
"workflow_filename": "file.yml",
"environment": "my_env",
"extra_field_1": "value1",
"extra_field_2": "value2",
},
["extra_field_1", "extra_field_2"],
),
# Both missing fields and extra fields present
(
["owner", "repository"],
{
"provider": "github",
"workflow_filename": "file.yml",
"environment": "my_env",
"extra_field_1": "value1",
"extra_field_2": "value2",
},
["extra_field_1", "extra_field_2"],
),
],
)
def test_manage_project_oidc_publishers_prefill_partial(
self, monkeypatch, missing_fields, prefilled_data, extra_fields
):
project = pretend.stub(oidc_publishers=[], organization=None)
request = pretend.stub(
user=pretend.stub(),
registry=pretend.stub(
settings={
"github.token": "fake-api-token",
},
),
find_service=lambda *a, **kw: None,
flags=pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
),
POST=MultiDict(),
params=MultiDict(prefilled_data),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
assert view.manage_project_oidc_publishers_prefill() == {
"disabled": {
"GitHub": False,
"GitLab": False,
"Google": False,
"ActiveState": False,
},
"project": project,
"github_publisher_form": view.github_publisher_form,
"gitlab_publisher_form": view.gitlab_publisher_form,
"google_publisher_form": view.google_publisher_form,
"activestate_publisher_form": view.activestate_publisher_form,
"prefilled_provider": prefilled_data["provider"].lower(),
}
# The form data does not contain the provider, so we'll remove it from
# the prefilled data before comparing them
del prefilled_data["provider"]
missing_data = {k: None for k in missing_fields}
# The expected form data is the prefilled data plus the missing fields
# (set to None) minus the extra fields
expected_data = prefilled_data | missing_data
expected_data = {
k: v for k, v in expected_data.items() if k not in extra_fields
}
assert view.github_publisher_form.data == expected_data
def test_manage_project_oidc_publishers_prefill_unknown_provider(self, monkeypatch):
project = pretend.stub(oidc_publishers=[], organization=None)
prefilled_data = {
"provider": "github2",
"owner": "owner",
"repository": "repo",
"workflow_filename": "file.yml",
"environment": "my_env",
}
request = pretend.stub(
user=pretend.stub(),
registry=pretend.stub(
settings={
"github.token": "fake-api-token",
},
),
find_service=lambda *a, **kw: None,
flags=pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
),
POST=MultiDict(),
params=MultiDict(prefilled_data),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
assert view.manage_project_oidc_publishers_prefill() == {
"disabled": {
"GitHub": False,
"GitLab": False,
"Google": False,
"ActiveState": False,
},
"project": project,
"github_publisher_form": view.github_publisher_form,
"gitlab_publisher_form": view.gitlab_publisher_form,
"google_publisher_form": view.google_publisher_form,
"activestate_publisher_form": view.activestate_publisher_form,
"prefilled_provider": None,
}
assert all(v is None for _, v in view.github_publisher_form.data.items())
@pytest.mark.parametrize(
("publisher", "new_environment_name"),
[
(
GitHubPublisher(
repository_name="some-repository",
repository_owner="some-owner",
repository_owner_id="666",
workflow_filename="some-workflow-filename.yml",
environment="",
),
"fakeenv",
),
(
GitLabPublisher(
namespace="some-namespace",
project="some-project",
workflow_filepath="some-workflow-filename.yml",
environment="",
issuer_url="https://gitlab.com",
),
"fakeenv",
),
],
)
def test_manage_project_oidc_publishers_constrain_environment(
self,
monkeypatch,
metrics,
db_request,
publisher,
new_environment_name,
):
owner = UserFactory.create()
db_request.user = owner
project = ProjectFactory.create(oidc_publishers=[publisher])
project.record_event = pretend.call_recorder(lambda *a, **kw: None)
RoleFactory.create(user=owner, project=project, role_name="Owner")
db_request.db.add(publisher)
db_request.db.flush() # To get the id
db_request.method = "POST"
db_request.POST = MultiDict(
{
"constrained_publisher_id": str(publisher.id),
"constrained_environment_name": new_environment_name,
}
)
db_request.find_service = lambda *a, **kw: metrics
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
db_request._ = lambda s: s
view = oidc_views.ManageOIDCPublisherViews(project, db_request)
assert isinstance(view.constrain_environment(), HTTPSeeOther)
assert view.metrics.increment.calls == [
pretend.call(
"warehouse.oidc.constrain_publisher_environment.attempt",
),
]
# The old publisher is actually removed entirely from the DB
# and replaced by the new constrained publisher.
publishers = db_request.db.query(OIDCPublisher).all()
assert len(publishers) == 1
constrained_publisher = publishers[0]
assert constrained_publisher.environment == new_environment_name
assert project.oidc_publishers == [constrained_publisher]
assert project.record_event.calls == [
pretend.call(
tag=EventTag.Project.OIDCPublisherAdded,
request=db_request,
additional={
"publisher": constrained_publisher.publisher_name,
"id": str(constrained_publisher.id),
"specifier": str(constrained_publisher),
"url": publisher.publisher_url(),
"submitted_by": db_request.user.username,
"reified_from_pending_publisher": False,
"constrained_from_existing_publisher": True,
},
),
pretend.call(
tag=EventTag.Project.OIDCPublisherRemoved,
request=db_request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": db_request.user.username,
},
),
]
assert db_request.flags.disallow_oidc.calls == [pretend.call()]
assert db_request.session.flash.calls == [
pretend.call(
f"Trusted publisher for project {project.name!r} has been "
f"constrained to environment {new_environment_name!r}",
queue="success",
)
]
def test_manage_project_oidc_publishers_constrain_environment_shared_publisher(
self,
metrics,
db_request,
):
publisher = GitHubPublisher(
repository_name="some-repository",
repository_owner="some-owner",
repository_owner_id="666",
workflow_filename="some-workflow-filename.yml",
environment="",
)
owner = UserFactory.create()
db_request.user = owner
project = ProjectFactory.create(oidc_publishers=[publisher])
other_project = ProjectFactory.create(oidc_publishers=[publisher])
project.record_event = pretend.call_recorder(lambda *a, **kw: None)
RoleFactory.create(user=owner, project=project, role_name="Owner")
db_request.db.add(publisher)
db_request.db.flush() # To get the id
db_request.method = "POST"
db_request.POST = MultiDict(
{
"constrained_publisher_id": str(publisher.id),
"constrained_environment_name": "fakeenv",
}
)
db_request.find_service = lambda *a, **kw: metrics
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
db_request._ = lambda s: s
view = oidc_views.ManageOIDCPublisherViews(project, db_request)
assert isinstance(view.constrain_environment(), HTTPSeeOther)
assert view.metrics.increment.calls == [
pretend.call(
"warehouse.oidc.constrain_publisher_environment.attempt",
),
]
# The old publisher is should still be present in the DB, because other_project
# still uses it.
assert db_request.db.query(OIDCPublisher).count() == 2
assert (
db_request.db.query(GitHubPublisher)
.filter(GitHubPublisher.environment == "")
.filter(GitHubPublisher.projects.contains(other_project))
.count()
) == 1
# The new constrained publisher should exist, and associated to the current
# project
constrained_publisher = (
db_request.db.query(GitHubPublisher)
.filter(GitHubPublisher.environment == "fakeenv")
.one()
)
assert project.oidc_publishers == [constrained_publisher]
assert project.record_event.calls == [
pretend.call(
tag=EventTag.Project.OIDCPublisherAdded,
request=db_request,
additional={
"publisher": constrained_publisher.publisher_name,
"id": str(constrained_publisher.id),
"specifier": str(constrained_publisher),
"url": publisher.publisher_url(),
"submitted_by": db_request.user.username,
"reified_from_pending_publisher": False,
"constrained_from_existing_publisher": True,
},
),
pretend.call(
tag=EventTag.Project.OIDCPublisherRemoved,
request=db_request,
additional={
"publisher": publisher.publisher_name,
"id": str(publisher.id),
"specifier": str(publisher),
"url": publisher.publisher_url(),
"submitted_by": db_request.user.username,
},
),
]
assert db_request.flags.disallow_oidc.calls == [pretend.call()]
assert db_request.session.flash.calls == [
pretend.call(
f"Trusted publisher for project {project.name!r} has been "
f"constrained to environment 'fakeenv'",
queue="success",
)
]
def test_constrain_oidc_publisher_admin_disabled(self, monkeypatch):
project = pretend.stub(organization=None)
request = pretend.stub(
method="POST",
params=MultiDict(),
user=pretend.stub(),
find_service=lambda *a, **kw: None,
flags=pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: True)
),
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
_=lambda s: s,
POST=MultiDict(
{
"constrained_publisher_id": uuid.uuid4(),
"constrained_environment_name": "fakeenv",
}
),
registry=pretend.stub(settings={}),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
default_response = {"_": pretend.stub()}
monkeypatch.setattr(
oidc_views.ManageOIDCPublisherViews, "default_response", default_response
)
assert view.constrain_environment() == default_response
assert request.session.flash.calls == [
pretend.call(
(
"Trusted publishing is temporarily disabled. See "
"https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
]
def test_constrain_oidc_publisher_invalid_params(self, monkeypatch, metrics):
project = pretend.stub(organization=None)
request = pretend.stub(
method="POST",
params=MultiDict(),
user=pretend.stub(),
find_service=lambda *a, **kw: metrics,
flags=pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
),
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
_=lambda s: s,
POST=MultiDict(
{
"constrained_publisher_id": "not_an_uuid",
"constrained_environment_name": "fakeenv",
}
),
registry=pretend.stub(settings={}),
)
view = oidc_views.ManageOIDCPublisherViews(project, request)
default_response = {"_": pretend.stub()}
monkeypatch.setattr(
oidc_views.ManageOIDCPublisherViews, "default_response", default_response
)
assert view.constrain_environment() == default_response
assert view.metrics.increment.calls == [
pretend.call("warehouse.oidc.constrain_publisher_environment.attempt")
]
assert request.session.flash.calls == [
pretend.call(
"The trusted publisher could not be constrained",
queue="error",
)
]
def test_constrain_non_extant_oidc_publisher(
self, monkeypatch, metrics, db_request
):
project = pretend.stub(organization=None)
db_request.method = "POST"
db_request.POST = MultiDict(
{
"constrained_publisher_id": str(uuid.uuid4()),
"constrained_environment_name": "fakeenv",
}
)
db_request.find_service = lambda *a, **kw: metrics
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
view = oidc_views.ManageOIDCPublisherViews(project, db_request)
default_response = {"_": pretend.stub()}
monkeypatch.setattr(
oidc_views.ManageOIDCPublisherViews, "default_response", default_response
)
assert view.constrain_environment() == default_response
assert view.metrics.increment.calls == [
pretend.call("warehouse.oidc.constrain_publisher_environment.attempt")
]
assert db_request.session.flash.calls == [
pretend.call(
"Invalid publisher for project",
queue="error",
)
]
def test_constrain_publisher_from_different_project(
self, monkeypatch, metrics, db_request
):
owner = UserFactory.create()
db_request.user = owner
publisher = GitHubPublisher(
repository_name="some-repository",
repository_owner="some-owner",
repository_owner_id="666",
workflow_filename="some-workflow-filename.yml",
environment="",
)
request_project = ProjectFactory.create(oidc_publishers=[])
request_project.record_event = pretend.call_recorder(lambda *a, **kw: None)
RoleFactory.create(user=owner, project=request_project, role_name="Owner")
ProjectFactory.create(oidc_publishers=[publisher])
db_request.db.add(publisher)
db_request.db.flush() # To get the id
db_request.params = MultiDict()
db_request.method = "POST"
db_request.POST = MultiDict(
{
"constrained_publisher_id": str(publisher.id),
"constrained_environment_name": "fakeenv",
}
)
db_request.find_service = lambda *a, **kw: metrics
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
view = oidc_views.ManageOIDCPublisherViews(request_project, db_request)
default_response = {"_": pretend.stub()}
monkeypatch.setattr(
oidc_views.ManageOIDCPublisherViews, "default_response", default_response
)
assert view.constrain_environment() == default_response
assert view.metrics.increment.calls == [
pretend.call("warehouse.oidc.constrain_publisher_environment.attempt")
]
assert db_request.session.flash.calls == [
pretend.call(
"Invalid publisher for project",
queue="error",
)
]
@pytest.mark.parametrize(
"publisher",
[
ActiveStatePublisher(
organization="some-org",
activestate_project_name="some-project",
actor="some-user",
actor_id="some-user-id",
),
GooglePublisher(
email="some-email@example.com",
sub="some-sub",
),
],
)
def test_constrain_unsupported_publisher(
self, monkeypatch, metrics, db_request, publisher
):
owner = UserFactory.create()
db_request.user = owner
db_request.db.add(publisher)
db_request.db.flush() # To get the id
project = ProjectFactory.create(oidc_publishers=[publisher])
project.record_event = pretend.call_recorder(lambda *a, **kw: None)
RoleFactory.create(user=owner, project=project, role_name="Owner")
db_request.params = MultiDict()
db_request.method = "POST"
db_request.POST = MultiDict(
{
"constrained_publisher_id": str(publisher.id),
"constrained_environment_name": "fakeenv",
}
)
db_request.find_service = lambda *a, **kw: metrics
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
view = oidc_views.ManageOIDCPublisherViews(project, db_request)
default_response = {"_": pretend.stub()}
monkeypatch.setattr(
oidc_views.ManageOIDCPublisherViews, "default_response", default_response
)
assert view.constrain_environment() == default_response
assert view.metrics.increment.calls == [
pretend.call("warehouse.oidc.constrain_publisher_environment.attempt")
]
assert db_request.session.flash.calls == [
pretend.call(
"Can only constrain the environment for GitHub and GitLab publishers",
queue="error",
)
]
def test_constrain_publisher_with_nonempty_environment(
self, monkeypatch, metrics, db_request
):
owner = UserFactory.create()
db_request.user = owner
publisher = GitHubPublisher(
repository_name="some-repository",
repository_owner="some-owner",
repository_owner_id="666",
workflow_filename="some-workflow-filename.yml",
environment="env-already-constrained",
)
project = ProjectFactory.create(oidc_publishers=[publisher])
project.record_event = pretend.call_recorder(lambda *a, **kw: None)
RoleFactory.create(user=owner, project=project, role_name="Owner")
db_request.db.add(publisher)
db_request.db.flush() # To get the id
db_request.params = MultiDict()
db_request.method = "POST"
db_request.POST = MultiDict(
{
"constrained_publisher_id": str(publisher.id),
"constrained_environment_name": "fakeenv",
}
)
db_request.find_service = lambda *a, **kw: metrics
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
view = oidc_views.ManageOIDCPublisherViews(project, db_request)
default_response = {"_": pretend.stub()}
monkeypatch.setattr(
oidc_views.ManageOIDCPublisherViews, "default_response", default_response
)
assert view.constrain_environment() == default_response
assert view.metrics.increment.calls == [
pretend.call("warehouse.oidc.constrain_publisher_environment.attempt")
]
assert db_request.session.flash.calls == [
pretend.call(
"Can only constrain the environment for publishers without an "
"environment configured",
queue="error",
)
]
@pytest.mark.parametrize(
("publisher_class", "publisher_kwargs"),
[
(
GitHubPublisher,
{
"repository_name": "some-repository",
"repository_owner": "some-owner",
"repository_owner_id": "666",
"workflow_filename": "some-workflow-filename.yml",
},
),
(
GitLabPublisher,
{
"namespace": "some-namespace",
"project": "some-project",
"workflow_filepath": "some-workflow-filename.yml",
"issuer_url": "https://gitlab.com",
},
),
],
)
def test_constrain_environment_publisher_already_exists(
self, monkeypatch, metrics, db_request, publisher_class, publisher_kwargs
):
owner = UserFactory.create()
db_request.user = owner
# Create unconstrained and constrained versions of the publisher
unconstrained = publisher_class(environment="", **publisher_kwargs)
constrained = publisher_class(environment="fakeenv", **publisher_kwargs)
project = ProjectFactory.create(oidc_publishers=[unconstrained, constrained])
project.record_event = pretend.call_recorder(lambda *a, **kw: None)
RoleFactory.create(user=owner, project=project, role_name="Owner")
db_request.db.add_all([unconstrained, constrained])
db_request.db.flush() # To get the ids
db_request.method = "POST"
db_request.POST = MultiDict(
{
"constrained_publisher_id": str(unconstrained.id),
"constrained_environment_name": "fakeenv",
}
)
db_request.find_service = lambda *a, **kw: metrics
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.flags = pretend.stub(
disallow_oidc=pretend.call_recorder(lambda f=None: False)
)
db_request._ = lambda s: s
view = oidc_views.ManageOIDCPublisherViews(project, db_request)
default_response = {"_": pretend.stub()}
monkeypatch.setattr(
oidc_views.ManageOIDCPublisherViews, "default_response", default_response
)
assert view.constrain_environment() == default_response
assert view.metrics.increment.calls == [
pretend.call(
"warehouse.oidc.constrain_publisher_environment.attempt",
),
]
assert project.record_event.calls == []
assert db_request.session.flash.calls == [
pretend.call(
f"{unconstrained} is already registered with {project.name}",
queue="error",
)
]
@pytest.mark.parametrize(
    ("view_name", "publisher", "make_form"),
    [
        (
            "add_github_oidc_publisher",
            pretend.stub(
                id="fakeid",
                publisher_name="GitHub",
                repository_name="fakerepo",
                publisher_url=(
                    lambda x=None: "https://github.com/fakeowner/fakerepo"
                ),
                owner="fakeowner",
                owner_id="1234",
                workflow_filename="fakeworkflow.yml",
                environment="some-environment",
            ),
            lambda publisher: pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                repository=pretend.stub(data=publisher.repository_name),
                normalized_owner=publisher.owner,
                workflow_filename=pretend.stub(data=publisher.workflow_filename),
                normalized_environment=publisher.environment,
            ),
        ),
        (
            "add_gitlab_oidc_publisher",
            pretend.stub(
                id="fakeid",
                publisher_name="GitLab",
                project="fakerepo",
                publisher_url=(
                    lambda x=None: "https://gitlab.com/fakeowner/fakerepo"
                ),
                namespace="fakeowner",
                workflow_filepath="subfolder/fakeworkflow.yml",
                environment="some-environment",
            ),
            lambda publisher: pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                project=pretend.stub(data=publisher.project),
                namespace=pretend.stub(data=publisher.namespace),
                workflow_filepath=pretend.stub(data=publisher.workflow_filepath),
                normalized_environment=publisher.environment,
                issuer_url=pretend.stub(data="https://gitlab.com"),
            ),
        ),
        (
            "add_google_oidc_publisher",
            pretend.stub(
                id="fakeid",
                publisher_name="Google",
                publisher_url=lambda x=None: None,
                email="some-environment@example.com",
                sub="some-sub",
            ),
            lambda publisher: pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                email=pretend.stub(data=publisher.email),
                sub=pretend.stub(data=publisher.sub),
            ),
        ),
        (
            "add_activestate_oidc_publisher",
            pretend.stub(
                id="fakeid",
                publisher_name="ActiveState",
                publisher_url=(
                    lambda x=None: "https://platform.activestate.com/some-org/some-project"  # noqa
                ),
                organization="some-org",
                activestate_project_name="some-project",
                actor="some-user",
                actor_id="some-user-id",
            ),
            lambda publisher: pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                organization=pretend.stub(data=publisher.organization),
                project=pretend.stub(data=publisher.activestate_project_name),
                actor=pretend.stub(data=publisher.actor),
                actor_id="some-user-id",
            ),
        ),
    ],
)
def test_add_oidc_publisher_preexisting(
    self, metrics, monkeypatch, view_name, publisher, make_form
):
    """Adding a publisher that already exists in the DB reuses the row.

    The stubbed ``db.query(...).filter(...).one_or_none()`` returns the
    parametrized ``publisher``, so the view must attach that existing row to
    the project (no ``db.add`` call), record the "added" event, flash
    success, and redirect (``HTTPSeeOther``).
    """
    # NOTE: Can't set __str__ using pretend.stub()
    monkeypatch.setattr(publisher.__class__, "__str__", lambda s: "fakespecifier")
    project = pretend.stub(
        name="fakeproject",
        oidc_publishers=[],
        organization=None,
        record_event=pretend.call_recorder(lambda *a, **kw: None),
        users=[],
    )
    request = pretend.stub(
        user=pretend.stub(
            username="some-user",
        ),
        registry=pretend.stub(
            settings={
                "github.token": "fake-api-token",
            }
        ),
        find_service=lambda *a, **kw: metrics,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: False)
        ),
        session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
        POST=pretend.stub(),
        db=pretend.stub(
            # Simulates a pre-existing publisher row in the database.
            query=lambda *a: pretend.stub(
                filter=lambda *a: pretend.stub(one_or_none=lambda: publisher)
            ),
            add=pretend.call_recorder(lambda o: None),
        ),
        path="request-path",
    )
    publisher_form_obj = make_form(publisher)
    # All four form classes are replaced with the same stub factory; only
    # the one matching ``view_name`` is actually constructed by the view.
    publisher_form_cls = pretend.call_recorder(lambda *a, **kw: publisher_form_obj)
    monkeypatch.setattr(oidc_views, "GitHubPublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "GitLabPublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "GooglePublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "ActiveStatePublisherForm", publisher_form_cls)
    view = oidc_views.ManageOIDCPublisherViews(project, request)
    monkeypatch.setattr(
        view, "_hit_ratelimits", pretend.call_recorder(lambda: None)
    )
    monkeypatch.setattr(
        view, "_check_ratelimits", pretend.call_recorder(lambda: None)
    )
    assert isinstance(getattr(view, view_name)(), HTTPSeeOther)
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.add_publisher.attempt",
            tags=[f"publisher:{publisher.publisher_name}"],
        ),
        pretend.call(
            "warehouse.oidc.add_publisher.ok",
            tags=[f"publisher:{publisher.publisher_name}"],
        ),
    ]
    assert project.record_event.calls == [
        pretend.call(
            tag=EventTag.Project.OIDCPublisherAdded,
            request=request,
            additional={
                "publisher": publisher.publisher_name,
                "id": "fakeid",
                "specifier": "fakespecifier",
                "url": publisher.publisher_url(),
                "submitted_by": "some-user",
                "reified_from_pending_publisher": False,
                "constrained_from_existing_publisher": False,
            },
        )
    ]
    assert request.session.flash.calls == [
        pretend.call(
            "Added fakespecifier "
            + (
                f"in {publisher.publisher_url()}"
                if publisher.publisher_url()
                else ""
            )
            + " to fakeproject",
            queue="success",
        )
    ]
    # No new row was inserted; the pre-existing publisher was reused.
    assert request.db.add.calls == []
    assert publisher_form_obj.validate.calls == [pretend.call()]
    assert view._hit_ratelimits.calls == [pretend.call()]
    assert view._check_ratelimits.calls == [pretend.call()]
    assert project.oidc_publishers == [publisher]
@pytest.mark.parametrize(
    ("view_name", "publisher_form_obj", "expected_publisher"),
    [
        (
            "add_github_oidc_publisher",
            pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                repository=pretend.stub(data="fakerepo"),
                normalized_owner="fakeowner",
                workflow_filename=pretend.stub(data="fakeworkflow.yml"),
                normalized_environment="some-environment",
                owner_id="1234",
            ),
            pretend.stub(publisher_name="GitHub"),
        ),
        (
            "add_gitlab_oidc_publisher",
            pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                project=pretend.stub(data="fakerepo"),
                namespace=pretend.stub(data="fakeowner"),
                workflow_filepath=pretend.stub(data="subfolder/fakeworkflow.yml"),
                normalized_environment="some-environment",
                issuer_url=pretend.stub(data="https://gitlab.com"),
            ),
            pretend.stub(publisher_name="GitLab"),
        ),
        (
            "add_google_oidc_publisher",
            pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                email=pretend.stub(data="some-environment@example.com"),
                sub=pretend.stub(data="some-sub"),
            ),
            "Google",
        ),
        (
            "add_activestate_oidc_publisher",
            pretend.stub(
                validate=pretend.call_recorder(lambda: True),
                id="fakeid",
                publisher_name="ActiveState",
                publisher_url=lambda x=None: None,
                organization=pretend.stub(data="fake-org"),
                project=pretend.stub(data="fake-project"),
                actor=pretend.stub(data="fake-actor"),
                actor_id="some-user-id",
            ),
            "ActiveState",
        ),
    ],
)
def test_add_oidc_publisher_created(
    self, metrics, monkeypatch, view_name, publisher_form_obj, expected_publisher
):
    """A valid form with no matching row creates a brand-new publisher.

    The stubbed ``one_or_none()`` returns ``None``, so the view must insert
    a new publisher (``db.add``, which here also assigns the fake id),
    attach it to the project, record the event, flash success, and notify
    the project's users by email.

    NOTE(review): the ``expected_publisher`` parameter is never used in the
    test body — candidate for removal or for an extra assertion.
    """
    fakeuser = pretend.stub()
    project = pretend.stub(
        name="fakeproject",
        oidc_publishers=[],
        organization=None,
        record_event=pretend.call_recorder(lambda *a, **kw: None),
        users=[fakeuser],
    )
    request = pretend.stub(
        user=pretend.stub(
            username="some-user",
        ),
        registry=pretend.stub(
            settings={
                "github.token": "fake-api-token",
            }
        ),
        find_service=lambda *a, **kw: metrics,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: False)
        ),
        session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
        POST=pretend.stub(),
        db=pretend.stub(
            # No pre-existing publisher matches the form data.
            query=lambda *a: pretend.stub(
                filter=lambda *a: pretend.stub(one_or_none=lambda: None)
            ),
            # ``db.add`` stands in for the flush assigning a primary key.
            add=pretend.call_recorder(lambda o: setattr(o, "id", "fakeid")),
        ),
        path="request-path",
    )
    publisher_form_cls = pretend.call_recorder(lambda *a, **kw: publisher_form_obj)
    monkeypatch.setattr(oidc_views, "GitHubPublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "GitLabPublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "GooglePublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "ActiveStatePublisherForm", publisher_form_cls)
    monkeypatch.setattr(
        oidc_views,
        "send_trusted_publisher_added_email",
        pretend.call_recorder(lambda *a, **kw: None),
    )
    view = oidc_views.ManageOIDCPublisherViews(project, request)
    monkeypatch.setattr(
        view, "_hit_ratelimits", pretend.call_recorder(lambda: None)
    )
    monkeypatch.setattr(
        view, "_check_ratelimits", pretend.call_recorder(lambda: None)
    )
    assert isinstance(getattr(view, view_name)(), HTTPSeeOther)
    assert len(project.oidc_publishers) == 1
    publisher = project.oidc_publishers[0]
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.add_publisher.attempt",
            tags=[f"publisher:{publisher.publisher_name}"],
        ),
        pretend.call(
            "warehouse.oidc.add_publisher.ok",
            tags=[f"publisher:{publisher.publisher_name}"],
        ),
    ]
    assert project.record_event.calls == [
        pretend.call(
            tag=EventTag.Project.OIDCPublisherAdded,
            request=request,
            additional={
                "publisher": publisher.publisher_name,
                "id": "fakeid",
                "specifier": str(publisher),
                "url": publisher.publisher_url(),
                "submitted_by": "some-user",
                "reified_from_pending_publisher": False,
                "constrained_from_existing_publisher": False,
            },
        )
    ]
    assert request.session.flash.calls == [
        pretend.call(
            f"Added {str(publisher)} "
            + (
                f"in {publisher.publisher_url()}"
                if publisher.publisher_url()
                else ""
            )
            + " to fakeproject",
            queue="success",
        )
    ]
    # The newly-created publisher was inserted into the session.
    assert request.db.add.calls == [pretend.call(project.oidc_publishers[0])]
    assert publisher_form_obj.validate.calls == [pretend.call()]
    assert oidc_views.send_trusted_publisher_added_email.calls == [
        pretend.call(
            request,
            fakeuser,
            project_name="fakeproject",
            publisher=publisher,
        )
    ]
    assert view._hit_ratelimits.calls == [pretend.call()]
    assert view._check_ratelimits.calls == [pretend.call()]
@pytest.mark.parametrize(
    ("view_name", "publisher_name", "publisher", "post_body"),
    [
        (
            "add_github_oidc_publisher",
            "GitHub",
            GitHubPublisher(
                repository_name="some-repository",
                repository_owner="some-owner",
                repository_owner_id="666",
                workflow_filename="some-workflow-filename.yml",
                environment="some-environment",
            ),
            MultiDict(
                {
                    "owner": "some-owner",
                    "repository": "some-repository",
                    "workflow_filename": "some-workflow-filename.yml",
                    "environment": "some-environment",
                }
            ),
        ),
        (
            "add_gitlab_oidc_publisher",
            "GitLab",
            GitLabPublisher(
                project="some-repository",
                namespace="some-owner",
                workflow_filepath="subfolder/some-workflow-filename.yml",
                environment="some-environment",
                issuer_url="https://gitlab.com",
            ),
            MultiDict(
                {
                    "namespace": "some-owner",
                    "project": "some-repository",
                    "workflow_filepath": "subfolder/some-workflow-filename.yml",
                    "environment": "some-environment",
                    "issuer_url": "https://gitlab.com",
                }
            ),
        ),
        (
            "add_google_oidc_publisher",
            "Google",
            GooglePublisher(
                email="some-email@example.com",
                sub="some-sub",
            ),
            MultiDict(
                {
                    "email": "some-email@example.com",
                    "sub": "some-sub",
                }
            ),
        ),
        (
            "add_activestate_oidc_publisher",
            "ActiveState",
            ActiveStatePublisher(
                organization="some-org",
                activestate_project_name="some-project",
                actor="some-user",
                actor_id="some-user-id",
            ),
            MultiDict(
                {
                    "organization": "some-org",
                    "project": "some-project",
                    "actor": "some-user",
                }
            ),
        ),
    ],
)
def test_add_oidc_publisher_already_registered_with_project(
    self, monkeypatch, db_request, view_name, publisher_name, publisher, post_body
):
    """Submitting a publisher already attached to the project is rejected.

    The real publisher row exists in the DB *and* is already in
    ``project.oidc_publishers``, so the view must return its default
    response with an "already registered" error flash, fire only the
    "attempt" metric, and record no event.
    """
    db_request.user = UserFactory.create()
    EmailFactory(user=db_request.user, verified=True, primary=True)
    db_request.db.add(publisher)
    db_request.db.flush()  # To get it in the DB
    project = pretend.stub(
        name="fakeproject",
        oidc_publishers=[publisher],
        organization=None,
        record_event=pretend.call_recorder(lambda *a, **kw: None),
    )
    db_request.registry = pretend.stub(
        settings={
            "github.token": "fake-api-token",
        }
    )
    db_request.flags = pretend.stub(
        disallow_oidc=pretend.call_recorder(lambda f=None: False)
    )
    db_request.session = pretend.stub(
        flash=pretend.call_recorder(lambda *a, **kw: None)
    )
    db_request.POST = post_body
    view = oidc_views.ManageOIDCPublisherViews(project, db_request)
    # The _lookup_* helpers are stubbed with canned values so form
    # validation succeeds without calling out to external services.
    monkeypatch.setattr(
        oidc_views.GitHubPublisherForm,
        "_lookup_owner",
        lambda *a: {"login": "some-owner", "id": "some-owner-id"},
    )
    monkeypatch.setattr(
        oidc_views.ActiveStatePublisherForm,
        "_lookup_organization",
        lambda *a: None,
    )
    monkeypatch.setattr(
        oidc_views.ActiveStatePublisherForm,
        "_lookup_actor",
        lambda *a: {"user_id": "some-user-id"},
    )
    monkeypatch.setattr(
        view, "_hit_ratelimits", pretend.call_recorder(lambda: None)
    )
    monkeypatch.setattr(
        view, "_check_ratelimits", pretend.call_recorder(lambda: None)
    )
    # The view falls through to its default (re-render) response.
    assert getattr(view, view_name)() == {
        "disabled": {
            "GitHub": False,
            "GitLab": False,
            "Google": False,
            "ActiveState": False,
        },
        "project": project,
        "github_publisher_form": view.github_publisher_form,
        "gitlab_publisher_form": view.gitlab_publisher_form,
        "google_publisher_form": view.google_publisher_form,
        "activestate_publisher_form": view.activestate_publisher_form,
        "prefilled_provider": view.prefilled_provider,
    }
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.add_publisher.attempt",
            tags=[f"publisher:{publisher_name}"],
        ),
    ]
    assert project.record_event.calls == []
    assert db_request.session.flash.calls == [
        pretend.call(
            f"{str(publisher)} is already registered with fakeproject",
            queue="error",
        )
    ]
def test_add_oidc_publisher_already_registered_after_normalization(
    self, monkeypatch, db_request
):
    """Duplicate detection applies after environment normalization.

    The POSTed environment ``"SOME-environment"`` differs from the stored
    publisher's ``"some-environment"`` only by case; the view must still
    treat it as the same publisher and reject the submission with an
    "already registered" error flash.
    """
    publisher = GitHubPublisher(
        repository_name="some-repository",
        repository_owner="some-owner",
        repository_owner_id="666",
        workflow_filename="some-workflow-filename.yml",
        environment="some-environment",
    )
    post_body = MultiDict(
        {
            "owner": "some-owner",
            "repository": "some-repository",
            "workflow_filename": "some-workflow-filename.yml",
            # Differs from the stored value only in letter case.
            "environment": "SOME-environment",
        }
    )
    db_request.user = UserFactory.create()
    EmailFactory(user=db_request.user, verified=True, primary=True)
    db_request.db.add(publisher)
    db_request.db.flush()  # To get it in the DB
    project = pretend.stub(
        name="fakeproject",
        oidc_publishers=[publisher],
        organization=None,
        record_event=pretend.call_recorder(lambda *a, **kw: None),
    )
    db_request.registry = pretend.stub(
        settings={
            "github.token": "fake-api-token",
        }
    )
    db_request.flags = pretend.stub(
        disallow_oidc=pretend.call_recorder(lambda f=None: False)
    )
    db_request.session = pretend.stub(
        flash=pretend.call_recorder(lambda *a, **kw: None)
    )
    db_request.POST = post_body
    view = oidc_views.ManageOIDCPublisherViews(project, db_request)
    # Stubbed so owner validation succeeds without an external lookup.
    monkeypatch.setattr(
        oidc_views.GitHubPublisherForm,
        "_lookup_owner",
        lambda *a: {"login": "some-owner", "id": "some-owner-id"},
    )
    monkeypatch.setattr(
        view, "_hit_ratelimits", pretend.call_recorder(lambda: None)
    )
    monkeypatch.setattr(
        view, "_check_ratelimits", pretend.call_recorder(lambda: None)
    )
    assert view.add_github_oidc_publisher() == {
        "disabled": {
            "GitHub": False,
            "GitLab": False,
            "Google": False,
            "ActiveState": False,
        },
        "project": project,
        "github_publisher_form": view.github_publisher_form,
        "gitlab_publisher_form": view.gitlab_publisher_form,
        "google_publisher_form": view.google_publisher_form,
        "activestate_publisher_form": view.activestate_publisher_form,
        "prefilled_provider": view.prefilled_provider,
    }
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.add_publisher.attempt",
            tags=["publisher:GitHub"],
        ),
    ]
    assert project.record_event.calls == []
    assert db_request.session.flash.calls == [
        pretend.call(
            f"{str(publisher)} is already registered with fakeproject",
            queue="error",
        )
    ]
@pytest.mark.parametrize(
    ("view_name", "publisher_name"),
    [
        ("add_github_oidc_publisher", "GitHub"),
        ("add_gitlab_oidc_publisher", "GitLab"),
        ("add_google_oidc_publisher", "Google"),
        ("add_activestate_oidc_publisher", "ActiveState"),
    ],
)
def test_add_oidc_publisher_ratelimited(
    self, metrics, monkeypatch, view_name, publisher_name
):
    """An exhausted registration rate limit yields an HTTP 429 response."""
    stub_project = pretend.stub(organization=None)
    stub_request = pretend.stub(
        user=pretend.stub(),
        registry=pretend.stub(settings={}),
        find_service=lambda *a, **kw: metrics,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: False)
        ),
        _=lambda s: s,
        POST=MultiDict(),
    )
    view = oidc_views.ManageOIDCPublisherViews(stub_project, stub_request)

    # Make the rate-limit check raise as if the limit were exhausted.
    limit_error = TooManyOIDCRegistrations(
        resets_in=pretend.stub(total_seconds=lambda: 60)
    )
    monkeypatch.setattr(
        view,
        "_check_ratelimits",
        pretend.call_recorder(pretend.raiser(limit_error)),
    )

    response = getattr(view, view_name)()
    assert response.__class__ == HTTPTooManyRequests

    expected_tags = [f"publisher:{publisher_name}"]
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.add_publisher.attempt",
            tags=expected_tags,
        ),
        pretend.call(
            "warehouse.oidc.add_publisher.ratelimited",
            tags=expected_tags,
        ),
    ]
@pytest.mark.parametrize(
    ("view_name", "publisher_name"),
    [
        ("add_github_oidc_publisher", "GitHub"),
        ("add_gitlab_oidc_publisher", "GitLab"),
        ("add_google_oidc_publisher", "Google"),
        ("add_activestate_oidc_publisher", "ActiveState"),
    ],
)
def test_add_oidc_publisher_admin_disabled(
    self, monkeypatch, view_name, publisher_name
):
    """With the admin flag disabling OIDC, adding flashes an error."""
    stub_project = pretend.stub(organization=None)
    stub_request = pretend.stub(
        user=pretend.stub(),
        find_service=lambda *a, **kw: None,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: True)
        ),
        session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
        _=lambda s: s,
        POST=MultiDict(),
        registry=pretend.stub(settings={}),
    )

    canned_response = {"_": pretend.stub()}
    monkeypatch.setattr(
        oidc_views.ManageOIDCPublisherViews, "default_response", canned_response
    )
    view = oidc_views.ManageOIDCPublisherViews(stub_project, stub_request)

    assert getattr(view, view_name)() == canned_response
    assert stub_request.session.flash.calls == [
        pretend.call(
            (
                f"{publisher_name}-based trusted publishing is temporarily "
                "disabled. See https://pypi.org/help#admin-intervention for "
                "details."
            ),
            queue="error",
        )
    ]
@pytest.mark.parametrize(
    ("view_name", "publisher_name"),
    [
        ("add_github_oidc_publisher", "GitHub"),
        ("add_gitlab_oidc_publisher", "GitLab"),
        ("add_google_oidc_publisher", "Google"),
        ("add_activestate_oidc_publisher", "ActiveState"),
    ],
)
def test_add_oidc_publisher_invalid_form(
    self, metrics, monkeypatch, view_name, publisher_name
):
    """A form that fails validation returns the default response.

    Only the "attempt" metric is incremented, but the rate limits are
    still both checked and hit.
    """
    project = pretend.stub(organization=None)
    request = pretend.stub(
        user=pretend.stub(),
        find_service=lambda *a, **kw: metrics,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: False)
        ),
        session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
        _=lambda s: s,
        POST=MultiDict(),
        registry=pretend.stub(settings={}),
    )
    # Form stub whose validate() always reports failure.
    publisher_form_obj = pretend.stub(
        validate=pretend.call_recorder(lambda: False),
    )
    publisher_form_cls = pretend.call_recorder(lambda *a, **kw: publisher_form_obj)
    monkeypatch.setattr(oidc_views, "GitHubPublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "GitLabPublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "GooglePublisherForm", publisher_form_cls)
    monkeypatch.setattr(oidc_views, "ActiveStatePublisherForm", publisher_form_cls)
    view = oidc_views.ManageOIDCPublisherViews(project, request)
    default_response = {
        "github_publisher_form": publisher_form_obj,
        "gitlab_publisher_form": publisher_form_obj,
        "google_publisher_form": publisher_form_obj,
        "activestate_publisher_form": publisher_form_obj,
    }
    monkeypatch.setattr(
        oidc_views.ManageOIDCPublisherViews, "default_response", default_response
    )
    monkeypatch.setattr(
        view, "_check_ratelimits", pretend.call_recorder(lambda: None)
    )
    monkeypatch.setattr(
        view, "_hit_ratelimits", pretend.call_recorder(lambda: None)
    )
    assert getattr(view, view_name)() == default_response
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.add_publisher.attempt",
            tags=[f"publisher:{publisher_name}"],
        ),
    ]
    assert view._hit_ratelimits.calls == [pretend.call()]
    assert view._check_ratelimits.calls == [pretend.call()]
@pytest.mark.parametrize(
    "publisher",
    [
        GitHubPublisher(
            repository_name="some-repository",
            repository_owner="some-owner",
            repository_owner_id="666",
            workflow_filename="some-workflow-filename.yml",
            environment="some-environment",
        ),
        GitLabPublisher(
            project="some-repository",
            namespace="some-owner",
            workflow_filepath="subfolder/some-workflow-filename.yml",
            environment="some-environment",
            issuer_url="https://gitlab.com",
        ),
        GooglePublisher(
            email="some-email@example.com",
            sub="some-sub",
        ),
        ActiveStatePublisher(
            organization="some-org",
            activestate_project_name="some-project",
            actor="some-user",
            actor_id="some-user-id",
        ),
    ],
)
def test_delete_oidc_publisher_registered_to_multiple_projects(
    self, monkeypatch, db_request, publisher
):
    """Deleting a shared publisher detaches it but keeps the DB row.

    The publisher is registered to two projects; removing it from one
    project must leave the row in the DB and the association on the other
    project intact, while still recording the event, flashing success, and
    sending the removal email.
    """
    db_request.user = UserFactory.create()
    EmailFactory(user=db_request.user, verified=True, primary=True)
    db_request.db.add(publisher)
    db_request.db.flush()  # To get it in the DB
    project = ProjectFactory.create(oidc_publishers=[publisher])
    project.record_event = pretend.call_recorder(lambda *a, **kw: None)
    RoleFactory.create(user=db_request.user, project=project, role_name="Owner")
    # Second project sharing the same publisher row.
    another_project = ProjectFactory.create(oidc_publishers=[publisher])
    db_request.flags = pretend.stub(
        disallow_oidc=pretend.call_recorder(lambda f=None: False)
    )
    db_request.session = pretend.stub(
        flash=pretend.call_recorder(lambda *a, **kw: None)
    )
    db_request.POST = MultiDict(
        {
            "publisher_id": str(publisher.id),
        }
    )
    monkeypatch.setattr(
        oidc_views,
        "send_trusted_publisher_removed_email",
        pretend.call_recorder(lambda *a, **kw: None),
    )
    view = oidc_views.ManageOIDCPublisherViews(project, db_request)
    default_response = {"_": pretend.stub()}
    monkeypatch.setattr(
        oidc_views.ManageOIDCPublisherViews, "default_response", default_response
    )
    assert isinstance(view.delete_oidc_publisher(), HTTPSeeOther)
    assert publisher not in project.oidc_publishers
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.delete_publisher.attempt",
        ),
        pretend.call(
            "warehouse.oidc.delete_publisher.ok",
            tags=[f"publisher:{publisher.publisher_name}"],
        ),
    ]
    assert project.record_event.calls == [
        pretend.call(
            tag=EventTag.Project.OIDCPublisherRemoved,
            request=db_request,
            additional={
                "publisher": publisher.publisher_name,
                "id": str(publisher.id),
                "specifier": str(publisher),
                "url": publisher.publisher_url(),
                "submitted_by": db_request.user.username,
            },
        )
    ]
    assert db_request.flags.disallow_oidc.calls == [pretend.call()]
    assert db_request.session.flash.calls == [
        pretend.call(
            f"Removed trusted publisher for project {project.name!r}",
            queue="success",
        )
    ]
    # The publisher is not actually removed entirely from the DB, since it's
    # registered to other projects that haven't removed it.
    assert db_request.db.query(OIDCPublisher).one() == publisher
    assert another_project.oidc_publishers == [publisher]
    assert oidc_views.send_trusted_publisher_removed_email.calls == [
        pretend.call(
            db_request,
            db_request.user,
            project_name=project.name,
            publisher=publisher,
        )
    ]
@pytest.mark.parametrize(
    "publisher",
    [
        GitHubPublisher(
            repository_name="some-repository",
            repository_owner="some-owner",
            repository_owner_id="666",
            workflow_filename="some-workflow-filename.yml",
            environment="some-environment",
        ),
        GitLabPublisher(
            project="some-repository",
            namespace="some-owner",
            workflow_filepath="subfolder/some-workflow-filename.yml",
            environment="some-environment",
            issuer_url="https://gitlab.com",
        ),
        GooglePublisher(
            email="some-email@example.com",
            sub="some-sub",
        ),
        ActiveStatePublisher(
            organization="some-org",
            activestate_project_name="some-project",
            actor="some-user",
            actor_id="some-user-id",
        ),
    ],
)
def test_delete_oidc_publisher_entirely(self, monkeypatch, db_request, publisher):
    """Deleting a publisher used by only one project deletes the DB row.

    Unlike the multi-project case, removing the last association must
    delete the publisher row itself (the ``OIDCPublisher`` query comes back
    empty), while recording the removal event and sending the email.
    """
    db_request.user = UserFactory.create()
    EmailFactory(user=db_request.user, verified=True, primary=True)
    db_request.db.add(publisher)
    db_request.db.flush()  # To get it in the DB
    project = ProjectFactory.create(oidc_publishers=[publisher])
    RoleFactory.create(user=db_request.user, project=project, role_name="Owner")
    db_request.flags = pretend.stub(
        disallow_oidc=pretend.call_recorder(lambda f=None: False)
    )
    db_request.session = pretend.stub(
        flash=pretend.call_recorder(lambda *a, **kw: None)
    )
    db_request.POST = MultiDict(
        {
            "publisher_id": str(publisher.id),
        }
    )
    monkeypatch.setattr(
        oidc_views,
        "send_trusted_publisher_removed_email",
        pretend.call_recorder(lambda *a, **kw: None),
    )
    view = oidc_views.ManageOIDCPublisherViews(project, db_request)
    default_response = {"_": pretend.stub()}
    monkeypatch.setattr(
        oidc_views.ManageOIDCPublisherViews, "default_response", default_response
    )
    assert isinstance(view.delete_oidc_publisher(), HTTPSeeOther)
    assert publisher not in project.oidc_publishers
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.delete_publisher.attempt",
        ),
        pretend.call(
            "warehouse.oidc.delete_publisher.ok",
            tags=[f"publisher:{publisher.publisher_name}"],
        ),
    ]
    # This variant uses a real project, so the event is read back from the
    # DB rather than from a call recorder.
    events = project.events.all()
    assert len(events) == 1
    event = events[0]
    assert event.tag == EventTag.Project.OIDCPublisherRemoved
    assert str(event.ip_address) == db_request.remote_addr
    assert event.additional == {
        "publisher": publisher.publisher_name,
        "id": str(publisher.id),
        "specifier": str(publisher),
        "url": publisher.publisher_url(),
        "submitted_by": db_request.user.username,
    }
    assert db_request.flags.disallow_oidc.calls == [pretend.call()]
    assert db_request.session.flash.calls == [
        pretend.call(
            f"Removed trusted publisher for project {project.name!r}",
            queue="success",
        )
    ]
    # The publisher is actually removed entirely from the DB.
    assert db_request.db.query(OIDCPublisher).all() == []
    assert oidc_views.send_trusted_publisher_removed_email.calls == [
        pretend.call(
            db_request,
            db_request.user,
            project_name=project.name,
            publisher=publisher,
        )
    ]
def test_delete_oidc_publisher_invalid_form(self, metrics, monkeypatch):
    """An invalid delete form leaves the publisher attached."""
    attached_publisher = pretend.stub()
    project = pretend.stub(
        oidc_publishers=[attached_publisher], organization=None
    )
    request = pretend.stub(
        user=pretend.stub(),
        find_service=lambda *a, **kw: metrics,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: False)
        ),
        POST=MultiDict(),
        registry=pretend.stub(settings={}),
    )

    # Form stub whose validate() always reports failure.
    form_obj = pretend.stub(validate=pretend.call_recorder(lambda: False))
    form_cls = pretend.call_recorder(lambda *a, **kw: form_obj)
    monkeypatch.setattr(oidc_views, "DeletePublisherForm", form_cls)

    canned_response = {"_": pretend.stub()}
    monkeypatch.setattr(
        oidc_views.ManageOIDCPublisherViews, "default_response", canned_response
    )
    view = oidc_views.ManageOIDCPublisherViews(project, request)

    assert view.delete_oidc_publisher() == canned_response
    assert len(project.oidc_publishers) == 1
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.delete_publisher.attempt",
        ),
    ]
    assert form_cls.calls == [pretend.call(request.POST)]
    assert form_obj.validate.calls == [pretend.call()]
@pytest.mark.parametrize(
    "other_publisher", [None, pretend.stub(id="different-fakeid")]
)
def test_delete_oidc_publisher_not_found(
    self, metrics, monkeypatch, other_publisher
):
    """Deleting a publisher id not attached to the project is rejected.

    ``db.get`` returns either ``None`` or a publisher whose id does not
    match any of the project's publishers; either way the view must keep
    the project's publisher list untouched, record no event, and flash
    "Invalid publisher for project".
    """
    publisher = pretend.stub(
        publisher_name="fakepublisher",
        id="fakeid",
    )
    # NOTE: Can't set __str__ using pretend.stub()
    monkeypatch.setattr(publisher.__class__, "__str__", lambda s: "fakespecifier")
    project = pretend.stub(
        oidc_publishers=[publisher],
        organization=None,
        name="fakeproject",
        record_event=pretend.call_recorder(lambda *a, **kw: None),
    )
    request = pretend.stub(
        user=pretend.stub(),
        find_service=lambda *a, **kw: metrics,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: False)
        ),
        session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
        POST=MultiDict(),
        registry=pretend.stub(settings={}),
        db=pretend.stub(
            # Lookup by id resolves to something other than the project's
            # publisher (or to nothing at all).
            get=pretend.call_recorder(lambda *a, **kw: other_publisher),
        ),
        remote_addr="0.0.0.0",
    )
    delete_publisher_form_obj = pretend.stub(
        validate=pretend.call_recorder(lambda: True),
        publisher_id=pretend.stub(data="different-fakeid"),
    )
    delete_publisher_form_cls = pretend.call_recorder(
        lambda *a, **kw: delete_publisher_form_obj
    )
    monkeypatch.setattr(
        oidc_views, "DeletePublisherForm", delete_publisher_form_cls
    )
    view = oidc_views.ManageOIDCPublisherViews(project, request)
    default_response = {"_": pretend.stub()}
    monkeypatch.setattr(
        oidc_views.ManageOIDCPublisherViews, "default_response", default_response
    )
    assert view.delete_oidc_publisher() == default_response
    assert publisher in project.oidc_publishers  # not deleted
    assert other_publisher not in project.oidc_publishers
    assert view.metrics.increment.calls == [
        pretend.call(
            "warehouse.oidc.delete_publisher.attempt",
        ),
    ]
    assert project.record_event.calls == []
    assert request.session.flash.calls == [
        pretend.call("Invalid publisher for project", queue="error")
    ]
    assert delete_publisher_form_cls.calls == [pretend.call(request.POST)]
    assert delete_publisher_form_obj.validate.calls == [pretend.call()]
def test_delete_oidc_publisher_admin_disabled(self, monkeypatch):
    """With OIDC administratively disabled, deletion is refused."""
    stub_project = pretend.stub(organization=None)
    stub_request = pretend.stub(
        user=pretend.stub(),
        find_service=lambda *a, **kw: None,
        flags=pretend.stub(
            disallow_oidc=pretend.call_recorder(lambda f=None: True)
        ),
        session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
        POST=MultiDict(),
        registry=pretend.stub(settings={}),
    )

    canned_response = {"_": pretend.stub()}
    monkeypatch.setattr(
        oidc_views.ManageOIDCPublisherViews, "default_response", canned_response
    )
    view = oidc_views.ManageOIDCPublisherViews(stub_project, stub_request)

    assert view.delete_oidc_publisher() == canned_response
    assert stub_request.session.flash.calls == [
        pretend.call(
            (
                "Trusted publishing is temporarily disabled. "
                "See https://pypi.org/help#admin-intervention for details."
            ),
            queue="error",
        )
    ]
| TestManageOIDCPublisherViews |
python | weaviate__weaviate-python-client | integration/test_batch_v4.py | {
"start": 1677,
"end": 27828
class ____(Protocol):
    """Callable protocol describing the ``client_factory`` fixture.

    Calling the fixture returns a connected ``weaviate.WeaviateClient``
    together with the name of a freshly (re)created test collection.
    """

    def __call__(
        self, name: str = "", ports: Tuple[int, int] = (8080, 50051), multi_tenant: bool = False
    ) -> Tuple[weaviate.WeaviateClient, str]:
        """Create/reset a collection and return the client and its name."""
        ...
@pytest.fixture
def client_factory(
    request: SubRequest,
) -> Generator[
    Callable[[str, Tuple[int, int], bool], Tuple[weaviate.WeaviateClient, str]], None, None
]:
    """Yield a factory that creates (or resets) a per-test collection.

    The factory lazily connects a single ``weaviate.WeaviateClient`` the
    first time it is called (subsequent calls reuse it, so ``ports`` only
    matters on the first call) and returns ``(client, collection_name)``.
    The collection name is derived from the requesting test's name plus the
    optional ``name`` suffix.  On teardown, every collection created through
    the factory is deleted and the client is closed.
    """
    created_names: List[str] = []
    client_fixture: Optional[weaviate.WeaviateClient] = None

    def _factory(
        name: str = "", ports: Tuple[int, int] = (8080, 50051), multi_tenant: bool = False
    ) -> Tuple[weaviate.WeaviateClient, str]:
        # ``client_fixture`` is rebound here, so it needs nonlocal;
        # ``created_names`` is only mutated, so it does not.
        nonlocal client_fixture
        name_fixture = _sanitize_collection_name(request.node.name) + name
        created_names.append(name_fixture)
        if client_fixture is None:
            client_fixture = weaviate.connect_to_local(grpc_port=ports[1], port=ports[0])
        # Start from a clean slate if an earlier run left the collection behind.
        if client_fixture.collections.exists(name_fixture):
            client_fixture.collections.delete(name_fixture)
        client_fixture.collections.create(
            name=name_fixture,
            properties=[
                Property(name="name", data_type=DataType.TEXT),
                Property(name="age", data_type=DataType.INT),
            ],
            references=[ReferenceProperty(name="test", target_collection=name_fixture)],
            multi_tenancy_config=Configure.multi_tenancy(multi_tenant),
            vectorizer_config=Configure.Vectorizer.none(),
        )
        return client_fixture, name_fixture

    try:
        yield _factory
    finally:
        # ``created_names`` is always a list (never None), so the previous
        # ``name_fixtures is not None`` guard was dead code.
        if client_fixture is not None:
            for name_fixture in created_names:
                client_fixture.collections.delete(name_fixture)
            client_fixture.close()
def test_add_objects_in_multiple_batches(client_factory: ClientFactory) -> None:
    """Objects added via several separate batch context managers all land."""
    client, name = client_factory()

    # One object per batch: one rate-limited batch, then two dynamic ones.
    with client.batch.rate_limit(50) as batch:
        batch.add_object(collection=name, properties={})
    for _ in range(2):
        with client.batch.dynamic() as batch:
            batch.add_object(collection=name, properties={})

    fetched = client.collections.use(name).query.fetch_objects().objects
    assert len(fetched) == 3
def test_flushing(client_factory: ClientFactory) -> None:
    """Test that batch is working normally after flushing."""
    client, name = client_factory()
    with client.batch.dynamic() as batch:
        batch.add_object(collection=name, properties={})
        batch.flush()
        # After an explicit flush the first object is already queryable.
        objs = client.collections.use(name).query.fetch_objects().objects
        assert len(objs) == 1
        batch.add_object(collection=name, properties={})
        batch.add_object(collection=name, properties={})
    # Leaving the context manager sends the remaining objects.
    objs = client.collections.use(name).query.fetch_objects().objects
    assert len(objs) == 3
@pytest.mark.parametrize(
    "vector",
    [
        None,
        [1, 2, 3],
        MockNumpyTorch([1, 2, 3]),
        MockTensorFlow([1, 2, 3]),
        MockDFSeries([1, 2, 3]),
    ],
)
@pytest.mark.parametrize("uid", [None, UUID1, str(UUID2), UUID3.hex])
def test_add_object(
    client_factory: ClientFactory,
    uid: Optional[UUID],
    vector: Optional[VECTORS],
) -> None:
    """Objects can be batch-added with any supported UUID/vector form.

    The vector parametrization covers plain lists plus numpy/torch,
    TensorFlow, and pandas-Series-like mock containers.
    """
    client, name = client_factory()
    with client.batch.fixed_size() as batch:
        batch.add_object(collection=name, properties={}, uuid=uid, vector=vector)
    objs = client.collections.use(name).query.fetch_objects().objects
    assert len(objs) == 1
@pytest.mark.parametrize("from_object_uuid", [UUID1, str(UUID2), UUID3.hex])
@pytest.mark.parametrize("to_object_uuid", [UUID4.hex, UUID5, str(UUID6)])
@pytest.mark.parametrize("to_object_collection", [False, True])
def test_add_reference(
client_factory: ClientFactory,
from_object_uuid: UUID,
to_object_uuid: UUID,
to_object_collection: bool,
) -> None:
"""Test the `add_reference` method."""
client, name = client_factory()
with client.batch.dynamic() as batch:
batch.add_object(
properties={},
collection=name,
uuid=from_object_uuid,
)
batch.add_object(
properties={},
collection=name,
uuid=to_object_uuid,
)
batch.add_reference(
from_uuid=from_object_uuid,
from_collection=name,
from_property="test",
to=ReferenceToMulti(target_collection=name, uuids=to_object_uuid)
if to_object_collection
else to_object_uuid,
)
assert len(client.batch.failed_objects) == 0, [
obj.message for obj in client.batch.failed_objects
]
assert len(client.batch.failed_references) == 0, [
ref.message for ref in client.batch.failed_references
]
objs = (
client.collections.use(name)
.query.fetch_objects(return_references=QueryReference(link_on="test"))
.objects
)
obj = client.collections.use(name).query.fetch_object_by_id(
from_object_uuid, return_references=QueryReference(link_on="test")
)
assert len(objs) == 2
assert isinstance(obj.references["test"], _CrossReference)
def test_add_data_object_and_get_class_shards_readiness(
client_factory: ClientFactory, request: SubRequest
) -> None:
client, name = client_factory()
with client.batch.fixed_size() as batch:
batch.add_object(properties={}, collection=request.node.name)
statuses = client.batch._get_shards_readiness(Shard(collection=name))
assert len(statuses) == 1
assert statuses[0]
def test_add_data_object_with_tenant_and_get_class_shards_readiness(
client_factory: ClientFactory,
) -> None:
"""Test the `add_data_object` method."""
client, name = client_factory(multi_tenant=True)
client.collections.use(name).tenants.create([Tenant(name="tenant1"), Tenant(name="tenant2")])
with client.batch.fixed_size() as batch:
batch.add_object(properties={}, collection=name, tenant="tenant1")
statuses = client.batch._get_shards_readiness(Shard(collection=name, tenant="tenant1"))
assert len(statuses) == 1
assert statuses[0]
def test_add_object_batch_with_tenant(client_factory: ClientFactory, request: SubRequest) -> None:
# create two classes and add 5 tenants each
tenants = [Tenant(name="tenant" + str(i)) for i in range(5)]
client, name1 = client_factory(request.node.name + "1", multi_tenant=True)
_, name2 = client_factory(
request.node.name + "2", multi_tenant=True
) # to enable automatic cleanup
client.collections.use(name1).tenants.create(tenants)
client.collections.use(name2).tenants.create(tenants)
nr_objects = 100
objects = []
with client.batch.dynamic() as batch:
for i in range(nr_objects):
obj_uuid = uuid.uuid4()
objects.append((obj_uuid, name1 if i % 2 else name2, "tenant" + str(i % 5)))
batch.add_object(
collection=name1 if i % 2 else name2,
tenant="tenant" + str(i % 5),
properties={"name": "tenant" + str(i % 5)},
uuid=obj_uuid,
)
for obj in objects:
retObj = client.collections.use(obj[1]).with_tenant(obj[2]).query.fetch_object_by_id(obj[0])
assert retObj.properties["name"] == obj[2]
def _from_uuid_to_uuid(uuid: uuid.UUID) -> uuid.UUID:
return uuid
def _from_uuid_to_str(uuid: uuid.UUID) -> str:
return str(uuid)
def _from_uuid_to_uuid_list(uuid: uuid.UUID) -> List[uuid.UUID]:
return [uuid]
def _from_uuid_to_str_list(uuid: uuid.UUID) -> List[str]:
return [str(uuid)]
@pytest.mark.parametrize(
"to_ref",
[
_from_uuid_to_uuid,
_from_uuid_to_str,
_from_uuid_to_uuid_list,
_from_uuid_to_str_list,
],
)
def test_add_ref_batch(client_factory: ClientFactory, to_ref: Callable) -> None:
client, name = client_factory()
nr_objects = 100
objects_class0 = []
with client.batch.dynamic() as batch:
for _ in range(nr_objects):
obj_uuid0 = uuid.uuid4()
objects_class0.append(obj_uuid0)
batch.add_object(collection=name, uuid=obj_uuid0)
batch.add_reference(
from_property="test",
from_collection=name,
from_uuid=obj_uuid0,
to=to_ref(obj_uuid0),
)
collection = client.collections.use(name)
for obj in objects_class0:
ret_obj = collection.query.fetch_object_by_id(
obj,
return_references=QueryReference(link_on="test"),
)
assert ret_obj is not None
assert ret_obj.references["test"].objects[0].uuid == obj
def test_add_ref_batch_with_tenant(client_factory: ClientFactory) -> None:
client, name = client_factory(multi_tenant=True)
client.collections.use(name).tenants.create([Tenant(name="tenant" + str(i)) for i in range(5)])
nr_objects = 100
objects_class0 = []
with client.batch.dynamic() as batch:
for i in range(nr_objects):
tenant = "tenant" + str(i % 5)
obj_uuid0 = uuid.uuid4()
objects_class0.append((obj_uuid0, tenant))
batch.add_object(
collection=name, tenant=tenant, properties={"name": tenant}, uuid=obj_uuid0
)
# add refs between all tenants
batch.add_reference(
from_property="test",
from_collection=name,
from_uuid=obj_uuid0,
to=ReferenceToMulti(
uuids=obj_uuid0, target_collection=name
), # workaround for autodetection with tenant
tenant=tenant,
)
for obj in objects_class0:
ret_obj = (
client.collections.use(name)
.with_tenant(obj[1])
.query.fetch_object_by_id(
obj[0],
return_properties="name",
return_references=QueryReference(link_on="test"),
)
)
assert ret_obj is not None
assert ret_obj.properties["name"] == obj[1]
assert ret_obj.references["test"].objects[0].uuid == obj[0]
@pytest.mark.parametrize(
"batching_method",
[
# lambda client: client.batch.dynamic(),
# lambda client: client.batch.fixed_size(),
# lambda client: client.batch.rate_limit(9999),
lambda client: client.batch.experimental(concurrency=1),
],
ids=[
# "test_add_ten_thousand_data_objects_dynamic",
# "test_add_ten_thousand_data_objects_fixed_size",
# "test_add_ten_thousand_data_objects_rate_limit",
"test_add_ten_thousand_data_objects_experimental",
],
)
def test_add_ten_thousand_data_objects(
client_factory: ClientFactory,
batching_method: Callable[[weaviate.WeaviateClient], ClientBatchingContextManager],
request: SubRequest,
) -> None:
"""Test adding ten thousand data objects."""
client, name = client_factory()
if (
request.node.callspec.id == "test_add_ten_thousand_data_objects_experimental"
and client._connection._weaviate_version.is_lower_than(1, 34, 0)
):
pytest.skip("Server-side batching not supported in Weaviate < 1.34.0")
nr_objects = 100000
import time
start = time.time()
with batching_method(client) as batch:
for i in range(nr_objects):
batch.add_object(
collection=name,
properties={"name": "test" + str(i)},
)
end = time.time()
print(f"Time taken to add {nr_objects} objects: {end - start} seconds")
assert len(client.batch.results.objs.errors) == 0
assert len(client.batch.results.objs.all_responses) == nr_objects
assert len(client.batch.results.objs.uuids) == nr_objects
assert len(client.collections.use(name)) == nr_objects
assert client.batch.results.objs.has_errors is False
assert len(client.batch.failed_objects) == 0, [
obj.message for obj in client.batch.failed_objects
]
client.collections.delete(name)
def make_refs(uuids: List[UUID], name: str) -> List[dict]:
refs = []
for from_ in uuids:
tos = uuids.copy()
tos.remove(from_)
for to in tos:
refs.append(
{
"from_uuid": from_,
"from_collection": name,
"from_property": "test",
"to": to,
}
)
return refs
def test_add_one_hundred_objects_and_references_between_all(
client_factory: ClientFactory,
) -> None:
"""Test adding one hundred objects and references between all of them."""
client, name = client_factory()
nr_objects = 100
uuids: List[UUID] = []
with client.batch.dynamic() as batch:
for i in range(nr_objects):
uuid_ = batch.add_object(
collection=name,
properties={"name": "test" + str(i)},
)
uuids.append(uuid_)
for ref in make_refs(uuids, name):
batch.add_reference(**ref)
objs = (
client.collections.use(name)
.query.fetch_objects(limit=nr_objects, return_references=QueryReference(link_on="test"))
.objects
)
assert len(objs) == nr_objects
for obj in objs:
assert len(obj.references["test"].objects) == nr_objects - 1
client.collections.delete(name)
def test_add_1000_objects_with_async_indexing_and_wait(
client_factory: ClientFactory, request: SubRequest
) -> None:
client, name = client_factory(ports=(8090, 50061))
nr_objects = 1000
with client.batch.dynamic() as batch:
for i in range(nr_objects):
batch.add_object(
collection=name,
properties={"name": "text" + str(i)},
vector=[float((j + i) % nr_objects) / nr_objects for j in range(nr_objects)],
)
assert len(client.batch.failed_objects) == 0
client.batch.wait_for_vector_indexing()
ret = client.collections.use(name).aggregate.over_all(total_count=True)
assert ret.total_count == nr_objects
shards = client.collections.use(name).config.get_shards()
assert shards[0].status == "READY"
assert shards[0].vector_queue_size == 0
@pytest.mark.skip("Difficult to find numbers that work reliably in the CI")
def test_add_10000_objects_with_async_indexing_and_dont_wait(
client_factory: ClientFactory, request: SubRequest
) -> None:
old_client = weaviate.Client("http://localhost:8090")
client, name = client_factory(ports=(8090, 50061))
nr_objects = 10000
vec_length = 1000
with client.batch.fixed_size(batch_size=1000, concurrent_requests=1) as batch:
for i in range(nr_objects):
batch.add_object(
collection=name,
properties={"name": "text" + str(i)},
vector=[float((j + i) % nr_objects) / nr_objects for j in range(vec_length)],
)
shard_status = old_client.schema.get_class_shards(name)
assert shard_status[0]["status"] == "INDEXING"
assert shard_status[0]["vectorQueueSize"] > 0
assert len(client.batch.failed_objects) == 0
ret = client.collections.use(name).aggregate.over_all(total_count=True)
assert ret.total_count == nr_objects
def test_add_1000_tenant_objects_with_async_indexing_and_wait_for_all(
client_factory: ClientFactory, request: SubRequest
) -> None:
client, name = client_factory(ports=(8090, 50061), multi_tenant=True)
tenants = [Tenant(name="tenant" + str(i)) for i in range(2)]
collection = client.collections.use(name)
collection.tenants.create(tenants)
nr_objects = 2000
with client.batch.dynamic() as batch:
for i in range(nr_objects):
batch.add_object(
collection=name,
properties={"name": "text" + str(i)},
vector=[float((j + i) % nr_objects) / nr_objects for j in range(nr_objects)],
tenant=tenants[i % len(tenants)].name,
)
assert len(client.batch.failed_objects) == 0
client.batch.wait_for_vector_indexing()
for tenant in tenants:
ret = collection.with_tenant(tenant.name).aggregate.over_all(total_count=True)
assert ret.total_count == nr_objects / len(tenants)
shards = client.collections.use(name).config.get_shards()
for shard in shards:
assert shard.status == "READY"
assert shard.vector_queue_size == 0
@pytest.mark.skip("Difficult to find numbers that work reliably in the CI")
def test_add_1000_tenant_objects_with_async_indexing_and_wait_for_only_one(
client_factory: ClientFactory,
) -> None:
client, name = client_factory(ports=(8090, 50061), multi_tenant=True)
tenants = [Tenant(name="tenant" + str(i)) for i in range(2)]
collection = client.collections.use(name)
collection.tenants.create(tenants)
nr_objects = 1001
with client.batch.dynamic() as batch:
for i in range(nr_objects):
batch.add_object(
collection=name,
properties={"name": "text" + str(i)},
vector=[float((j + i) % nr_objects) / nr_objects for j in range(nr_objects)],
tenant=tenants[0].name if i < 1000 else tenants[1].name,
)
assert len(client.batch.failed_objects) == 0
assert len(client.batch.results.objs.all_responses) == 1001
client.batch.wait_for_vector_indexing(shards=[Shard(collection=name, tenant=tenants[0].name)])
for tenant in tenants:
ret = collection.with_tenant(tenant.name).aggregate.over_all(total_count=True)
assert ret.total_count == 1000 if tenant.name == tenants[0].name else 1
shards = client.collections.use(name).config.get_shards()
for shard in shards:
if shard.name == tenants[0].name:
assert shard.status == "READY"
assert shard.vector_queue_size == 0
else:
assert shard.status == "INDEXING"
assert shard.vector_queue_size > 0
@pytest.mark.parametrize(
"batching_method",
[
lambda client: client.batch.dynamic(),
lambda client: client.batch.fixed_size(),
lambda client: client.batch.rate_limit(1000),
lambda client: client.batch.experimental(),
],
ids=[
"test_add_one_hundred_objects_and_references_between_all_dynamic",
"test_add_one_hundred_objects_and_references_between_all_fixed_size",
"test_add_one_hundred_objects_and_references_between_all_rate_limit",
"test_add_one_hundred_objects_and_references_between_all_experimental",
],
)
def test_add_one_object_and_a_self_reference(
client_factory: ClientFactory,
batching_method: Callable[[weaviate.WeaviateClient], ClientBatchingContextManager],
request: SubRequest,
) -> None:
"""Test adding one object and a self reference."""
client, name = client_factory()
if (
request.node.callspec.id
== "test_add_one_hundred_objects_and_references_between_all_experimental"
and client._connection._weaviate_version.is_lower_than(1, 34, 0)
):
pytest.skip("Server-side batching not supported in Weaviate < 1.34.0")
with batching_method(client) as batch:
uuid = batch.add_object(collection=name, properties={})
batch.add_reference(
from_uuid=uuid,
from_collection=name,
from_property="test",
to=uuid,
)
obj = client.collections.use(name).query.fetch_object_by_id(
uuid, return_references=QueryReference(link_on="test")
)
assert obj is not None
assert obj.references["test"].objects[0].uuid == uuid
def test_multi_threaded_batching(
client_factory: ClientFactory,
) -> None:
client, name = client_factory()
nr_objects = 1000
nr_threads = 10
def batch_insert(batch) -> None:
for i in range(nr_objects):
batch.add_object(
collection=name,
properties={"name": "test" + str(i)},
)
with concurrent.futures.ThreadPoolExecutor() as executor:
with client.batch.dynamic() as batch:
futures = [executor.submit(batch_insert, batch) for _ in range(nr_threads)]
for future in concurrent.futures.as_completed(futures):
future.result()
objs = client.collections.use(name).query.fetch_objects(limit=nr_objects * nr_threads).objects
assert len(objs) == nr_objects * nr_threads
def test_error_reset(client_factory: ClientFactory) -> None:
client, name = client_factory()
with client.batch.dynamic() as batch:
batch.add_object(properties={"name": 1}, collection=name)
batch.add_object(properties={"name": "correct"}, collection=name)
errs = client.batch.failed_objects
assert len(errs) == 1
assert errs[0].object_.properties is not None
assert errs[0].object_.properties["name"] == 1
with client.batch.dynamic() as batch:
batch.add_object(properties={"name": 2}, collection=name)
batch.add_object(properties={"name": "correct"}, collection=name)
errs2 = client.batch.failed_objects
assert len(errs2) == 1
assert errs2[0].object_.properties is not None
assert errs2[0].object_.properties["name"] == 2
# err still contains original errors
assert len(errs) == 1
assert errs[0].object_.properties is not None
assert errs[0].object_.properties["name"] == 1
def test_non_existant_collection(client_factory: ClientFactory) -> None:
client, _ = client_factory()
with client.batch.dynamic() as batch:
batch.add_object(properties={"name": 2}, collection="DoesNotExist")
# above should not throw - depending on the autoschema config this might create an error or
# not, so we do not check for errors here
def test_number_of_stored_results_in_batch(client_factory: ClientFactory) -> None:
client, name = client_factory()
with client.batch.dynamic() as batch:
for i in range(99999):
batch.add_object(properties={"name": str(i)}, collection=name)
assert len(client.batch.results.objs.all_responses) == 99999
assert len(client.batch.results.objs.errors) == 0
assert len(client.batch.results.objs.uuids) == 99999
assert sorted(client.batch.results.objs.uuids.keys()) == list(range(99999))
with client.batch.dynamic() as batch:
for i in range(100001):
batch.add_object(properties={"name": str(i)}, collection=name)
assert len(client.batch.results.objs.all_responses) == 100000
assert len(client.batch.results.objs.errors) == 0
assert len(client.batch.results.objs.uuids) == 100000
assert sorted(client.batch.results.objs.uuids.keys()) == list(range(1, 100001))
# depending on timings in the event loop, some batches may end before others
# as such the keys of the uuids dict may not be in order but they are still unique
# and correspond to the original indices within the batch
def test_uuids_keys_and_original_index(client_factory: ClientFactory) -> None:
client, name = client_factory()
objs = [(uuid.uuid4(), {"name": str(i)}) for i in range(100)]
with client.batch.dynamic() as batch:
for obj in objs:
batch.add_object(uuid=obj[0], properties=obj[1], collection=name)
assert len(client.batch.results.objs.all_responses) == 100
assert len(client.batch.results.objs.errors) == 0
assert len(client.batch.results.objs.uuids) == 100
assert [objs[k][0] for k in client.batch.results.objs.uuids.keys()] == list(
client.batch.results.objs.uuids.values()
)
def test_batching_error_logs(
client_factory: ClientFactory, caplog: pytest.LogCaptureFixture
) -> None:
client, name = client_factory()
if client._connection._weaviate_version.is_at_least(
1, 32, 1
): # TODO: change to 1.33.0 when released
pytest.skip(
"Batching error logs do not get emitted by the new server-side batching functionality."
)
with client.batch.fixed_size() as batch:
for obj in [{"name": i} for i in range(100)]:
batch.add_object(properties=obj, collection=name)
assert (
("Failed to send" in caplog.text)
and ("objects in a batch of" in caplog.text)
and (
"Please inspect client.batch.failed_objects or collection.batch.failed_objects for the failed objects."
in caplog.text
)
) # number of objects sent per batch is not fixed for less than 100 objects
def test_references_with_to_uuids(client_factory: ClientFactory) -> None:
"""Test that batch waits until the to object is created."""
client, _ = client_factory()
client.collections.delete(["target", "source"])
target = client.collections.create(
"target", multi_tenancy_config=wvc.config.Configure.multi_tenancy(enabled=True)
)
source = client.collections.create(
"source",
references=[wvc.config.ReferenceProperty(name="to", target_collection="target")],
multi_tenancy_config=wvc.config.Configure.multi_tenancy(enabled=True),
)
target.tenants.create("tenant-1")
source.tenants.create("tenant-1")
from_uuid = source.with_tenant("tenant-1").data.insert(properties={})
objs = 20
with client.batch.fixed_size(batch_size=10, concurrent_requests=1) as batch:
for _ in range(objs):
to = batch.add_object(collection="target", properties={}, tenant="tenant-1")
batch.add_reference(
from_uuid=from_uuid,
from_property="to",
to=to,
from_collection="source",
tenant="tenant-1",
)
assert len(client.batch.failed_references) == 0, client.batch.failed_references
client.collections.delete(["target", "source"])
| ClientFactory |
python | getsentry__sentry | tests/acceptance/test_organization_dashboards.py | {
"start": 26995,
"end": 29720
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.team = self.create_team(organization=self.organization, name="Mariachi Band")
self.project = self.create_project(
organization=self.organization, teams=[self.team], name="Bengal"
)
self.dashboard = Dashboard.objects.create(
title="Dashboard 1", created_by_id=self.user.id, organization=self.organization
)
self.widget_1 = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Widget 1",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
)
self.widget_2 = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Widget 2",
display_type=DashboardWidgetDisplayTypes.TABLE,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
)
self.login_as(self.user)
self.default_path = f"/organizations/{self.organization.slug}/dashboards/"
def wait_until_loaded(self) -> None:
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
def test_dashboard_manager(self) -> None:
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.browser.get(self.default_path)
self.wait_until_loaded()
def test_dashboard_manager_with_unset_layouts_and_defined_layouts(self) -> None:
dashboard_with_layouts = Dashboard.objects.create(
title="Dashboard with some defined layouts",
created_by_id=self.user.id,
organization=self.organization,
)
DashboardWidget.objects.create(
dashboard=dashboard_with_layouts,
title="Widget 1",
display_type=DashboardWidgetDisplayTypes.BAR_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
detail={"layout": {"x": 1, "y": 0, "w": 3, "h": 3, "minH": 2}},
)
# This widget has no layout, but should position itself at
# x: 4, y: 0, w: 2, h: 2
DashboardWidget.objects.create(
dashboard=dashboard_with_layouts,
title="Widget 2",
display_type=DashboardWidgetDisplayTypes.TABLE,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.browser.get(self.default_path)
self.wait_until_loaded()
| OrganizationDashboardsManageAcceptanceTest |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_algorithms.py | {
"start": 6169,
"end": 6641
} | class ____:
test_cbc = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "CAST5"),
["cast5-cbc.txt"],
lambda key, **kwargs: CAST5(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
CAST5(b"\x00" * 16), OFB(b"\x00" * 8)
),
skip_message="Does not support CAST5 OFB",
)
| TestCAST5ModeCBC |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 15457,
"end": 15778
} | class ____(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
"The server does not support the media type transmitted in the request."
)
| UnsupportedMediaType |
python | getsentry__sentry | tests/sentry/tasks/test_update_code_owners_schema.py | {
"start": 407,
"end": 2322
} | class ____(TestCase):
def setUp(self) -> None:
self.organization = self.create_organization()
self.project = self.create_project(organization=self.organization)
self.project_codeowner = self.create_codeowners(project=self.project)
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration = Integration.objects.get()
@pytest.fixture(autouse=True)
def patch_update_schema(self) -> Generator[None]:
with mock.patch.object(ProjectCodeOwners, "update_schema") as self.mock_update:
yield
def test_no_op(self) -> None:
with self.feature("organizations:integrations-codeowners"):
update_code_owners_schema(self.organization.id)
self.mock_update.assert_not_called()
def test_with_project(self) -> None:
with self.feature("organizations:integrations-codeowners"):
update_code_owners_schema(self.organization.id, projects=[self.project.id])
self.mock_update.assert_called_with(organization=self.organization)
def test_with_project_id(self) -> None:
with self.feature("organizations:integrations-codeowners"):
update_code_owners_schema(self.organization.id, projects=[self.project.id])
self.mock_update.assert_called_with(organization=self.organization)
def test_with_integration(self) -> None:
with self.feature("organizations:integrations-codeowners"):
update_code_owners_schema(self.organization.id, integration=self.integration.id)
self.mock_update.assert_called_with(organization=self.organization)
def test_with_integration_id(self) -> None:
with self.feature("organizations:integrations-codeowners"):
update_code_owners_schema(self.organization.id, integration=self.integration.id)
self.mock_update.assert_called_with(organization=self.organization)
| UpdateCodeOwnersSchemaTest |
python | facebookresearch__faiss | tests/test_io.py | {
"start": 14060,
"end": 15809
} | class ____(unittest.TestCase):
@unittest.skipIf(
platform.system() not in ["Windows", "Linux"],
"supported OSes only"
)
def test_mmap(self):
xt, xb, xq = get_dataset_2(32, 0, 100, 50)
index = faiss.index_factory(32, "SQfp16", faiss.METRIC_L2)
# does not need training
index.add(xb)
Dref, Iref = index.search(xq, 10)
fd, fname = tempfile.mkstemp()
os.close(fd)
index2 = None
try:
faiss.write_index(index, fname)
index2 = faiss.read_index(fname, faiss.IO_FLAG_MMAP_IFC)
Dnew, Inew = index2.search(xq, 10)
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_array_equal(Dref, Dnew)
finally:
del index2
if os.path.exists(fname):
# skip the error. On Windows, index2 holds the handle file,
# so it cannot be ensured that the file can be deleted
# unless index2 is collected by a GC
try:
os.unlink(fname)
except:
pass
def test_zerocopy(self):
xt, xb, xq = get_dataset_2(32, 0, 100, 50)
index = faiss.index_factory(32, "SQfp16", faiss.METRIC_L2)
# does not need training
index.add(xb)
Dref, Iref = index.search(xq, 10)
serialized_index = faiss.serialize_index(index)
reader = faiss.ZeroCopyIOReader(
faiss.swig_ptr(serialized_index), serialized_index.size)
index2 = faiss.read_index(reader)
Dnew, Inew = index2.search(xq, 10)
np.testing.assert_array_equal(Iref, Inew)
np.testing.assert_array_equal(Dref, Dnew)
| TestIOFlatMMap |
python | django__django | tests/forms_tests/tests/test_input_formats.py | {
"start": 30439,
"end": 35033
} | class ____(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.enterClassContext(translation.override(None))
super().setUpClass()
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010/12/21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("1:30:05 PM 21/12/2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean("1:30 PM 21-12-2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField(self):
"Localized DateTimeFields act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("2010/12/21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("1:30:05 PM 21/12/2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("1:30 PM 21-12-2010")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_dateTimeField_with_inputformat(self):
"""
DateTimeFields with manually specified input formats can accept those
formats
"""
f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05 21.12.2010")
with self.assertRaises(ValidationError):
f.clean("2010/12/21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("12.21.2010 13:30:05")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("12-21-2010 13:30")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField_with_inputformat(self):
"""
Localized DateTimeFields with manually specified input formats can
accept those formats.
"""
f = forms.DateTimeField(
input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"], localize=True
)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05 21.12.2010")
with self.assertRaises(ValidationError):
f.clean("2010/12/21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean("12.21.2010 13:30:05")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean("12-21-2010 13:30")
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
| CustomDateTimeInputFormatsTests |
python | huggingface__transformers | src/transformers/models/vilt/image_processing_vilt.py | {
"start": 4206,
"end": 22291
} | class ____(BaseImageProcessor):
r"""
Constructs a ViLT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under
`int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if
`do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method.
size_divisor (`int`, *optional*, defaults to 32):
The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`
is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Wwhether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by
the `do_pad` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = ViltImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
size_divisor: int = 32,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 384}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.size_divisor = size_divisor
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = kwargs.pop("pad_and_return_pixel_mask", do_pad)
def resize(
self,
image: np.ndarray,
size: dict[str, int],
size_divisor: int = 32,
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image.
Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
resized to the max size while preserving the aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Controls the size of the output image. Should be of the form `{"shortest_edge": int}`.
size_divisor (`int`, *optional*, defaults to 32):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, default_to_square=False)
if "shortest_edge" not in size:
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
shorter = size["shortest_edge"]
longer = int(1333 / 800 * shorter)
output_size = get_resize_output_image_size(
image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def _pad_image(
self,
image: np.ndarray,
output_size: tuple[int, int],
constant_values: Union[float, Iterable[float]] = 0,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(
image,
padding,
mode=PaddingMode.CONSTANT,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
return padded_image
def pad(
self,
images: list[np.ndarray],
constant_values: Union[float, Iterable[float]] = 0,
return_pixel_mask: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
pad_size = get_max_height_width(images, input_data_format=input_data_format)
padded_images = [
self._pad_image(
image,
pad_size,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
for image in images
]
data = {"pixel_values": padded_images}
if return_pixel_mask:
masks = [
make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
for image in images
]
data["pixel_mask"] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
size_divisor: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The shortest edge of the image is resized to
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also
created and returned.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
# Here the pad() method does not require any additional argument as it takes the maximum of (height, width).
# Hence, it does not need to be passed to a validate_preprocess_arguments() method.
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [
self.resize(
image=image,
size=size,
size_divisor=size_divisor,
resample=resample,
input_data_format=input_data_format,
)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
if do_pad:
encoded_outputs = self.pad(
images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format
)
else:
encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
return encoded_outputs
__all__ = ["ViltImageProcessor"]
| ViltImageProcessor |
python | plotly__plotly.py | plotly/graph_objs/pie/_domain.py | {
"start": 233,
"end": 4925
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "pie"
_path_str = "pie.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this pie trace .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this pie trace .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this pie trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this pie trace (in plot fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this pie trace .
row
If there is a layout grid, use the domain for this row
in the grid for this pie trace .
x
Sets the horizontal domain of this pie trace (in plot
fraction).
y
Sets the vertical domain of this pie trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.pie.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this pie trace .
row
If there is a layout grid, use the domain for this row
in the grid for this pie trace .
x
Sets the horizontal domain of this pie trace (in plot
fraction).
y
Sets the vertical domain of this pie trace (in plot
fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.pie.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pie.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol6.py | {
"start": 228,
"end": 373
} | class ____(Protocol):
def __call__(self, path: str = ...) -> str: ...
# Callback with positional parameter without default arg value.
| Callback1 |
python | langchain-ai__langchain | libs/core/tests/unit_tests/output_parsers/test_pydantic_parser.py | {
"start": 2994,
"end": 6535
} | class ____(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: str | None = Field(description="Additional fields", default=None)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """{
"action": "Update",
"action_input": "The PydanticOutputParser class is powerful",
"additional_fields": null,
"for_new_lines": "not_escape_newline:\n escape_newline: \\n"
}"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """{
"action": "update",
"action_input": "The PydanticOutputParser class is powerful",
"additional_fields": null
}"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The PydanticOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline: \n",
)
def test_pydantic_output_parser() -> None:
"""Test PydanticOutputParser."""
pydantic_parser: PydanticOutputParser = PydanticOutputParser(
pydantic_object=TestModel
)
result = pydantic_parser.parse(DEF_RESULT)
assert result == DEF_EXPECTED_RESULT
assert pydantic_parser.OutputType is TestModel
def test_pydantic_output_parser_fail() -> None:
"""Test PydanticOutputParser where completion result fails schema validation."""
pydantic_parser: PydanticOutputParser = PydanticOutputParser(
pydantic_object=TestModel
)
with pytest.raises(
OutputParserException, match="Failed to parse TestModel from completion"
):
pydantic_parser.parse(DEF_RESULT_FAIL)
def test_pydantic_output_parser_type_inference() -> None:
"""Test pydantic output parser type inference."""
class SampleModel(BaseModel):
foo: int
bar: str
# Ignoring mypy error that appears in python 3.8, but not 3.11.
# This seems to be functionally correct, so we'll ignore the error.
pydantic_parser = PydanticOutputParser[SampleModel](pydantic_object=SampleModel)
schema = pydantic_parser.get_output_schema().model_json_schema()
assert schema == {
"properties": {
"bar": {"title": "Bar", "type": "string"},
"foo": {"title": "Foo", "type": "integer"},
},
"required": ["foo", "bar"],
"title": "SampleModel",
"type": "object",
}
@pytest.mark.parametrize("pydantic_object", _FORECAST_MODELS)
def test_format_instructions(pydantic_object: TypeBaseModel) -> None:
"""Test format instructions."""
parser = PydanticOutputParser[PydanticBaseModel](pydantic_object=pydantic_object)
instructions = parser.get_format_instructions()
assert "temperature" in instructions
def test_format_instructions_preserves_language() -> None:
"""Test format instructions does not attempt to encode into ascii."""
description = (
"你好, こんにちは, नमस्ते, Bonjour, Hola, "
"Olá, 안녕하세요, Jambo, Merhaba, Γειά σου" # noqa: RUF001
)
class Foo(BaseModel):
hello: str = Field(
description=(
"你好, こんにちは, नमस्ते, Bonjour, Hola, "
"Olá, 안녕하세요, Jambo, Merhaba, Γειά σου" # noqa: RUF001
)
)
parser = PydanticOutputParser[Foo](pydantic_object=Foo)
assert description in parser.get_format_instructions()
| TestModel |
python | doocs__leetcode | solution/3400-3499/3459.Length of Longest V-Shaped Diagonal Segment/Solution.py | {
"start": 0,
"end": 817
} | class ____:
def lenOfVDiagonal(self, grid: List[List[int]]) -> int:
@cache
def dfs(i: int, j: int, k: int, cnt: int) -> int:
x, y = i + dirs[k], j + dirs[k + 1]
target = 2 if grid[i][j] == 1 else (2 - grid[i][j])
if not 0 <= x < m or not 0 <= y < n or grid[x][y] != target:
return 0
res = dfs(x, y, k, cnt)
if cnt > 0:
res = max(res, dfs(x, y, (k + 1) % 4, 0))
return 1 + res
m, n = len(grid), len(grid[0])
dirs = (1, 1, -1, -1, 1)
ans = 0
for i, row in enumerate(grid):
for j, x in enumerate(row):
if x == 1:
for k in range(4):
ans = max(ans, dfs(i, j, k, 1) + 1)
return ans
| Solution |
python | sqlalchemy__sqlalchemy | test/base/test_events.py | {
"start": 22387,
"end": 23296
} | class ____(TearDownLocalEventsFixture, fixtures.TestBase):
"""Test custom target acceptance."""
def setup_test(self):
class TargetEvents(event.Events):
@classmethod
def _accept_with(cls, target, identifier):
if target == "one":
return Target
else:
return None
def event_one(self, x, y):
pass
class Target:
dispatch = event.dispatcher(TargetEvents)
self.Target = Target
def test_indirect(self):
def listen(x, y):
pass
event.listen("one", "event_one", listen)
eq_(list(self.Target().dispatch.event_one), [listen])
assert_raises(
exc.InvalidRequestError,
event.listen,
listen,
"event_one",
self.Target,
)
| CustomTargetsTest |
python | kamyu104__LeetCode-Solutions | Python/neighboring-bitwise-xor.py | {
"start": 37,
"end": 248
} | class ____(object):
def doesValidArrayExist(self, derived):
"""
:type derived: List[int]
:rtype: bool
"""
return reduce(lambda total, x: total^x, derived, 0) == 0
| Solution |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0006_add_config_field.py | {
"start": 145,
"end": 555
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0005_remove-version-alias"),
]
operations = [
migrations.AddField(
model_name="build",
name="_config",
field=jsonfield.fields.JSONField(
default=dict, verbose_name="Configuration used in the build"
),
),
]
| Migration |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_stats.py | {
"start": 153,
"end": 4730
} | class ____(APITestCase):
def setUp(self) -> None:
self.superuser = self.create_user(email="superuser@example.com", is_superuser=True)
self.user = self.create_user(email="user@example.com")
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.published_app = self.create_sentry_app(
name="Published App", organization=self.org, published=True
)
self.unowned_published_app = self.create_sentry_app(
name="Unowned Published App", organization=self.create_organization(), published=True
)
self.unpublished_app = self.create_sentry_app(name="Unpublished App", organization=self.org)
self.unowned_unpublished_app = self.create_sentry_app(
name="Unowned Unpublished App", organization=self.create_organization()
)
self.internal_app = self.create_internal_integration(organization=self.org)
self.published_app_install = self.create_sentry_app_installation(
slug=self.published_app.slug, organization=self.create_organization()
)
self.unowned_published_app_install = self.create_sentry_app_installation(
slug=self.unowned_published_app.slug, organization=self.create_organization()
)
def test_superuser_sees_unowned_published_stats(self) -> None:
self.login_as(user=self.superuser, superuser=True)
url = reverse("sentry-api-0-sentry-app-stats", args=[self.unowned_published_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["totalInstalls"] == 1
assert response.data["totalUninstalls"] == 0
install_epoch = int(
self.unowned_published_app_install.date_added.replace(
microsecond=0, second=0, minute=0
).timestamp()
)
assert (install_epoch, 1) in response.data["installStats"]
def test_superuser_sees_unowned_unpublished_stats(self) -> None:
self.login_as(user=self.superuser, superuser=True)
url = reverse("sentry-api-0-sentry-app-stats", args=[self.unowned_unpublished_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["totalInstalls"] == 0
assert response.data["totalUninstalls"] == 0
def test_user_sees_owned_published_stats(self) -> None:
self.login_as(self.user)
url = reverse("sentry-api-0-sentry-app-stats", args=[self.published_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["totalInstalls"] == 1
assert response.data["totalUninstalls"] == 0
install_epoch = int(
self.published_app_install.date_added.replace(
microsecond=0, second=0, minute=0
).timestamp()
)
assert (install_epoch, 1) in response.data["installStats"]
def test_user_does_not_see_unowned_published_stats(self) -> None:
self.login_as(self.user)
url = reverse("sentry-api-0-sentry-app-stats", args=[self.unowned_published_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 403
assert response.data["detail"] == "You do not have permission to perform this action."
def test_user_sees_owned_unpublished_stats(self) -> None:
self.login_as(self.user)
url = reverse("sentry-api-0-sentry-app-stats", args=[self.unpublished_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["totalInstalls"] == 0
assert response.data["totalUninstalls"] == 0
def test_user_sees_internal_stats(self) -> None:
self.login_as(self.user)
url = reverse("sentry-api-0-sentry-app-stats", args=[self.internal_app.slug])
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["totalInstalls"] == 1
assert response.data["totalUninstalls"] == 0
def test_invalid_startend_throws_error(self) -> None:
self.login_as(self.user)
url = "%s?since=1569523068&until=1566931068" % reverse(
"sentry-api-0-sentry-app-stats", args=[self.published_app.slug]
)
response = self.client.get(url, format="json")
assert response.status_code == 400
| GetSentryAppStatsTest |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/integrations/duckdb/reference/multiple_dataframe_types.py | {
"start": 438,
"end": 1623
} | class ____(DuckDBIOManager):
@staticmethod
def type_handlers():
"""type_handlers should return a list of the TypeHandlers that the I/O manager can use.
Here we return the DuckDBPandasTypeHandler, DuckDBPySparkTypeHandler, and DuckDBPolarsTypeHandler so that the I/O
manager can store Pandas DataFrames, PySpark DataFrames, and Polars DataFrames.
"""
return [
DuckDBPandasTypeHandler(),
DuckDBPySparkTypeHandler(),
DuckDBPolarsTypeHandler(),
]
@staticmethod
def default_load_type() -> Optional[type]:
"""If an asset is not annotated with an return type, default_load_type will be used to
determine which TypeHandler to use to store and load the output.
In this case, unannotated assets will be stored and loaded as Pandas DataFrames.
"""
return pd.DataFrame
defs = Definitions(
assets=[iris_dataset, rose_dataset],
resources={
"io_manager": DuckDBPandasPySparkPolarsIOManager(
database="path/to/my_duckdb_database.duckdb",
schema="IRIS",
)
},
)
# end_example
| DuckDBPandasPySparkPolarsIOManager |
python | pytorch__pytorch | torch/_inductor/output_code.py | {
"start": 35867,
"end": 39326
} | class ____(OutputCode):
"""
OutputCode for regional inductor compilation results.
Regional inductor returns a torch.fx.GraphModule that contains both
compiled regions (via standalone_compile) and eager regions. This needs
special serialization using GraphPickler instead of standard pickle.
The serialization strategy stores the GraphModule as bytes using
GraphPickler.dumps(), which handles FakeTensors, AOTCompiledArtifacts,
and other special objects that standard pickle cannot handle.
"""
# The serialized graph module as bytes (using GraphPickler)
_serialized_graph_module: Optional[bytes] = dataclasses.field(
default=None, init=False
)
# The actual graph module (cleared during serialization)
_graph_module: Optional[torch.fx.GraphModule] = dataclasses.field(
default=None, init=False
)
def __init__(self, graph_module: torch.fx.GraphModule):
"""
Args:
graph_module: The torch.fx.GraphModule returned by regional_inductor
"""
super().__init__()
self._graph_module = graph_module
self._serialized_graph_module = None
def __call__(self, inputs: Sequence[Any]) -> Any:
"""Execute the regional compiled graph."""
if self._graph_module is None:
raise RuntimeError(
"RegionalOutputCode has no graph module loaded. "
"Did you forget to call post_compile()?"
)
return self._graph_module(*inputs)
def post_compile(
self,
example_inputs: Sequence[InputType],
constants: CompiledFxGraphConstants,
graph_kwargs: _CompileFxKwargs,
) -> None:
"""
Post-compile processing for regional inductor.
This deserializes the GraphModule from bytes using GraphPickler,
extracting the fake_mode from example_inputs.
"""
if self._graph_module is not None:
return
assert self._serialized_graph_module is not None
# Get fake mode from example inputs
from torch._guards import detect_fake_mode
fake_mode = detect_fake_mode(example_inputs)
if fake_mode is None:
raise RuntimeError(
"Could not detect fake mode from example inputs. "
"Regional inductor requires fake mode for deserialization."
)
# Deserialize the graph module
from torch.fx._graph_pickler import GraphPickler
gm = GraphPickler.loads(self._serialized_graph_module, fake_mode)
assert isinstance(gm, torch.fx.GraphModule)
gm.recompile()
self._graph_module = gm
def set_triton_bundle(self, triton_bundle: Any) -> None:
"""Regional inductor doesn't use triton bundles directly."""
def prepare_for_serialization(self) -> None:
"""
Prepare for serialization by converting the GraphModule to bytes.
This uses GraphPickler to serialize the graph module since it contains
special objects like FakeTensors and AOTCompiledArtifacts that need
custom pickling.
"""
if self._graph_module is not None:
from torch.fx._graph_pickler import GraphPickler
self._serialized_graph_module = GraphPickler.dumps(self._graph_module)
# Clear the graph module to avoid pickling it with standard pickle
self._graph_module = None
| RegionalOutputCode |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/selector.py | {
"start": 7720,
"end": 8135
} | class ____(ComputedBase):
"""Streaming dead click selector composite condition class."""
@staticmethod
def visit_eq(value: list[QueryType]) -> Condition:
return contains(DeadClickSelectorComposite.visit_eq(value))
@staticmethod
def visit_neq(value: list[QueryType]) -> Condition:
return does_not_contain(DeadClickSelectorComposite.visit_eq(value))
| SumOfDeadClickSelectorComposite |
python | neetcode-gh__leetcode | python/0213-house-robber-ii.py | {
"start": 0,
"end": 324
} | class ____:
def rob(self, nums: List[int]) -> int:
return max(nums[0], self.helper(nums[1:]), self.helper(nums[:-1]))
def helper(self, nums):
rob1, rob2 = 0, 0
for n in nums:
newRob = max(rob1 + n, rob2)
rob1 = rob2
rob2 = newRob
return rob2
| Solution |
python | sympy__sympy | sympy/matrices/common.py | {
"start": 24287,
"end": 37957
} | class ____(MatrixRequired):
"""Construction of special matrices"""
@classmethod
def _eval_diag(cls, rows, cols, diag_dict):
"""diag_dict is a defaultdict containing
all the entries of the diagonal matrix."""
def entry(i, j):
return diag_dict[(i, j)]
return cls._new(rows, cols, entry)
@classmethod
def _eval_eye(cls, rows, cols):
vals = [cls.zero]*(rows*cols)
vals[::cols+1] = [cls.one]*min(rows, cols)
return cls._new(rows, cols, vals, copy=False)
@classmethod
def _eval_jordan_block(cls, size: int, eigenvalue, band='upper'):
if band == 'lower':
def entry(i, j):
if i == j:
return eigenvalue
elif j + 1 == i:
return cls.one
return cls.zero
else:
def entry(i, j):
if i == j:
return eigenvalue
elif i + 1 == j:
return cls.one
return cls.zero
return cls._new(size, size, entry)
@classmethod
def _eval_ones(cls, rows, cols):
def entry(i, j):
return cls.one
return cls._new(rows, cols, entry)
@classmethod
def _eval_zeros(cls, rows, cols):
return cls._new(rows, cols, [cls.zero]*(rows*cols), copy=False)
@classmethod
def _eval_wilkinson(cls, n):
def entry(i, j):
return cls.one if i + 1 == j else cls.zero
D = cls._new(2*n + 1, 2*n + 1, entry)
wminus = cls.diag(list(range(-n, n + 1)), unpack=True) + D + D.T
wplus = abs(cls.diag(list(range(-n, n + 1)), unpack=True)) + D + D.T
return wminus, wplus
@classmethod
def diag(kls, *args, strict=False, unpack=True, rows=None, cols=None, **kwargs):
"""Returns a matrix with the specified diagonal.
If matrices are passed, a block-diagonal matrix
is created (i.e. the "direct sum" of the matrices).
kwargs
======
rows : rows of the resulting matrix; computed if
not given.
cols : columns of the resulting matrix; computed if
not given.
cls : class for the resulting matrix
unpack : bool which, when True (default), unpacks a single
sequence rather than interpreting it as a Matrix.
strict : bool which, when False (default), allows Matrices to
have variable-length rows.
Examples
========
>>> from sympy import Matrix
>>> Matrix.diag(1, 2, 3)
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
The current default is to unpack a single sequence. If this is
not desired, set `unpack=False` and it will be interpreted as
a matrix.
>>> Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3)
True
When more than one element is passed, each is interpreted as
something to put on the diagonal. Lists are converted to
matrices. Filling of the diagonal always continues from
the bottom right hand corner of the previous item: this
will create a block-diagonal matrix whether the matrices
are square or not.
>>> col = [1, 2, 3]
>>> row = [[4, 5]]
>>> Matrix.diag(col, row)
Matrix([
[1, 0, 0],
[2, 0, 0],
[3, 0, 0],
[0, 4, 5]])
When `unpack` is False, elements within a list need not all be
of the same length. Setting `strict` to True would raise a
ValueError for the following:
>>> Matrix.diag([[1, 2, 3], [4, 5], [6]], unpack=False)
Matrix([
[1, 2, 3],
[4, 5, 0],
[6, 0, 0]])
The type of the returned matrix can be set with the ``cls``
keyword.
>>> from sympy import ImmutableMatrix
>>> from sympy.utilities.misc import func_name
>>> func_name(Matrix.diag(1, cls=ImmutableMatrix))
'ImmutableDenseMatrix'
A zero dimension matrix can be used to position the start of
the filling at the start of an arbitrary row or column:
>>> from sympy import ones
>>> r2 = ones(0, 2)
>>> Matrix.diag(r2, 1, 2)
Matrix([
[0, 0, 1, 0],
[0, 0, 0, 2]])
See Also
========
eye
diagonal
.dense.diag
.expressions.blockmatrix.BlockMatrix
.sparsetools.banded
"""
from sympy.matrices.matrixbase import MatrixBase
from sympy.matrices.dense import Matrix
from sympy.matrices import SparseMatrix
klass = kwargs.get('cls', kls)
if unpack and len(args) == 1 and is_sequence(args[0]) and \
not isinstance(args[0], MatrixBase):
args = args[0]
# fill a default dict with the diagonal entries
diag_entries = defaultdict(int)
rmax = cmax = 0 # keep track of the biggest index seen
for m in args:
if isinstance(m, list):
if strict:
# if malformed, Matrix will raise an error
_ = Matrix(m)
r, c = _.shape
m = _.tolist()
else:
r, c, smat = SparseMatrix._handle_creation_inputs(m)
for (i, j), _ in smat.items():
diag_entries[(i + rmax, j + cmax)] = _
m = [] # to skip process below
elif hasattr(m, 'shape'): # a Matrix
# convert to list of lists
r, c = m.shape
m = m.tolist()
else: # in this case, we're a single value
diag_entries[(rmax, cmax)] = m
rmax += 1
cmax += 1
continue
# process list of lists
for i, mi in enumerate(m):
for j, _ in enumerate(mi):
diag_entries[(i + rmax, j + cmax)] = _
rmax += r
cmax += c
if rows is None:
rows, cols = cols, rows
if rows is None:
rows, cols = rmax, cmax
else:
cols = rows if cols is None else cols
if rows < rmax or cols < cmax:
raise ValueError(filldedent('''
The constructed matrix is {} x {} but a size of {} x {}
was specified.'''.format(rmax, cmax, rows, cols)))
return klass._eval_diag(rows, cols, diag_entries)
@classmethod
def eye(kls, rows, cols=None, **kwargs):
"""Returns an identity matrix.
Parameters
==========
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
if rows < 0 or cols < 0:
raise ValueError("Cannot create a {} x {} matrix. "
"Both dimensions must be positive".format(rows, cols))
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_eye(rows, cols)
@classmethod
def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs):
"""Returns a Jordan block
Parameters
==========
size : Integer, optional
Specifies the shape of the Jordan block matrix.
eigenvalue : Number or Symbol
Specifies the value for the main diagonal of the matrix.
.. note::
The keyword ``eigenval`` is also specified as an alias
of this keyword, but it is not recommended to use.
We may deprecate the alias in later release.
band : 'upper' or 'lower', optional
Specifies the position of the off-diagonal to put `1` s on.
cls : Matrix, optional
Specifies the matrix class of the output form.
If it is not specified, the class type where the method is
being executed on will be returned.
Returns
=======
Matrix
A Jordan block matrix.
Raises
======
ValueError
If insufficient arguments are given for matrix size
specification, or no eigenvalue is given.
Examples
========
Creating a default Jordan block:
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> Matrix.jordan_block(4, x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
Creating an alternative Jordan block matrix where `1` is on
lower off-diagonal:
>>> Matrix.jordan_block(4, x, band='lower')
Matrix([
[x, 0, 0, 0],
[1, x, 0, 0],
[0, 1, x, 0],
[0, 0, 1, x]])
Creating a Jordan block with keyword arguments
>>> Matrix.jordan_block(size=4, eigenvalue=x)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
References
==========
.. [1] https://en.wikipedia.org/wiki/Jordan_matrix
"""
klass = kwargs.pop('cls', kls)
eigenval = kwargs.get('eigenval', None)
if eigenvalue is None and eigenval is None:
raise ValueError("Must supply an eigenvalue")
elif eigenvalue != eigenval and None not in (eigenval, eigenvalue):
raise ValueError(
"Inconsistent values are given: 'eigenval'={}, "
"'eigenvalue'={}".format(eigenval, eigenvalue))
else:
if eigenval is not None:
eigenvalue = eigenval
if size is None:
raise ValueError("Must supply a matrix size")
size = as_int(size)
return klass._eval_jordan_block(size, eigenvalue, band)
@classmethod
def ones(kls, rows, cols=None, **kwargs):
"""Returns a matrix of ones.
Parameters
==========
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_ones(rows, cols)
@classmethod
def zeros(kls, rows, cols=None, **kwargs):
"""Returns a matrix of zeros.
Parameters
==========
rows : rows of the matrix
cols : cols of the matrix (if None, cols=rows)
kwargs
======
cls : class of the returned matrix
"""
if cols is None:
cols = rows
if rows < 0 or cols < 0:
raise ValueError("Cannot create a {} x {} matrix. "
"Both dimensions must be positive".format(rows, cols))
klass = kwargs.get('cls', kls)
rows, cols = as_int(rows), as_int(cols)
return klass._eval_zeros(rows, cols)
@classmethod
def companion(kls, poly):
"""Returns a companion matrix of a polynomial.
Examples
========
>>> from sympy import Matrix, Poly, Symbol, symbols
>>> x = Symbol('x')
>>> c0, c1, c2, c3, c4 = symbols('c0:5')
>>> p = Poly(c0 + c1*x + c2*x**2 + c3*x**3 + c4*x**4 + x**5, x)
>>> Matrix.companion(p)
Matrix([
[0, 0, 0, 0, -c0],
[1, 0, 0, 0, -c1],
[0, 1, 0, 0, -c2],
[0, 0, 1, 0, -c3],
[0, 0, 0, 1, -c4]])
"""
poly = kls._sympify(poly)
if not isinstance(poly, Poly):
raise ValueError("{} must be a Poly instance.".format(poly))
if not poly.is_monic:
raise ValueError("{} must be a monic polynomial.".format(poly))
if not poly.is_univariate:
raise ValueError(
"{} must be a univariate polynomial.".format(poly))
size = poly.degree()
if not size >= 1:
raise ValueError(
"{} must have degree not less than 1.".format(poly))
coeffs = poly.all_coeffs()
def entry(i, j):
if j == size - 1:
return -coeffs[-1 - i]
elif i == j + 1:
return kls.one
return kls.zero
return kls._new(size, size, entry)
@classmethod
def wilkinson(kls, n, **kwargs):
"""Returns two square Wilkinson Matrix of size 2*n + 1
$W_{2n + 1}^-, W_{2n + 1}^+ =$ Wilkinson(n)
Examples
========
>>> from sympy import Matrix
>>> wminus, wplus = Matrix.wilkinson(3)
>>> wminus
Matrix([
[-3, 1, 0, 0, 0, 0, 0],
[ 1, -2, 1, 0, 0, 0, 0],
[ 0, 1, -1, 1, 0, 0, 0],
[ 0, 0, 1, 0, 1, 0, 0],
[ 0, 0, 0, 1, 1, 1, 0],
[ 0, 0, 0, 0, 1, 2, 1],
[ 0, 0, 0, 0, 0, 1, 3]])
>>> wplus
Matrix([
[3, 1, 0, 0, 0, 0, 0],
[1, 2, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 2, 1],
[0, 0, 0, 0, 0, 1, 3]])
References
==========
.. [1] https://blogs.mathworks.com/cleve/2013/04/15/wilkinsons-matrices-2/
.. [2] J. H. Wilkinson, The Algebraic Eigenvalue Problem, Claredon Press, Oxford, 1965, 662 pp.
"""
klass = kwargs.get('cls', kls)
n = as_int(n)
return klass._eval_wilkinson(n)
| MatrixSpecial |
python | gevent__gevent | src/greentest/3.11/test_wsgiref.py | {
"start": 1140,
"end": 2856
} | class ____(WSGIRequestHandler):
"""Non-socket HTTP handler"""
def setup(self):
self.connection = self.request
self.rfile, self.wfile = self.connection
def finish(self):
pass
def hello_app(environ,start_response):
start_response("200 OK", [
('Content-Type','text/plain'),
('Date','Mon, 05 Jun 2006 18:49:54 GMT')
])
return [b"Hello, world!"]
def header_app(environ, start_response):
start_response("200 OK", [
('Content-Type', 'text/plain'),
('Date', 'Mon, 05 Jun 2006 18:49:54 GMT')
])
return [';'.join([
environ['HTTP_X_TEST_HEADER'], environ['QUERY_STRING'],
environ['PATH_INFO']
]).encode('iso-8859-1')]
def run_amock(app=hello_app, data=b"GET / HTTP/1.0\n\n"):
server = make_server("", 80, app, MockServer, MockHandler)
inp = BufferedReader(BytesIO(data))
out = BytesIO()
olderr = sys.stderr
err = sys.stderr = StringIO()
try:
server.finish_request((inp, out), ("127.0.0.1",8888))
finally:
sys.stderr = olderr
return out.getvalue(), err.getvalue()
def compare_generic_iter(make_it, match):
"""Utility to compare a generic iterator with an iterable
This tests the iterator using iter()/next().
'make_it' must be a function returning a fresh
iterator to be tested (since this may test the iterator twice)."""
it = make_it()
if not iter(it) is it:
raise AssertionError
for item in match:
if not next(it) == item:
raise AssertionError
try:
next(it)
except StopIteration:
pass
else:
raise AssertionError("Too many items from .__next__()", it)
| MockHandler |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 28717,
"end": 29331
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(10, 10)
self.layer1 = torch.nn.Linear(10, 10)
self.layer2 = torch.nn.Linear(10, 10)
@property
def encoder_layers(self):
return [self.layer0, self.layer1, self.layer2]
def forward(self, x):
for layer in self.encoder_layers:
output = layer(x)
if layer is None or layer == self.layer0:
output = F.relu6(output)
else:
output = F.relu(output)
return output
| ModuleComparison |
python | walkccc__LeetCode | solutions/231. Power of Two/231.py | {
"start": 0,
"end": 99
} | class ____:
def isPowerOfTwo(self, n: int) -> bool:
return n >= 0 and n.bit_count() == 1
| Solution |
python | django-import-export__django-import-export | import_export/widgets.py | {
"start": 857,
"end": 2024
} | class ____:
"""Internal Mixin for shared logic with date and datetime conversions."""
def __init__(
self,
format=None,
input_formats=None,
default_format="%Y-%m-%d",
coerce_to_string=True,
):
super().__init__(coerce_to_string=coerce_to_string)
self.formats = (format,) if format else (input_formats or (default_format,))
def _parse_value(self, value, value_type):
"""Attempt to parse the value using the provided formats.
Raise ValueError if parsing fails."""
if not value:
return None
if isinstance(value, value_type):
return value
for format_ in self.formats:
try:
parsed_date = datetime.strptime(value, format_)
if value_type is date:
return parsed_date.date()
if value_type is time:
return parsed_date.time()
return parsed_date
except (ValueError, TypeError) as e:
logger.debug(str(e))
raise ValueError("Value could not be parsed using defined formats.")
| _ParseDateTimeMixin |
python | apache__thrift | lib/py/src/protocol/TBinaryProtocol.py | {
"start": 903,
"end": 6451
} | class ____(TProtocolBase):
"""Binary implementation of the Thrift protocol driver."""
# NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
# positive, converting this into a long. If we hardcode the int value
# instead it'll stay in 32 bit-land.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
def __init__(self, trans, strictRead=False, strictWrite=True, **kwargs):
TProtocolBase.__init__(self, trans)
self.strictRead = strictRead
self.strictWrite = strictWrite
self.string_length_limit = kwargs.get('string_length_limit', None)
self.container_length_limit = kwargs.get('container_length_limit', None)
def _check_string_length(self, length):
self._check_length(self.string_length_limit, length)
def _check_container_length(self, length):
self._check_length(self.container_length_limit, length)
def writeMessageBegin(self, name, type, seqid):
if self.strictWrite:
self.writeI32(TBinaryProtocol.VERSION_1 | type)
self.writeString(name)
self.writeI32(seqid)
else:
self.writeString(name)
self.writeByte(type)
self.writeI32(seqid)
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, type, id):
self.writeByte(type)
self.writeI16(id)
def writeFieldEnd(self):
pass
def writeFieldStop(self):
self.writeByte(TType.STOP)
def writeMapBegin(self, ktype, vtype, size):
self.writeByte(ktype)
self.writeByte(vtype)
self.writeI32(size)
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeSetEnd(self):
pass
def writeBool(self, bool):
if bool:
self.writeByte(1)
else:
self.writeByte(0)
def writeByte(self, byte):
buff = pack("!b", byte)
self.trans.write(buff)
def writeI16(self, i16):
buff = pack("!h", i16)
self.trans.write(buff)
def writeI32(self, i32):
buff = pack("!i", i32)
self.trans.write(buff)
def writeI64(self, i64):
buff = pack("!q", i64)
self.trans.write(buff)
def writeDouble(self, dub):
buff = pack("!d", dub)
self.trans.write(buff)
def writeBinary(self, str):
self.writeI32(len(str))
self.trans.write(str)
def readMessageBegin(self):
sz = self.readI32()
if sz < 0:
version = sz & TBinaryProtocol.VERSION_MASK
if version != TBinaryProtocol.VERSION_1:
raise TProtocolException(
type=TProtocolException.BAD_VERSION,
message='Bad version in readMessageBegin: %d' % (sz))
type = sz & TBinaryProtocol.TYPE_MASK
name = self.readString()
seqid = self.readI32()
else:
if self.strictRead:
raise TProtocolException(type=TProtocolException.BAD_VERSION,
message='No protocol version header')
name = self.trans.readAll(sz).decode('utf-8')
type = self.readByte()
seqid = self.readI32()
return (name, type, seqid)
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
type = self.readByte()
if type == TType.STOP:
return (None, type, 0)
id = self.readI16()
return (None, type, id)
def readFieldEnd(self):
pass
def readMapBegin(self):
ktype = self.readByte()
vtype = self.readByte()
size = self.readI32()
self._check_container_length(size)
return (ktype, vtype, size)
def readMapEnd(self):
pass
def readListBegin(self):
etype = self.readByte()
size = self.readI32()
self._check_container_length(size)
return (etype, size)
def readListEnd(self):
pass
def readSetBegin(self):
etype = self.readByte()
size = self.readI32()
self._check_container_length(size)
return (etype, size)
def readSetEnd(self):
pass
def readBool(self):
byte = self.readByte()
if byte == 0:
return False
return True
def readByte(self):
buff = self.trans.readAll(1)
val, = unpack('!b', buff)
return val
def readI16(self):
buff = self.trans.readAll(2)
val, = unpack('!h', buff)
return val
def readI32(self):
buff = self.trans.readAll(4)
val, = unpack('!i', buff)
return val
def readI64(self):
buff = self.trans.readAll(8)
val, = unpack('!q', buff)
return val
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('!d', buff)
return val
def readBinary(self):
size = self.readI32()
self._check_string_length(size)
s = self.trans.readAll(size)
return s
| TBinaryProtocol |
python | networkx__networkx | networkx/utils/misc.py | {
"start": 8650,
"end": 11045
} | class ____(random.Random):
"""Provide the random.random algorithms using a numpy.random bit generator
The intent is to allow people to contribute code that uses Python's random
library, but still allow users to provide a single easily controlled random
bit-stream for all work with NetworkX. This implementation is based on helpful
comments and code from Robert Kern on NumPy's GitHub Issue #24458.
This implementation supersedes that of `PythonRandomInterface` which rewrote
methods to account for subtle differences in API between `random` and
`numpy.random`. Instead this subclasses `random.Random` and overwrites
the methods `random`, `getrandbits`, `getstate`, `setstate` and `seed`.
It makes them use the rng values from an input numpy `RandomState` or `Generator`.
Those few methods allow the rest of the `random.Random` methods to provide
the API interface of `random.random` while using randomness generated by
a numpy generator.
"""
def __init__(self, rng=None):
try:
import numpy as np
except ImportError:
msg = "numpy not found, only random.random available."
warnings.warn(msg, ImportWarning)
if rng is None:
self._rng = np.random.mtrand._rand
else:
self._rng = rng
# Not necessary, given our overriding of gauss() below, but it's
# in the superclass and nominally public, so initialize it here.
self.gauss_next = None
def random(self):
"""Get the next random number in the range 0.0 <= X < 1.0."""
return self._rng.random()
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
if k < 0:
raise ValueError("number of bits must be non-negative")
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(self._rng.bytes(numbytes), "big")
return x >> (numbytes * 8 - k) # trim excess bits
def getstate(self):
return self._rng.__getstate__()
def setstate(self, state):
self._rng.__setstate__(state)
def seed(self, *args, **kwds):
"Do nothing override method."
raise NotImplementedError("seed() not implemented in PythonRandomViaNumpyBits")
##################################################################
| PythonRandomViaNumpyBits |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 133088,
"end": 136398
} | class ____(
TestCase,
SnubaTestCase,
CorePostProcessGroupTestMixin,
InboxTestMixin,
RuleProcessorTestMixin,
SnoozeTestMixin,
SnoozeTestSkipSnoozeMixin,
PerformanceIssueTestCase,
KickOffSeerAutomationTestMixin,
TriageSignalsV0TestMixin,
):
def create_event(self, data, project_id, assert_no_errors=True):
fingerprint = data["fingerprint"][0] if data.get("fingerprint") else "some_group"
fingerprint = f"{PerformanceNPlusOneGroupType.type_id}-{fingerprint}"
return self.create_performance_issue(fingerprint=fingerprint)
def call_post_process_group(
self, is_new, is_regression, is_new_group_environment, event, cache_key=None
):
if cache_key is None:
cache_key = write_event_to_cache(event)
with self.feature(PerformanceNPlusOneGroupType.build_post_process_group_feature_name()):
post_process_group(
is_new=is_new,
is_regression=is_regression,
is_new_group_environment=is_new_group_environment,
cache_key=cache_key,
group_id=event.group_id,
project_id=event.project_id,
eventstream_type=EventStreamEventType.Error.value,
)
return cache_key
@patch("sentry.tasks.post_process.handle_owner_assignment")
@patch("sentry.tasks.post_process.handle_auto_assignment")
@patch("sentry.tasks.post_process.process_rules")
@patch("sentry.tasks.post_process.run_post_process_job")
@patch("sentry.rules.processing.processor.RuleProcessor")
@patch("sentry.signals.transaction_processed.send_robust")
@patch("sentry.signals.event_processed.send_robust")
def test_full_pipeline_with_group_states(
self,
event_processed_signal_mock,
transaction_processed_signal_mock,
mock_processor,
run_post_process_job_mock,
mock_process_rules,
mock_handle_auto_assignment,
mock_handle_owner_assignment,
):
event = self.create_performance_issue()
assert event.group
# TODO(jangjodi): Fix this ordering test; side_effects should be a function (lambda),
# but because post-processing is async, this causes the assert to fail because it doesn't
# wait for the side effects to happen
call_order = [mock_handle_owner_assignment, mock_handle_auto_assignment, mock_process_rules]
mock_handle_owner_assignment.side_effect = None
mock_handle_auto_assignment.side_effect = None
mock_process_rules.side_effect = None
post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
cache_key="dummykey",
group_id=event.group_id,
occurrence_id=event.occurrence_id,
project_id=self.project.id,
eventstream_type=EventStreamEventType.Error.value,
)
assert event_processed_signal_mock.call_count == 0
assert mock_processor.call_count == 0
assert run_post_process_job_mock.call_count == 1
assert call_order == [
mock_handle_owner_assignment,
mock_handle_auto_assignment,
mock_process_rules,
]
| PostProcessGroupPerformanceTest |
python | PrefectHQ__prefect | tests/workers/test_utilities.py | {
"start": 2384,
"end": 3152
} | class ____:
async def test_get_default_base_job_template_for_local_registry(self):
result = await get_default_base_job_template_for_infrastructure_type("process")
assert result == ProcessWorker.get_default_base_job_template()
async def test_get_default_base_job_template_for_collection_registry(self):
result = await get_default_base_job_template_for_infrastructure_type("fake")
assert result == FAKE_DEFAULT_BASE_JOB_TEMPLATE
async def test_get_default_base_job_template_for_non_existent_infrastructure_type(
self,
):
result = await get_default_base_job_template_for_infrastructure_type(
"non-existent"
)
assert result is None
| TestGetDefaultBaseJobTemplateForInfrastructureType |
python | keras-team__keras | keras/src/callbacks/csv_logger_test.py | {
"start": 361,
"end": 5831
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_CSVLogger(self):
OUTPUT_DIM = 1
np.random.seed(1337)
temp_dir = tempfile.TemporaryDirectory()
filepath = os.path.join(temp_dir.name, "log.tsv")
sep = "\t"
x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
y_train = np.random.random((TRAIN_SAMPLES, OUTPUT_DIM))
x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
y_test = np.random.random((TEST_SAMPLES, OUTPUT_DIM))
def make_model():
np.random.seed(1337)
model = Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(OUTPUT_DIM),
]
)
model.compile(
loss="mse",
optimizer="sgd",
metrics=["mse"],
)
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0,
)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = " ".join(list_lines)
assert len(re.findall("epoch", output)) == 1
os.remove(filepath)
# case 3, Verify Val. loss also registered when Validation Freq > 1
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
hist = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
validation_freq=3,
callbacks=cbks,
epochs=5,
verbose=0,
)
assert os.path.exists(filepath)
# Verify that validation loss is registered at val. freq
with open(filepath) as csvfile:
rows = csv.DictReader(csvfile, delimiter=sep)
for idx, row in enumerate(rows, 1):
self.assertIn("val_loss", row)
if idx == 3:
self.assertEqual(
row["val_loss"], str(hist.history["val_loss"][0])
)
else:
self.assertEqual(row["val_loss"], "NA")
@pytest.mark.requires_trainable_backend
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN
# callback does not result in invalid CSVs.
tmpdir = tempfile.TemporaryDirectory()
csv_logfile = os.path.join(tmpdir.name, "csv_logger.csv")
NUM_CLASSES = 2
np.random.seed(1337)
x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
y_test = numerical_utils.to_categorical(y_test)
y_train = numerical_utils.to_categorical(y_train)
model = Sequential()
initializer = initializers.Constant(value=1e5)
for _ in range(5):
model.add(
layers.Dense(
2,
activation="relu",
kernel_initializer=initializer,
)
)
model.add(layers.Dense(NUM_CLASSES))
model.compile(loss="mean_squared_error", optimizer="sgd")
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=[
callbacks.TerminateOnNaN(),
callbacks.CSVLogger(csv_logfile),
],
epochs=20,
)
loss = history.history["loss"]
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
values = []
with open(csv_logfile) as f:
# On Windows, due to \r\n line ends, we may end up reading empty
# lines after each line. Skip empty lines.
values = [x for x in csv.reader(f) if x]
self.assertIn("nan", values[-1], "NaN not logged in CSV Logger.")
| CSVLoggerTest |
python | openai__openai-python | src/openai/types/fine_tuning/alpha/grader_validate_response.py | {
"start": 645,
"end": 773
} | class ____(BaseModel):
grader: Optional[Grader] = None
"""The grader used for the fine-tuning job."""
| GraderValidateResponse |
python | pytorch__pytorch | test/torch_np/test_basic.py | {
"start": 3209,
"end": 4640
} | class ____(TestCase):
@parametrize("func", [w.transpose])
@parametrize("axes", [(0, 2, 1), (1, 2, 0), None])
def test_andtuple_tensor(self, func, axes):
t = torch.ones((1, 2, 3))
ta = func(t, axes=axes)
assert isinstance(ta, w.ndarray)
# a np.transpose -specific test
if axes is None:
newshape = (3, 2, 1)
else:
newshape = tuple(t.shape[axes[i]] for i in range(w.ndim(t)))
assert ta.shape == newshape
@parametrize("func", [w.transpose])
@parametrize("axes", [(0, 2, 1), (1, 2, 0), None])
def test_andtuple_list(self, func, axes):
t = [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]] # shape = (1, 2, 3)
ta = func(t, axes=axes)
assert isinstance(ta, w.ndarray)
@parametrize("func", [w.transpose])
@parametrize("axes", [(0, 2, 1), (1, 2, 0), None])
def test_andtuple_array(self, func, axes):
t = w.asarray([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]])
ta = func(t, axes=axes)
assert isinstance(ta, w.ndarray)
if axes is None:
newshape = (3, 2, 1)
else:
newshape = tuple(t.shape[axes[i]] for i in range(t.ndim))
assert ta.shape == newshape
arr_shape_funcs = [
w.reshape,
w.empty_like,
w.ones_like,
functools.partial(w.full_like, fill_value=42),
w.broadcast_to,
]
@instantiate_parametrized_tests
| TestOneArrAndAxesTuple |
python | gevent__gevent | src/gevent/_patcher.py | {
"start": 7337,
"end": 9020
} | class ____(object):
"""
Context manager that caches ``platform.architecture``.
Some things that load shared libraries (like Cryptodome, via
dnspython) invoke ``platform.architecture()`` for each one. That
in turn wants to fork and run commands , which in turn wants to
call ``threading._after_fork`` if the GIL has been initialized.
All of that means that certain imports done early may wind up
wanting to have the hub initialized potentially much earlier than
before.
Part of the fix is to observe when that happens and delay
initializing parts of gevent until as late as possible (e.g., we
delay importing and creating the resolver until the hub needs it,
unless explicitly configured).
The rest of the fix is to avoid the ``_after_fork`` issues by
first caching the results of platform.architecture before doing
patched imports.
(See events.py for similar issues with platform, and
test__threading_2.py for notes about threading._after_fork if the
GIL has been initialized)
"""
_arch_result = None
_orig_arch = None
_platform = None
def __enter__(self):
import platform
self._platform = platform
self._arch_result = platform.architecture()
self._orig_arch = platform.architecture
def arch(*args, **kwargs):
if not args and not kwargs:
return self._arch_result
return self._orig_arch(*args, **kwargs)
platform.architecture = arch
return self
def __exit__(self, *_args):
self._platform.architecture = self._orig_arch
self._platform = None
| cached_platform_architecture |
python | neetcode-gh__leetcode | python/1838-frequency-of-the-most-frequent-element.py | {
"start": 0,
"end": 405
} | class ____:
def maxFrequency(self, nums: List[int], k: int) -> int:
nums.sort()
l, r = 0, 0
res, total = 0, 0
while r < len(nums):
total += nums[r]
while nums[r] * (r - l + 1) > total + k:
total -= nums[l]
l += 1
res = max(res, r - l + 1)
r += 1
return res
| Solution |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 13296,
"end": 13448
} | class ____(Type):
"""A type specified by name and, optionally, the module it is in."""
name: str
def __str__(self):
return self.name
| NamedType |
python | cython__cython | pyximport/pyximport.py | {
"start": 9552,
"end": 11579
} | class ____(MetaPathFinder):
def __init__(self, extension=PY_EXT, pyxbuild_dir=None, inplace=False, language_level=None):
self.pyxbuild_dir = pyxbuild_dir
self.inplace = inplace
self.language_level = language_level
self.extension = extension
self.uncompilable_modules = {}
self.blocked_modules = ['Cython', 'pyxbuild', 'pyximport.pyxbuild',
'distutils', 'cython']
self.blocked_packages = ['Cython.', 'distutils.']
self.found = False
def find_spec(self, fullname, path, target=None):
if self.found:
return None
if fullname in sys.modules:
return None
if any([fullname.startswith(pkg) for pkg in self.blocked_packages]):
return None
if fullname in self.blocked_modules:
# prevent infinite recursion
return None
self.blocked_modules.append(fullname)
name = fullname
if not path:
path = [os.getcwd()] + sys.path # top level import --
try:
for entry in path:
if os.path.isdir(os.path.join(entry, name)):
# this module has child modules
filename = os.path.join(entry, name, "__init__" + self.extension)
submodule_locations = [os.path.join(entry, name)]
else:
filename = os.path.join(entry, name + self.extension)
submodule_locations = None
if not os.path.exists(filename):
continue
self.found = True
return spec_from_file_location(
fullname, filename,
loader=PyxImportLoader(filename, self.pyxbuild_dir, self.inplace, self.language_level),
submodule_search_locations=submodule_locations)
finally:
self.blocked_modules.pop()
return None # we don't know how to import this
| PyImportMetaFinder |
python | pytorch__pytorch | torch/testing/_internal/common_dist_composable.py | {
"start": 1878,
"end": 2313
} | class ____(nn.Module):
# Define this class to achieve a desired nested wrapping using the module
# wrap policy with `nn.Sequential`
def __init__(self, *modules: tuple[nn.Module, ...]) -> None:
super().__init__()
self._module_sequence = list(modules)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for module in self._module_sequence:
x = module(x)
return x
| FakeSequential |
python | Textualize__textual | docs/examples/widgets/progress_bar.py | {
"start": 169,
"end": 1169
} | class ____(App[None]):
CSS_PATH = "progress_bar.tcss"
TITLE = "Funding tracking"
def compose(self) -> ComposeResult:
yield Header()
with Center():
yield Label("Funding: ")
yield ProgressBar(total=100, show_eta=False) # (1)!
with Center():
yield Input(placeholder="$$$")
yield Button("Donate")
yield VerticalScroll(id="history")
def on_button_pressed(self) -> None:
self.add_donation()
def on_input_submitted(self) -> None:
self.add_donation()
def add_donation(self) -> None:
text_value = self.query_one(Input).value
try:
value = int(text_value)
except ValueError:
return
self.query_one(ProgressBar).advance(value)
self.query_one(VerticalScroll).mount(Label(f"Donation for ${value} received!"))
self.query_one(Input).value = ""
if __name__ == "__main__":
FundingProgressApp().run()
| FundingProgressApp |
python | sympy__sympy | sympy/physics/mechanics/joint.py | {
"start": 31429,
"end": 41230
} | class ____(Joint):
"""Prismatic (Sliding) Joint.
.. image:: PrismaticJoint.svg
Explanation
===========
It is defined such that the child body translates with respect to the parent
body along the body-fixed joint axis. The location of the joint is defined
by two points, one in each body, which coincide when the generalized
coordinate is zero. The direction cosine matrix between the
parent_interframe and child_interframe is the identity matrix. Therefore,
the direction cosine matrix between the parent and child frames is fully
defined by the definition of the intermediate frames. The page on the joints
framework gives a more detailed explanation of the intermediate frames.
Parameters
==========
name : string
A unique name for the joint.
parent : Particle or RigidBody
The parent body of joint.
child : Particle or RigidBody
The child body of joint.
coordinates : dynamicsymbol, optional
Generalized coordinates of the joint. The default value is
``dynamicsymbols(f'q_{joint.name}')``.
speeds : dynamicsymbol, optional
Generalized speeds of joint. The default value is
``dynamicsymbols(f'u_{joint.name}')``.
parent_point : Point or Vector, optional
Attachment point where the joint is fixed to the parent body. If a
vector is provided, then the attachment point is computed by adding the
vector to the body's mass center. The default value is the parent's mass
center.
child_point : Point or Vector, optional
Attachment point where the joint is fixed to the child body. If a
vector is provided, then the attachment point is computed by adding the
vector to the body's mass center. The default value is the child's mass
center.
parent_axis : Vector, optional
.. deprecated:: 1.12
Axis fixed in the parent body which aligns with an axis fixed in the
child body. The default is the x axis of parent's reference frame.
For more information on this deprecation, see
:ref:`deprecated-mechanics-joint-axis`.
child_axis : Vector, optional
.. deprecated:: 1.12
Axis fixed in the child body which aligns with an axis fixed in the
parent body. The default is the x axis of child's reference frame.
For more information on this deprecation, see
:ref:`deprecated-mechanics-joint-axis`.
parent_interframe : ReferenceFrame, optional
Intermediate frame of the parent body with respect to which the joint
transformation is formulated. If a Vector is provided then an interframe
is created which aligns its X axis with the given vector. The default
value is the parent's own frame.
child_interframe : ReferenceFrame, optional
Intermediate frame of the child body with respect to which the joint
transformation is formulated. If a Vector is provided then an interframe
is created which aligns its X axis with the given vector. The default
value is the child's own frame.
joint_axis : Vector
The axis along which the translation occurs. Note that the components
of this axis are the same in the parent_interframe and child_interframe.
parent_joint_pos : Point or Vector, optional
.. deprecated:: 1.12
This argument is replaced by parent_point and will be removed in a
future version.
See :ref:`deprecated-mechanics-joint-pos` for more information.
child_joint_pos : Point or Vector, optional
.. deprecated:: 1.12
This argument is replaced by child_point and will be removed in a
future version.
See :ref:`deprecated-mechanics-joint-pos` for more information.
Attributes
==========
name : string
The joint's name.
parent : Particle or RigidBody
The joint's parent body.
child : Particle or RigidBody
The joint's child body.
coordinates : Matrix
Matrix of the joint's generalized coordinates.
speeds : Matrix
Matrix of the joint's generalized speeds.
parent_point : Point
Attachment point where the joint is fixed to the parent body.
child_point : Point
Attachment point where the joint is fixed to the child body.
parent_axis : Vector
The axis fixed in the parent frame that represents the joint.
child_axis : Vector
The axis fixed in the child frame that represents the joint.
parent_interframe : ReferenceFrame
Intermediate frame of the parent body with respect to which the joint
transformation is formulated.
child_interframe : ReferenceFrame
Intermediate frame of the child body with respect to which the joint
transformation is formulated.
kdes : Matrix
Kinematical differential equations of the joint.
Examples
=========
A single prismatic joint is created from two bodies and has the following
basic attributes:
>>> from sympy.physics.mechanics import RigidBody, PrismaticJoint
>>> parent = RigidBody('P')
>>> parent
P
>>> child = RigidBody('C')
>>> child
C
>>> joint = PrismaticJoint('PC', parent, child)
>>> joint
PrismaticJoint: PC parent: P child: C
>>> joint.name
'PC'
>>> joint.parent
P
>>> joint.child
C
>>> joint.parent_point
P_masscenter
>>> joint.child_point
C_masscenter
>>> joint.parent_axis
P_frame.x
>>> joint.child_axis
C_frame.x
>>> joint.coordinates
Matrix([[q_PC(t)]])
>>> joint.speeds
Matrix([[u_PC(t)]])
>>> child.frame.ang_vel_in(parent.frame)
0
>>> child.frame.dcm(parent.frame)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> joint.child_point.pos_from(joint.parent_point)
q_PC(t)*P_frame.x
To further demonstrate the use of the prismatic joint, the kinematics of two
masses sliding, one moving relative to a fixed body and the other relative
to the moving body. about the X axis of each connected body can be created
as follows.
>>> from sympy.physics.mechanics import PrismaticJoint, RigidBody
First create bodies to represent the fixed ceiling and one to represent
a particle.
>>> wall = RigidBody('W')
>>> Part1 = RigidBody('P1')
>>> Part2 = RigidBody('P2')
The first joint will connect the particle to the ceiling and the
joint axis will be about the X axis for each body.
>>> J1 = PrismaticJoint('J1', wall, Part1)
The second joint will connect the second particle to the first particle
and the joint axis will also be about the X axis for each body.
>>> J2 = PrismaticJoint('J2', Part1, Part2)
Once the joint is established the kinematics of the connected bodies can
be accessed. First the direction cosine matrices of Part relative
to the ceiling are found:
>>> Part1.frame.dcm(wall.frame)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> Part2.frame.dcm(wall.frame)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
The position of the particles' masscenter is found with:
>>> Part1.masscenter.pos_from(wall.masscenter)
q_J1(t)*W_frame.x
>>> Part2.masscenter.pos_from(wall.masscenter)
q_J1(t)*W_frame.x + q_J2(t)*P1_frame.x
The angular velocities of the two particle links can be computed with
respect to the ceiling.
>>> Part1.frame.ang_vel_in(wall.frame)
0
>>> Part2.frame.ang_vel_in(wall.frame)
0
And finally, the linear velocities of the two particles can be computed
with respect to the ceiling.
>>> Part1.masscenter.vel(wall.frame)
u_J1(t)*W_frame.x
>>> Part2.masscenter.vel(wall.frame)
u_J1(t)*W_frame.x + Derivative(q_J2(t), t)*P1_frame.x
"""
def __init__(self, name, parent, child, coordinates=None, speeds=None,
parent_point=None, child_point=None, parent_interframe=None,
child_interframe=None, parent_axis=None, child_axis=None,
joint_axis=None, parent_joint_pos=None, child_joint_pos=None):
self._joint_axis = joint_axis
super().__init__(name, parent, child, coordinates, speeds, parent_point,
child_point, parent_interframe, child_interframe,
parent_axis, child_axis, parent_joint_pos,
child_joint_pos)
def __str__(self):
return (f'PrismaticJoint: {self.name} parent: {self.parent} '
f'child: {self.child}')
@property
def joint_axis(self):
"""Axis along which the child translates with respect to the parent."""
return self._joint_axis
def _generate_coordinates(self, coordinate):
return self._fill_coordinate_list(coordinate, 1, 'q')
def _generate_speeds(self, speed):
return self._fill_coordinate_list(speed, 1, 'u')
def _orient_frames(self):
self._joint_axis = self._axis(self.joint_axis, self.parent_interframe)
self.child_interframe.orient_axis(
self.parent_interframe, self.joint_axis, 0)
def _set_angular_velocity(self):
self.child_interframe.set_ang_vel(self.parent_interframe, 0)
def _set_linear_velocity(self):
axis = self.joint_axis.normalize()
self.child_point.set_pos(self.parent_point, self.coordinates[0] * axis)
self.parent_point.set_vel(self._parent_frame, 0)
self.child_point.set_vel(self._child_frame, 0)
self.child_point.set_vel(self._parent_frame, self.speeds[0] * axis)
self.child.masscenter.set_vel(self._parent_frame, self.speeds[0] * axis)
| PrismaticJoint |
python | automl__auto-sklearn | test/test_pipeline/components/data_preprocessing/test_data_preprocessing_numerical.py | {
"start": 190,
"end": 2906
} | class ____(unittest.TestCase):
def test_data_type_consistency(self):
X = np.random.rand(3, 4)
Y = NumericalPreprocessingPipeline(
feat_type={0: "numerical", 1: "numerical", 2: "numerical"}
).fit_transform(X)
self.assertFalse(sparse.issparse(Y))
X = sparse.csc_matrix(
([3.0, 6.0, 4.0, 5.0], ([0, 1, 2, 1], [3, 2, 1, 0])), shape=(3, 4)
)
Y = NumericalPreprocessingPipeline(
feat_type={0: "numerical", 1: "numerical", 2: "numerical"}
).fit_transform(X)
self.assertTrue(sparse.issparse(Y))
def test_fit_transform(self):
X = np.array(
[[3.14, 1.0, 1.0], [3.14, 2.0, np.nan], [3.14, 3.0, 3.0]]
) # noqa : matrix legibility
# 1st column should be droped due to low variance
# The 2nd should be be standardized (default rescaling algorithm)
# The 3rd will get a value imputed by the mean (2.), therefore the
# transformation here will have the same effect as on the the 2nd column
sdev = np.sqrt(2 / 3)
Y1 = np.array(
[
[-1 / sdev, -1 / sdev],
[0.0, 0.0], # noqa : matrix legibility
[1 / sdev, 1 / sdev],
]
) # noqa : matrix legibility
# dense input
Yt = NumericalPreprocessingPipeline(
feat_type={0: "numerical", 1: "numerical", 2: "numerical"}
).fit_transform(X)
np.testing.assert_array_almost_equal(Yt, Y1)
# sparse input (uses with_mean=False)
Y2 = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]) / sdev
X_sparse = sparse.csc_matrix(X)
Yt = NumericalPreprocessingPipeline(
feat_type={0: "numerical", 1: "numerical", 2: "numerical"}
).fit_transform(X_sparse)
np.testing.assert_array_almost_equal(Yt.todense(), Y2)
def test_transform(self):
X1 = np.array(
[[3.14, 1.0, 1.0], [3.14, 2.0, np.nan], [3.14, 3.0, 3.0]]
) # noqa : matrix legibility
sdev = np.sqrt(2 / 3)
# fit
NPP = NumericalPreprocessingPipeline(
feat_type={0: "numerical", 1: "numerical", 2: "numerical"}
)
NPP.fit_transform(X1)
# transform
X2 = np.array([[1.0, 5.0, 8.0], [2.0, 6.0, 9.0], [3.0, 7.0, np.nan]])
Yt = NPP.transform(X2)
# imputation, variance_threshold and rescaling are done using the data already
# fitted, therefore:
Y2 = np.array(
[[3 / sdev, 6 / sdev], [4 / sdev, 7 / sdev], [5 / sdev, 0.0]]
) # noqa : matrix legibility
np.testing.assert_array_almost_equal(Yt, Y2)
| NumericalPreprocessingPipelineTest |
python | django__django | tests/field_defaults/models.py | {
"start": 2074,
"end": 2217
} | class ____(models.Model):
language_code = models.ForeignKey(
DBDefaultsPK, db_default="fr", on_delete=models.CASCADE
)
| DBDefaultsFK |
python | apache__airflow | airflow-core/src/airflow/executors/local_executor.py | {
"start": 4892,
"end": 9383
} | class ____(BaseExecutor):
"""
LocalExecutor executes tasks locally in parallel.
It uses the multiprocessing Python library and queues to parallelize the execution of tasks.
:param parallelism: how many parallel processes are run in the executor, must be > 0
"""
is_local: bool = True
serve_logs: bool = True
activity_queue: SimpleQueue[workloads.All | None]
result_queue: SimpleQueue[TaskInstanceStateType]
workers: dict[int, multiprocessing.Process]
_unread_messages: multiprocessing.sharedctypes.Synchronized[int]
def start(self) -> None:
"""Start the executor."""
# We delay opening these queues until the start method mostly for unit tests. ExecutorLoader caches
# instances, so each test reusues the same instance! (i.e. test 1 runs, closes the queues, then test 2
# comes back and gets the same LocalExecutor instance, so we have to open new here.)
self.activity_queue = SimpleQueue()
self.result_queue = SimpleQueue()
self.workers = {}
# Mypy sees this value as `SynchronizedBase[c_uint]`, but that isn't the right runtime type behaviour
# (it looks like an int to python)
self._unread_messages = multiprocessing.Value(ctypes.c_uint)
def _check_workers(self):
# Reap any dead workers
to_remove = set()
for pid, proc in self.workers.items():
if not proc.is_alive():
to_remove.add(pid)
proc.close()
if to_remove:
self.workers = {pid: proc for pid, proc in self.workers.items() if pid not in to_remove}
with self._unread_messages:
num_outstanding = self._unread_messages.value
if num_outstanding <= 0 or self.activity_queue.empty():
# Nothing to do. Future enhancement if someone wants: shut down workers that have been idle for N
# seconds
return
# If we're using spawn in multiprocessing (default on macOS now) to start tasks, this can get called a
# via `sync()` a few times before the spawned process actually starts picking up messages. Try not to
# create too much
if num_outstanding and len(self.workers) < self.parallelism:
# This only creates one worker, which is fine as we call this directly after putting a message on
# activity_queue in execute_async
self._spawn_worker()
def _spawn_worker(self):
p = multiprocessing.Process(
target=_run_worker,
kwargs={
"logger_name": self.log.name,
"input": self.activity_queue,
"output": self.result_queue,
"unread_messages": self._unread_messages,
},
)
p.start()
if TYPE_CHECKING:
assert p.pid # Since we've called start
self.workers[p.pid] = p
def sync(self) -> None:
"""Sync will get called periodically by the heartbeat method."""
self._read_results()
self._check_workers()
def _read_results(self):
while not self.result_queue.empty():
key, state, exc = self.result_queue.get()
self.change_state(key, state)
def end(self) -> None:
"""End the executor."""
self.log.info(
"Shutting down LocalExecutor"
"; waiting for running tasks to finish. Signal again if you don't want to wait."
)
# We can't tell which proc will pick which close message up, so we send all the messages, and then
# wait on all the procs
for proc in self.workers.values():
# Send the shutdown message once for each alive worker
if proc.is_alive():
self.activity_queue.put(None)
for proc in self.workers.values():
if proc.is_alive():
proc.join()
proc.close()
# Process any extra results before closing
self._read_results()
self.activity_queue.close()
self.result_queue.close()
def terminate(self):
"""Terminate the executor is not doing anything."""
def _process_workloads(self, workloads):
for workload in workloads:
self.activity_queue.put(workload)
del self.queued_tasks[workload.ti.key]
with self._unread_messages:
self._unread_messages.value += len(workloads)
self._check_workers()
| LocalExecutor |
python | PyCQA__pylint | tests/functional/m/missing/missing_class_docstring.py | {
"start": 83,
"end": 134
} | class ____: # [missing-class-docstring]
pass
| Klass |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 9632,
"end": 11478
} | class ____(NonStrictDataModel):
"""
:param worker: ID of the worker
:type worker: str
:param metrics: List of the metrics statistics for the worker
:type metrics: Sequence[MetricStats]
"""
_schema = {
"properties": {
"metrics": {
"description": "List of the metrics statistics for the worker",
"items": {"$ref": "#/definitions/metric_stats"},
"type": ["array", "null"],
},
"worker": {"description": "ID of the worker", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, worker: Optional[str] = None, metrics: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(WorkerStats, self).__init__(**kwargs)
self.worker = worker
self.metrics = metrics
@schema_property("worker")
def worker(self) -> Optional[str]:
return self._property_worker
@worker.setter
def worker(self, value: Optional[str]) -> None:
if value is None:
self._property_worker = None
return
self.assert_isinstance(value, "worker", six.string_types)
self._property_worker = value
@schema_property("metrics")
def metrics(self) -> Optional[List[Any]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricStats.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", MetricStats, is_array=True)
self._property_metrics = value
| WorkerStats |
python | tensorflow__tensorflow | tensorflow/python/distribute/values.py | {
"start": 17086,
"end": 18002
} | class ____(trace.TraceType):
"""TraceType of DistributedVariable objects."""
def __init__(self, distributed_variable):
self.distributed_variable = distributed_variable
self.components = (tuple(distributed_variable.shape.as_list()),
distributed_variable.dtype)
def is_subtype_of(self, other):
return self == other
def most_specific_common_supertype(self, others):
return self if all(self == other for other in others) else None
def placeholder_value(self, placeholder_context=None):
return self.distributed_variable
def to_tensors(self, value):
return []
def cast(self, value, _):
return value
def __hash__(self) -> int:
return hash(self.components)
def __eq__(self, other) -> bool:
if not isinstance(other, DistributedVariableTraceType):
return False
return self.components == other.components
| DistributedVariableTraceType |
python | sqlalchemy__sqlalchemy | test/ext/test_orderinglist.py | {
"start": 1480,
"end": 13733
} | class ____(fixtures.MappedTest):
def setup_test(self):
global metadata, slides_table, bullets_table, Slide, Bullet
slides_table, bullets_table = None, None
Slide, Bullet = None, None
metadata = MetaData()
def _setup(self, test_collection_class):
"""Build a relationship situation using the given
test_collection_class factory"""
global slides_table, bullets_table, Slide, Bullet
slides_table = Table(
"test_Slides",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(128)),
)
bullets_table = Table(
"test_Bullets",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("slide_id", Integer, ForeignKey("test_Slides.id")),
Column("position", Integer),
Column("text", String(128)),
)
class Slide:
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Slide "%s">' % self.name
class Bullet:
def __init__(self, text):
self.text = text
def __repr__(self):
return '<Bullet "%s" pos %s>' % (self.text, self.position)
clear_mappers()
self.mapper_registry.map_imperatively(
Slide,
slides_table,
properties={
"bullets": relationship(
Bullet,
lazy="joined",
collection_class=test_collection_class,
backref="slide",
order_by=[bullets_table.c.position],
)
},
)
self.mapper_registry.map_imperatively(Bullet, bullets_table)
metadata.create_all(testing.db)
def teardown_test(self):
metadata.drop_all(testing.db)
def test_append_no_reorder(self):
self._setup(
ordering_list("position", count_from=1, reorder_on_append=False)
)
s1 = Slide("Slide #1")
self.assert_(not s1.bullets)
self.assert_(len(s1.bullets) == 0)
s1.bullets.append(Bullet("s1/b1"))
self.assert_(s1.bullets)
self.assert_(len(s1.bullets) == 1)
self.assert_(s1.bullets[0].position == 1)
s1.bullets.append(Bullet("s1/b2"))
self.assert_(len(s1.bullets) == 2)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
bul = Bullet("s1/b100")
bul.position = 100
s1.bullets.append(bul)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 100)
s1.bullets.append(Bullet("s1/b4"))
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 100)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._reorder()
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 4)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4"]
found = [b.text for b in srt.bullets]
self.assert_(titles == found)
def test_append_reorder(self):
self._setup(
ordering_list("position", count_from=1, reorder_on_append=True)
)
s1 = Slide("Slide #1")
self.assert_(not s1.bullets)
self.assert_(len(s1.bullets) == 0)
s1.bullets.append(Bullet("s1/b1"))
self.assert_(s1.bullets)
self.assert_(len(s1.bullets) == 1)
self.assert_(s1.bullets[0].position == 1)
s1.bullets.append(Bullet("s1/b2"))
self.assert_(len(s1.bullets) == 2)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
bul = Bullet("s1/b100")
bul.position = 100
s1.bullets.append(bul)
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
s1.bullets.append(Bullet("s1/b4"))
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._reorder()
self.assert_(s1.bullets[0].position == 1)
self.assert_(s1.bullets[1].position == 2)
self.assert_(s1.bullets[2].position == 3)
self.assert_(s1.bullets[3].position == 4)
s1.bullets._raw_append(Bullet("raw"))
self.assert_(s1.bullets[4].position is None)
s1.bullets._reorder()
self.assert_(s1.bullets[4].position == 5)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 5)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw"]
found = [b.text for b in srt.bullets]
eq_(titles, found)
srt.bullets._raw_append(Bullet("raw2"))
srt.bullets[-1].position = 6
session.flush()
session.expunge_all()
srt = session.get(Slide, id_)
titles = ["s1/b1", "s1/b2", "s1/b100", "s1/b4", "raw", "raw2"]
found = [b.text for b in srt.bullets]
eq_(titles, found)
def test_insert(self):
self._setup(ordering_list("position"))
s1 = Slide("Slide #1")
s1.bullets.append(Bullet("1"))
s1.bullets.append(Bullet("2"))
s1.bullets.append(Bullet("3"))
s1.bullets.append(Bullet("4"))
self.assert_(s1.bullets[0].position == 0)
self.assert_(s1.bullets[1].position == 1)
self.assert_(s1.bullets[2].position == 2)
self.assert_(s1.bullets[3].position == 3)
s1.bullets.insert(2, Bullet("insert_at_2"))
self.assert_(s1.bullets[0].position == 0)
self.assert_(s1.bullets[1].position == 1)
self.assert_(s1.bullets[2].position == 2)
self.assert_(s1.bullets[3].position == 3)
self.assert_(s1.bullets[4].position == 4)
self.assert_(s1.bullets[1].text == "2")
self.assert_(s1.bullets[2].text == "insert_at_2")
self.assert_(s1.bullets[3].text == "3")
s1.bullets.insert(999, Bullet("999"))
self.assert_(len(s1.bullets) == 6)
self.assert_(s1.bullets[5].position == 5)
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 6)
texts = ["1", "2", "insert_at_2", "3", "4", "999"]
found = [b.text for b in srt.bullets]
self.assert_(texts == found)
def test_slice(self):
self._setup(ordering_list("position"))
b = [
Bullet("1"),
Bullet("2"),
Bullet("3"),
Bullet("4"),
Bullet("5"),
Bullet("6"),
]
s1 = Slide("Slide #1")
# 1, 2, 3
s1.bullets[0:3] = iter(b[0:3])
for i in 0, 1, 2:
self.assert_(s1.bullets[i].position == i)
self.assert_(s1.bullets[i] == b[i])
# 1, 4, 5, 6, 3
s1.bullets[1:2] = b[3:6]
for li, bi in (0, 0), (1, 3), (2, 4), (3, 5), (4, 2):
self.assert_(s1.bullets[li].position == li)
self.assert_(s1.bullets[li] == b[bi])
# 1, 6, 3
del s1.bullets[1:3]
for li, bi in (0, 0), (1, 5), (2, 2):
self.assert_(s1.bullets[li].position == li)
self.assert_(s1.bullets[li] == b[bi])
session = fixture_session()
session.add(s1)
session.flush()
id_ = s1.id
session.expunge_all()
del s1
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 3)
texts = ["1", "6", "3"]
for i, text in enumerate(texts):
self.assert_(srt.bullets[i].position == i)
self.assert_(srt.bullets[i].text == text)
def test_replace(self):
self._setup(ordering_list("position"))
s1 = Slide("Slide #1")
s1.bullets = [Bullet("1"), Bullet("2"), Bullet("3")]
self.assert_(len(s1.bullets) == 3)
self.assert_(s1.bullets[2].position == 2)
session = fixture_session()
session.add(s1)
session.flush()
new_bullet = Bullet("new 2")
self.assert_(new_bullet.position is None)
# mark existing bullet as db-deleted before replacement.
# session.delete(s1.bullets[1])
s1.bullets[1] = new_bullet
self.assert_(new_bullet.position == 1)
self.assert_(len(s1.bullets) == 3)
id_ = s1.id
session.flush()
session.expunge_all()
srt = session.get(Slide, id_)
self.assert_(srt.bullets)
self.assert_(len(srt.bullets) == 3)
self.assert_(srt.bullets[1].text == "new 2")
self.assert_(srt.bullets[2].text == "3")
def test_replace_two(self):
"""test #3191"""
self._setup(ordering_list("position", reorder_on_append=True))
s1 = Slide("Slide #1")
b1, b2, b3, b4 = Bullet("1"), Bullet("2"), Bullet("3"), Bullet("4")
s1.bullets = [b1, b2, b3]
eq_([b.position for b in s1.bullets], [0, 1, 2])
s1.bullets = [b4, b2, b1]
eq_([b.position for b in s1.bullets], [0, 1, 2])
def test_funky_ordering(self):
class Pos:
def __init__(self):
self.position = None
step_factory = ordering_list(
"position", ordering_func=step_numbering(2)
)
stepped = step_factory()
stepped.append(Pos())
stepped.append(Pos())
stepped.append(Pos())
stepped.append(Pos())
for li, pos in (0, 0), (1, 2), (2, 4), (3, 6):
self.assert_(stepped[li].position == pos)
fib_factory = ordering_list(
"position", ordering_func=fibonacci_numbering("position")
)
fibbed = fib_factory()
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
fibbed.append(Pos())
for li, pos in (0, 1), (1, 2), (2, 3), (3, 5), (4, 8):
self.assert_(fibbed[li].position == pos)
fibbed.insert(2, Pos())
fibbed.insert(4, Pos())
fibbed.insert(6, Pos())
for li, pos in (
(0, 1),
(1, 2),
(2, 3),
(3, 5),
(4, 8),
(5, 13),
(6, 21),
(7, 34),
):
self.assert_(fibbed[li].position == pos)
alpha_factory = ordering_list("position", ordering_func=alpha_ordering)
alpha = alpha_factory()
alpha.append(Pos())
alpha.append(Pos())
alpha.append(Pos())
alpha.insert(1, Pos())
for li, pos in (0, "A"), (1, "B"), (2, "C"), (3, "D"):
self.assert_(alpha[li].position == pos)
def test_picklability(self):
from sqlalchemy.ext.orderinglist import OrderingList
olist = OrderingList("order", reorder_on_append=True)
olist.append(DummyItem())
for loads, dumps in picklers():
pck = dumps(olist)
copy = loads(pck)
self.assert_(copy == olist)
self.assert_(copy.__dict__ == olist.__dict__)
| OrderingListTest |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_tabbed_alternate.py | {
"start": 106,
"end": 18015
} | class ____(util.MdCase):
"""Test tab cases."""
extension = ['pymdownx.tabbed', 'pymdownx.superfences', 'markdown.extensions.def_list', 'pymdownx.details']
extension_configs = {'pymdownx.tabbed': {'alternate_style': True}}
def test_with_preceding_text(self):
"""Test content directly before tabs."""
expected = r'''
<p>foo
<strong>foo</strong></p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block"></div>
</div>
</div>
''' # noqa: E501
self.check_markdown(
r'''
foo
**foo**
=== "Tab"
''',
expected,
True
)
def test_tabbed(self):
"""Test tabbed."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
And more `content`.
=== "Another Tab"
Some more content.
```
code
```
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label><label for="__tabbed_1_2">Another Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
<div class="tabbed-block">
<p>Some more content.</p>
<div class="highlight"><pre><span></span><code>code
</code></pre></div>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_markdown_title(self):
"""Test tabbed."""
self.check_markdown(
r'''
=== "**Tab**"
Some *content*
And more `content`.
=== "_Another Tab_"
Some more content.
```
code
```
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1"><strong>Tab</strong></label><label for="__tabbed_1_2"><em>Another Tab</em></label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
<div class="tabbed-block">
<p>Some more content.</p>
<div class="highlight"><pre><span></span><code>code
</code></pre></div>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_nested_tabbed(self):
"""Test nested tabbed."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
=== "Tab A"
- item 1
- item 2
=== "Tab B"
- item A
- item B
=== "Another Tab"
Some more content.
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label><label for="__tabbed_1_2">Another Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some <em>content</em></p>
<div class="tabbed-set tabbed-alternate" data-tabs="2:2"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><input id="__tabbed_2_2" name="__tabbed_2" type="radio" /><div class="tabbed-labels"><label for="__tabbed_2_1">Tab A</label><label for="__tabbed_2_2">Tab B</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
<div class="tabbed-block">
<ul>
<li>
<p>item A</p>
</li>
<li>
<p>item B</p>
</li>
</ul>
</div>
</div>
</div>
</div>
<div class="tabbed-block">
<p>Some more content.</p>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_split(self):
"""Force a split of tab sets."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
And more `content`.
===! "Another Tab"
Some more content.
```
code
```
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
</div>
</div>
<div class="tabbed-set tabbed-alternate" data-tabs="2:1"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><div class="tabbed-labels"><label for="__tabbed_2_1">Another Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some more content.</p>
<div class="highlight"><pre><span></span><code>code
</code></pre></div>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_break(self):
"""Test that tabs are properly terminated on blocks that are not under the tab."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
And more `content`.
Content
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
</div>
</div>
<p>Content</p>
''', # noqa: E501
True
)
def test_tabbed_select(self):
"""Test selecting a tab."""
self.check_markdown(
r'''
=== "Tab 1"
content
===+ "Tab 2"
content
=== "Tab 3"
content
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:3"><input id="__tabbed_1_1" name="__tabbed_1" type="radio" /><input checked="checked" id="__tabbed_1_2" name="__tabbed_1" type="radio" /><input id="__tabbed_1_3" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab 1</label><label for="__tabbed_1_2">Tab 2</label><label for="__tabbed_1_3">Tab 3</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>content</p>
</div>
<div class="tabbed-block">
<p>content</p>
</div>
<div class="tabbed-block">
<p>content</p>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_select_multiple(self):
"""Test selecting multiple tabs."""
self.check_markdown(
r'''
=== "Tab 1"
content
===+ "Tab 2"
content
===+ "Tab 3"
content
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:3"><input id="__tabbed_1_1" name="__tabbed_1" type="radio" /><input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><input checked="checked" id="__tabbed_1_3" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab 1</label><label for="__tabbed_1_2">Tab 2</label><label for="__tabbed_1_3">Tab 3</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>content</p>
</div>
<div class="tabbed-block">
<p>content</p>
</div>
<div class="tabbed-block">
<p>content</p>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_with_lists(self):
"""Test with lists."""
self.check_markdown(
'''
- List
=== "Tab"
- Paragraph
Paragraph
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ul>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_big_lists(self):
"""Test details with a longer list."""
self.check_markdown(
'''
- List
=== "Tab"
- Paragraph
Paragraph
- Paragraph
paragraph
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
<li>
<p>Paragraph</p>
<p>paragraph</p>
</li>
</ul>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_complex_lists(self):
"""Test details in a complex list scenario."""
self.check_markdown(
'''
- List
=== "Tab"
- Paragraph
=== "Tab"
1. Paragraph
Paragraph
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Paragraph</p>
<div class="tabbed-set tabbed-alternate" data-tabs="2:1"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><div class="tabbed-labels"><label for="__tabbed_2_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ol>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ol>
</div>
</div>
</div>
</li>
</ul>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_definition_list(self):
"""Test with definition list."""
self.check_markdown(
'''
- List
=== "Tab"
Term
: Definition
More text
: Another
definition
Even more text
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<dl>
<dt>Term</dt>
<dd>
<p>Definition</p>
<p>More text</p>
</dd>
<dd>
<p>Another
definition</p>
<p>Even more text</p>
</dd>
</dl>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_details(self):
"""Test with definition list."""
self.check_markdown(
'''
=== "Output"
???+ note "Open styled details"
??? danger "Nested details!"
And more content again.
''',
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Output</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<details class="note" open="open">
<summary>Open styled details</summary>
<details class="danger">
<summary>Nested details!</summary>
<p>And more content again.</p>
</details>
</details>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_complex_list(self):
"""Test tabbed complex list scenario."""
self.check_markdown(
'''
=== "Tab with loose lists"
- Parent 1
- Child 1
- Child 2
''',
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab with loose lists</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Parent 1</p>
<ul>
<li>Child 1</li>
<li>Child 2</li>
</ul>
</li>
</ul>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_complex_list_unindented_content(self):
"""Test tabbed complex list scenario with un-indented content."""
self.check_markdown(
'''
=== "Tab with loose lists"
- Parent 1
- Child 1
- Child 2
- Parent 2
''',
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab with loose lists</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Parent 1</p>
<ul>
<li>Child 1</li>
<li>Child 2</li>
</ul>
</li>
</ul>
</div>
</div>
</div>
<ul>
<li>Parent 2</li>
</ul>
''', # noqa: E501
True
)
def test_indented_code(self):
"""Test indented code."""
md = """
=== "Tab 1"
code
"""
self.check_markdown(
md,
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab 1</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<pre><code>code
</code></pre>
</div>
</div>
</div>
''', # noqa: E501
True
)
| TestTab |
python | huggingface__transformers | src/transformers/models/mbart/modeling_mbart.py | {
"start": 27325,
"end": 38764
} | class ____(MBartPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MBartDecoderLayer`]
Args:
config: MBartConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: MBartConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = MBartScaledWordEmbedding(
config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale
)
self.embed_positions = MBartLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([MBartDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.config = config
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
)
use_cache = False
# initialize `past_key_values`
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
batch_size, seq_length = inputs_embeds.size()[:-1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None and not is_torchdynamo_compiling():
# required mask seq length can be calculated via length of past cache
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
self_attn_cache = (
past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values
)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=self_attn_cache,
)
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
# embed positions
position_ids = self.embed_positions(input, past_key_values_length, position_ids=cache_position)
hidden_states = inputs_embeds + position_ids.to(inputs_embeds.device)
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
causal_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@auto_docstring
| MBartDecoder |
python | getsentry__sentry | tests/sentry/api/serializers/test_release.py | {
"start": 37633,
"end": 40475
} | class ____(TestCase, SnubaTestCase):
def test_simple(self) -> None:
user = self.create_user()
project = self.create_project()
project2 = self.create_project(organization=project.organization)
release_version = uuid4().hex
release = Release.objects.create(
organization_id=project.organization_id, version=release_version
)
release.add_project(project)
release.add_project(project2)
ReleaseProject.objects.filter(release=release, project=project).update(new_groups=1)
ReleaseProject.objects.filter(release=release, project=project2).update(new_groups=1)
self.store_event(
data={
"timestamp": before_now(seconds=1).isoformat(),
"release": release_version,
"environment": "prod",
},
project_id=project.id,
)
release = Release.objects.get(version=release_version)
env = Environment.objects.create(organization_id=project.organization_id, name="production")
env.add_project(project)
commit_author = CommitAuthor.objects.create(
name="stebe", email="stebe@sentry.io", organization_id=project.organization_id
)
commit = Commit.objects.create(
organization_id=project.organization_id,
repository_id=1,
key="abc",
author=commit_author,
message="waddap",
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit,
order=1,
)
release.update(authors=[str(commit_author.id)], commit_count=1, last_commit_id=commit.id)
deploy = Deploy.objects.create(
organization_id=project.organization_id, release=release, environment_id=env.id
)
release.update(total_deploys=1, last_deploy_id=deploy.id)
result = serialize(release, user, GroupEventReleaseSerializer())
assert result["id"] == release.id
assert result["commitCount"] == 1
assert result["data"] == release.data
assert result["dateReleased"] == release.date_released
assert result["deployCount"] == release.total_deploys
assert result["ref"] == release.ref
assert result["lastCommit"]["id"] == commit.key
assert result["lastDeploy"]["id"] == str(deploy.id)
assert result["version"] == release.version
assert result["versionInfo"]["package"] is None
assert result["versionInfo"]["version"]["raw"] == release_version
assert result["versionInfo"]["buildHash"] == release_version
assert result["versionInfo"]["description"] == release_version[:12]
| GroupEventReleaseSerializerTest |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 197824,
"end": 203897
} | class ____(BaseModel):
x: int
""",
module_name_prefix='C:\\',
)
foo_model = module.Foo
_, v_schema = models_json_schema([(foo_model, 'validation')])
assert v_schema == {
'$defs': {
'Foo': {
'properties': {'x': {'title': 'X', 'type': 'integer'}},
'required': ['x'],
'title': 'Foo',
'type': 'object',
}
}
}
def test_description_not_included_for_basemodel() -> None:
class Model(BaseModel):
x: BaseModel
assert 'description' not in Model.model_json_schema()['$defs']['BaseModel']
def test_recursive_json_schema_build() -> None:
"""
Schema build for this case is a bit complicated due to the recursive nature of the models.
This was reported as broken in https://github.com/pydantic/pydantic/issues/8689, which was
originally caused by the change made in https://github.com/pydantic/pydantic/pull/8583, which has
since been reverted.
"""
class AllowedValues(str, Enum):
VAL1 = 'Val1'
VAL2 = 'Val2'
class ModelA(BaseModel):
modelA_1: AllowedValues = Field(max_length=60)
class ModelB(ModelA):
modelB_1: list[ModelA]
class ModelC(BaseModel):
modelC_1: ModelB
class Model(BaseModel):
b: ModelB
c: ModelC
assert Model.model_json_schema()
def test_json_schema_annotated_with_field() -> None:
"""Ensure field specified with Annotated in create_model call is still marked as required."""
from pydantic import create_model
Model = create_model(
'test_model',
bar=(Annotated[int, Field(description='Bar description')], ...),
)
assert Model.model_json_schema() == {
'properties': {
'bar': {'description': 'Bar description', 'title': 'Bar', 'type': 'integer'},
},
'required': ['bar'],
'title': 'test_model',
'type': 'object',
}
def test_required_fields_in_annotated_with_create_model() -> None:
"""Ensure multiple field specified with Annotated in create_model call is still marked as required."""
from pydantic import create_model
Model = create_model(
'test_model',
foo=(int, ...),
bar=(Annotated[int, Field(description='Bar description')], ...),
baz=(Annotated[int, Field(description='Baz description')], ...),
)
assert Model.model_json_schema() == {
'properties': {
'foo': {'title': 'Foo', 'type': 'integer'},
'bar': {'description': 'Bar description', 'title': 'Bar', 'type': 'integer'},
'baz': {'description': 'Baz description', 'title': 'Baz', 'type': 'integer'},
},
'required': ['foo', 'bar', 'baz'],
'title': 'test_model',
'type': 'object',
}
def test_required_fields_in_annotated_with_basemodel() -> None:
"""
Ensure multiple field specified with Annotated in BaseModel is marked as required.
"""
class Model(BaseModel):
a: int = ...
b: Annotated[int, 'placeholder'] = ...
c: Annotated[int, Field()] = ...
assert Model.model_fields['a'].is_required()
assert Model.model_fields['b'].is_required()
assert Model.model_fields['c'].is_required()
@pytest.mark.parametrize(
'field_type,default_value,expected_schema',
[
(
IPvAnyAddress,
IPv4Address('127.0.0.1'),
{
'properties': {
'field': {'default': '127.0.0.1', 'format': 'ipvanyaddress', 'title': 'Field', 'type': 'string'}
},
'title': 'Model',
'type': 'object',
},
),
(
IPvAnyAddress,
IPv6Address('::1'),
{
'properties': {
'field': {'default': '::1', 'format': 'ipvanyaddress', 'title': 'Field', 'type': 'string'}
},
'title': 'Model',
'type': 'object',
},
),
],
)
def test_default_value_encoding(field_type, default_value, expected_schema):
class Model(BaseModel):
field: field_type = default_value
schema = Model.model_json_schema()
assert schema == expected_schema
def _generate_deprecated_classes():
@deprecated('MyModel is deprecated')
class MyModel(BaseModel):
pass
@deprecated('MyPydanticDataclass is deprecated')
@pydantic.dataclasses.dataclass
class MyPydanticDataclass:
pass
@deprecated('MyBuiltinDataclass is deprecated')
@dataclasses.dataclass
class MyBuiltinDataclass:
pass
@deprecated('MyTypedDict is deprecated')
class MyTypedDict(TypedDict):
pass
return [
pytest.param(MyModel, id='BaseModel'),
pytest.param(MyPydanticDataclass, id='pydantic-dataclass'),
pytest.param(MyBuiltinDataclass, id='builtin-dataclass'),
pytest.param(MyTypedDict, id='TypedDict'),
]
@pytest.mark.parametrize('cls', _generate_deprecated_classes())
def test_deprecated_classes_json_schema(cls):
assert hasattr(cls, '__deprecated__')
assert TypeAdapter(cls).json_schema()['deprecated']
@pytest.mark.parametrize('cls', _generate_deprecated_classes())
def test_deprecated_subclasses_json_schema(cls):
class Model(BaseModel):
subclass: cls
assert Model.model_json_schema() == {
'$defs': {cls.__name__: {'deprecated': True, 'properties': {}, 'title': f'{cls.__name__}', 'type': 'object'}},
'properties': {'subclass': {'$ref': f'#/$defs/{cls.__name__}'}},
'required': ['subclass'],
'title': 'Model',
'type': 'object',
}
@pytest.mark.parametrize('cls', _generate_deprecated_classes())
def test_deprecated_class_usage_warns(cls):
if issubclass(cls, dict):
pytest.skip('TypedDict does not generate a DeprecationWarning on usage')
with pytest.warns(DeprecationWarning, match=f'{cls.__name__} is deprecated'):
cls()
@dataclasses.dataclass
| Foo |
python | apache__airflow | devel-common/src/tests_common/test_utils/mock_operators.py | {
"start": 1383,
"end": 1722
} | class ____(BaseOperator):
"""Operator for testing purposes."""
template_fields: Sequence[str] = ("arg1", "arg2")
def __init__(self, arg1: str = "", arg2: str = "", **kwargs):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
def execute(self, context: Context):
pass
| MockOperator |
python | Textualize__textual | tests/notifications/test_all_levels_notifications.py | {
"start": 387,
"end": 743
} | class ____(App[None]):
def on_mount(self) -> None:
self.notify("test", timeout=60)
self.push_screen(NotifyScreen())
async def test_all_levels_of_notification() -> None:
"""All levels within the DOM should be able to notify."""
async with NotifyApp().run_test() as pilot:
assert len(pilot.app._notifications) == 3
| NotifyApp |
python | pydantic__pydantic | pydantic/v1/networks.py | {
"start": 11978,
"end": 12331
} | class ____(AnyHttpUrl):
tld_required = True
# https://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers
max_length = 2083
hidden_parts = {'port'}
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {'port': '80' if parts['scheme'] == 'http' else '443'}
| HttpUrl |
python | ethereum__web3.py | web3/contract/base_contract.py | {
"start": 13550,
"end": 17083
} | class ____(Generic[TContractEvent]):
"""
Class containing contract event objects
This is available via:
.. code-block:: python
>>> mycontract.events
<web3.contract.ContractEvents object at 0x108afde10>
To get list of all supported events in the contract ABI.
This allows you to iterate over :class:`ContractEvent` proxy classes.
.. code-block:: python
>>> for e in mycontract.events: print(e)
<class 'web3._utils.datatypes.LogAnonymous'>
...
"""
def __init__(
self,
abi: ABI,
w3: Union["Web3", "AsyncWeb3[Any]"],
contract_event_type: type[TContractEvent],
address: ChecksumAddress | None = None,
) -> None:
self.abi = abi
self.w3 = w3
self.address = address
self.contract_event_type = contract_event_type
_events: Sequence[ABIEvent] = None
if self.abi:
_events = sorted(
filter_abi_by_type("event", self.abi),
key=lambda evt: (evt["name"], len(evt.get("inputs", []))),
)
for event in _events:
abi_signature = abi_to_signature(event)
event_factory = contract_event_type.factory(
abi_signature,
w3=self.w3,
contract_abi=self.abi,
address=self.address,
abi=event,
)
# Set event name on instance if it does not already exist
if event["name"] not in self.__dict__:
setattr(self, event["name"], event_factory)
# Set underscore prefixed event signature on instance
# Handles ambiguity in overloaded contract events
setattr(self, f"_{abi_signature}", event_factory)
if _events:
self._events = _events
def __hasattr__(self, event_name: str) -> bool:
try:
return event_name in self.__dict__["_events"]
except ABIEventNotFound:
return False
def __getattr__(self, event_name: str) -> TContractEvent:
if super().__getattribute__("abi") is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
elif "_events" not in self.__dict__ or len(self._events) == 0:
raise NoABIEventsFound(
"The abi for this contract contains no event definitions. ",
"Are you sure you provided the correct contract abi?",
)
elif get_name_from_abi_element_identifier(event_name) not in [
get_name_from_abi_element_identifier(event["name"])
for event in self._events
]:
raise ABIEventNotFound(
f"The event '{event_name}' was not found in this contract's abi. ",
"Are you sure you provided the correct contract abi?",
)
if "(" not in event_name:
event_name = _get_any_abi_signature_with_name(event_name, self._events)
else:
event_name = f"_{event_name}"
return super().__getattribute__(event_name)
def __getitem__(self, event_name: str) -> TContractEvent:
return getattr(self, event_name)
def __iter__(self) -> Iterable[TContractEvent]:
if not hasattr(self, "_events") or not self._events:
return
for event in self._events:
yield self[abi_to_signature(event)]
| BaseContractEvents |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/cwise_ops_binary_test.py | {
"start": 34850,
"end": 41497
} | class ____(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2) # pylint: disable=too-many-function-args
y = np.linspace(20, -10, 6).reshape(1, 3, 2) # pylint: disable=too-many-function-args
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
  """Broadcasting works for the `less` op."""
  self._testBCastByFunc(np_func=np.less, tf_func=math_ops.less)
def testBCastLessEqual(self):
  """Broadcasting works for the `less_equal` op."""
  self._testBCastByFunc(np_func=np.less_equal, tf_func=math_ops.less_equal)
def testBCastGreater(self):
  """Broadcasting works for the `greater` op."""
  self._testBCastByFunc(np_func=np.greater, tf_func=math_ops.greater)
def testBCastGreaterEqual(self):
  """Broadcasting works for the `greater_equal` op."""
  self._testBCastByFunc(
      np_func=np.greater_equal, tf_func=math_ops.greater_equal)
def testBCastEqual(self):
  """Broadcasting works for the `equal` op, including complex dtypes."""
  self._testBCastByFunc(
      np_func=np.equal, tf_func=math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
  """Broadcasting works for the `not_equal` op, including complex dtypes."""
  self._testBCastByFunc(
      np_func=np.not_equal, tf_func=math_ops.not_equal, include_complex=True)
def testShapeMismatch(self):
  """Comparing tensors with non-broadcastable shapes raises an error."""
  comparison_ops = (
      math_ops.less, math_ops.less_equal, math_ops.greater,
      math_ops.greater_equal, math_ops.equal, math_ops.not_equal)
  x = np.arange(0, 10).reshape([2, 5])
  y = np.arange(0, 10).reshape([5, 2])
  for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
    # [2, 5] vs [5, 2] cannot be broadcast together.
    xt = x.astype(dtype)
    yt = y.astype(dtype)
    for op in comparison_ops:
      with self.assertRaisesIncompatibleShapesError(
          (ValueError, errors.InvalidArgumentError)):
        op(xt, yt)
def testEqualDType(self):
  """equal/not_equal give correct booleans across numeric and bool dtypes."""
  x = np.asarray([0, 1, 2, 3, 4])
  y = np.asarray([0, 1, 2, 3, 4])
  # Identical inputs: equal is all-True, not_equal is all-False.
  expected = [[True] * 5, [False] * 5]

  def check(xt, yt):
    values = self.evaluate(
        [math_ops.equal(xt, yt), math_ops.not_equal(xt, yt)])
    self.assertAllEqual(expected, values)

  for dtype in (
      np.float16,
      np.float32,
      np.float64,
      np.int8,
      np.int16,
      np.int32,
      np.int64,
      np.uint8,
      np.uint16,
      np.uint32,
      np.uint64,
      np.bool_,
  ):
    check(x.astype(dtype), y.astype(dtype))
  for dtype in (np.complex64, np.complex128):
    # Add an imaginary component so the complex path is actually exercised.
    xt = x.astype(dtype)
    xt -= 1j * xt
    yt = y.astype(dtype)
    yt -= 1j * yt
    check(xt, yt)
@test_util.disable_tfrt("b/169901260")
def testEqualQuantizeDType(self):
  """equal/not_equal give correct booleans for quantized dtypes."""
  quantized_dtypes = (
      dtypes_lib.qint8,
      dtypes_lib.qint16,
      dtypes_lib.quint8,
      dtypes_lib.quint16,
      dtypes_lib.qint32,
  )
  x = np.asarray([0, 1, 2, 3, 4])
  y = np.asarray([0, 1, 2, 3, 4])
  for dtype in quantized_dtypes:
    np_dtype = dtype.as_numpy_dtype
    xt = x.astype(np_dtype)
    yt = y.astype(np_dtype)
    values = self.evaluate(
        [math_ops.equal(xt, yt), math_ops.not_equal(xt, yt)])
    # Identical inputs: equal is all-True, not_equal is all-False.
    self.assertAllEqual([[True] * 5, [False] * 5], values)
if __name__ == "__main__":
  # Run every test case in this module via the TensorFlow test runner.
  test.main()
| ComparisonOpTest |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py | {
"start": 2920,
"end": 3144
} | class ____(BaseModel):
celeryK8sRunLauncher: Optional[CeleryK8sRunLauncherConfig] = None
k8sRunLauncher: Optional[K8sRunLauncherConfig] = None
customRunLauncher: Optional[ConfigurableClass] = None
| RunLauncherConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.