language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
pytest-dev__pytest
|
src/_pytest/capture.py
|
{
"start": 9807,
"end": 10198
}
|
class ____(CaptureBase[str]):
EMPTY_BUFFER = ""
def __init__(self, fd: int) -> None:
pass
def start(self) -> None:
pass
def done(self) -> None:
pass
def suspend(self) -> None:
pass
def resume(self) -> None:
pass
def snap(self) -> str:
return ""
def writeorg(self, data: str) -> None:
pass
|
NoCapture
|
python
|
ray-project__ray
|
python/ray/data/_internal/operator_event_exporter.py
|
{
"start": 3166,
"end": 5231
}
|
class ____(OperatorEventExporter):
"""Operator event exporter implementation that uses the Ray export event logger.
This exporter writes operator event to log files using Ray's export event system.
"""
def __init__(self, logger: logging.Logger):
"""Initialize with a configured export event logger.
Args:
logger: The export event logger to use for writing events.
"""
self._export_logger = logger
def export_operator_event(self, operator_event: OperatorEvent) -> None:
"""Export operator event using the export event logger.
Args:
operator_event: OperatorEvent object containing operator event details.
"""
operator_event_proto = operator_event_to_proto(operator_event)
self._export_logger.send_event(operator_event_proto)
@classmethod
def create_if_enabled(cls) -> Optional["LoggerOperatorEventExporter"]:
"""Create a logger-based exporter if the export API is enabled.
Returns:
A LoggerOperatorEventExporter instance, none otherwise.
"""
from ray.core.generated.export_event_pb2 import ExportEvent
is_operator_event_export_api_enabled = check_export_api_enabled(
ExportEvent.SourceType.EXPORT_DATASET_OPERATOR_EVENT
)
if not is_operator_event_export_api_enabled:
# The export API is not enabled, so we shouldn't create an exporter
return None
log_directory = os.path.join(
ray._private.worker._global_node.get_session_dir_path(), "logs"
)
try:
logger = get_export_event_logger(
EventLogType.DATASET_OPERATOR_EVENT,
log_directory,
)
return LoggerOperatorEventExporter(logger)
except Exception:
logger.exception(
"Unable to initialize the export event logger, so no operator export "
"events will be written."
)
return None
|
LoggerOperatorEventExporter
|
python
|
pandas-dev__pandas
|
pandas/tests/arithmetic/test_timedelta64.py
|
{
"start": 26940,
"end": 53513
}
|
class ____:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
def test_sub_nat_retain_unit(self):
ser = pd.to_timedelta(Series(["00:00:01"])).astype("m8[s]")
result = ser - NaT
expected = Series([NaT], dtype="m8[s]")
tm.assert_series_equal(result, expected)
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(["00:00:01"]))
s2 = pd.to_timedelta(Series(["00:00:02"]))
sn = pd.to_timedelta(Series([NaT], dtype="m8[ns]"))
df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta)
df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta)
dfn = DataFrame([NaT._value]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta("00:00:01")
scalar2 = pd.to_timedelta("00:00:02")
timedelta_NaT = pd.to_timedelta("NaT")
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
s1 + np.nan
with pytest.raises(TypeError, match=msg):
np.nan + s1
with pytest.raises(TypeError, match=msg):
s1 - np.nan
with pytest.raises(TypeError, match=msg):
-np.nan + s1
actual = s1 + NaT
tm.assert_series_equal(actual, sn)
actual = s2 - NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
msg = "cannot subtract a datelike from|unsupported operand type"
with pytest.raises(TypeError, match=msg):
df1 + np.nan
with pytest.raises(TypeError, match=msg):
df1 - np.nan
actual = df1 + NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range("2012-1-1", periods=3, freq="D", unit="ns")
v2 = pd.date_range("2012-1-2", periods=3, freq="D", unit="ns")
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")
tm.assert_series_equal(rs, xp)
assert rs.dtype == "timedelta64[ns]"
df = DataFrame({"A": v1})
td = Series([timedelta(days=i) for i in range(3)], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# series on the rhs
result = df["A"] - df["A"].shift()
assert result.dtype == "timedelta64[ns]"
result = df["A"] + td
assert result.dtype == "M8[ns]"
# scalar Timestamp on rhs
maxa = df["A"].max()
assert isinstance(maxa, Timestamp)
resultb = df["A"] - df["A"].max()
assert resultb.dtype == "timedelta64[ns]"
# timestamp on lhs
result = resultb + df["A"]
values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
expected = Series(values, dtype="M8[ns]", name="A")
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df["A"] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name="A", dtype="m8[ns]"
)
tm.assert_series_equal(result, expected)
assert result.dtype == "m8[ns]"
d = datetime(2001, 1, 1, 3, 4)
resulta = df["A"] - d
assert resulta.dtype == "m8[ns]"
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df["A"], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df["A"])
assert resultb.dtype == "M8[ns]"
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(df["A"], resultb)
assert resultb.dtype == "M8[ns]"
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta("1s")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
# addition
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
# multiplication
tm.assert_series_equal(
nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta
)
tm.assert_series_equal(
1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize("cls", [Timestamp, datetime, np.datetime64])
def test_td64arr_add_sub_datetimelike_scalar(
self, cls, box_with_array, tz_naive_fixture
):
# GH#11925, GH#29558, GH#23215
tz = tz_naive_fixture
dt_scalar = Timestamp("2012-01-01", tz=tz)
if cls is datetime:
ts = dt_scalar.to_pydatetime()
elif cls is np.datetime64:
if tz_naive_fixture is not None:
pytest.skip(f"{cls} doesn't support {tz_naive_fixture}")
ts = dt_scalar.to_datetime64()
else:
ts = dt_scalar
tdi = timedelta_range("1 day", periods=3)
expected = pd.date_range("2012-01-02", periods=3, tz=tz, unit="ns")
if tz is not None and not timezones.is_utc(expected.tz):
# Day is no longer preserved by timedelta add/sub in pandas3 because
# it represents Calendar-Day instead of 24h
expected = expected._with_freq(None)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D", tz=tz, unit="ns")
if tz is not None and not timezones.is_utc(expected2.tz):
# Day is no longer preserved by timedelta add/sub in pandas3 because
# it represents Calendar-Day instead of 24h
expected2 = expected2._with_freq(None)
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
tdarr - ts
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64("NaT")
tdi = timedelta_range("1 day", periods=3)
expected = DatetimeIndex(["NaT", "NaT", "NaT"], dtype="M8[ns]")
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
def test_td64arr_sub_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = TimedeltaIndex(["-1 Day"] * 3)
dtarr = dti.values
expected = DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
msg = "cannot subtract a datelike from"
with pytest.raises(TypeError, match=msg):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_td64arr_add_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = TimedeltaIndex(["-1 Day"] * 3)
dtarr = dti.values
expected = DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Invalid __add__/__sub__ operations
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"])
@pytest.mark.parametrize("tdi_freq", [None, "h"])
def test_td64arr_sub_periodlike(
self, box_with_array, box_with_array2, tdi_freq, pi_freq
):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
per = pi[0]
tdi = tm.box_expected(tdi, box_with_array)
pi = tm.box_expected(pi, box_with_array2)
msg = "cannot subtract|unsupported operand type"
with pytest.raises(TypeError, match=msg):
tdi - pi
# GH#13078 subtraction of Period scalar not supported
with pytest.raises(TypeError, match=msg):
tdi - per
@pytest.mark.parametrize(
"other",
[
# GH#12624 for str case
"a",
# GH#19123
1,
1.5,
np.array(2),
],
)
def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
# vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
assert_invalid_addsub_type(tdarr, other)
@pytest.mark.parametrize(
"vec",
[
np.array([1, 2, 3]),
Index([1, 2, 3]),
Series([1, 2, 3]),
DataFrame([[1, 2, 3]]),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_addsub_numeric_arr_invalid(
self, box_with_array, vec, any_real_numpy_dtype
):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
vector = vec.astype(any_real_numpy_dtype)
assert_invalid_addsub_type(tdarr, vector)
def test_td64arr_add_sub_int(self, box_with_array, one):
# Variants of `one` for #19012, deprecated GH#22535
rng = timedelta_range("1 days 09:00:00", freq="h", periods=10)
tdarr = tm.box_expected(rng, box_with_array)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, one, msg)
# TODO: get inplace ops into assert_invalid_addsub_type
with pytest.raises(TypeError, match=msg):
tdarr += one
with pytest.raises(TypeError, match=msg):
tdarr -= one
def test_td64arr_add_sub_integer_array(self, box_with_array):
# GH#19959, deprecated GH#22535
# GH#22696 for DataFrame case, check that we don't dispatch to numpy
# implementation, which treats int64 as m8[ns]
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = timedelta_range("1 days 09:00:00", freq="h", periods=3)
tdarr = tm.box_expected(rng, box)
other = tm.box_expected([4, 3, 2], xbox)
msg = "Addition/subtraction of integers and integer-arrays"
assert_invalid_addsub_type(tdarr, other, msg)
def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
# GH#19959
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])
tdarr = tm.box_expected(tdi, box)
other = tm.box_expected([14, -1, 16], xbox)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, other, msg)
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_sub_td64_array(self, box_with_array):
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
expected_sub = 0 * tdi
result = tdi - tdarr
tm.assert_equal(result, expected_sub)
result = tdarr - tdi
tm.assert_equal(result, expected_sub)
def test_td64arr_add_sub_tdi(self, box_with_array, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
box = box_with_array
exname = get_expected_name(box, names)
tdi = TimedeltaIndex(["0 days", "1 day"], name=names[1])
tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[0])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], name=exname)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser + tdi
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
expected = Series(
[Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=exname
)
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser - tdi
tm.assert_equal(result, -expected)
assert_dtype(result, "timedelta64[ns]")
@pytest.mark.parametrize("tdnat", [np.timedelta64("NaT"), NaT])
def test_td64arr_add_sub_td64_nat(self, box_with_array, tdnat):
# GH#18808, GH#23320 special handling for timedelta64("NaT")
box = box_with_array
tdi = TimedeltaIndex([NaT, Timedelta("1s")])
expected = TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + tdnat
tm.assert_equal(result, expected)
result = tdnat + obj
tm.assert_equal(result, expected)
result = obj - tdnat
tm.assert_equal(result, expected)
result = tdnat - obj
tm.assert_equal(result, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
result = two_hours + rng
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
result = two_hours - rng
tm.assert_equal(result, -expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
def test_td64arr_add_sub_offset_index(
self, performance_warning, names, box_with_array
):
# GH#18849, GH#19744
box = box_with_array
exname = get_expected_name(box, names)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = Index([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1])
other = np.array(other) if box in [tm.to_array, pd.array] else other
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=exname
)
expected_sub = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box).astype(object)
expected_sub = tm.box_expected(expected_sub, box).astype(object)
with tm.assert_produces_warning(performance_warning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(performance_warning):
res2 = other + tdi
tm.assert_equal(res2, expected)
with tm.assert_produces_warning(performance_warning):
res_sub = tdi - other
tm.assert_equal(res_sub, expected_sub)
def test_td64arr_add_sub_offset_array(self, performance_warning, box_with_array):
# GH#18849, GH#18824
box = box_with_array
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([offsets.Hour(n=1), offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
)
expected_sub = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box).astype(object)
with tm.assert_produces_warning(performance_warning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(performance_warning):
res2 = other + tdi
tm.assert_equal(res2, expected)
expected_sub = tm.box_expected(expected_sub, box_with_array).astype(object)
with tm.assert_produces_warning(performance_warning):
res_sub = tdi - other
tm.assert_equal(res_sub, expected_sub)
def test_td64arr_with_offset_series(
self, performance_warning, names, box_with_array
):
# GH#18849
box = box_with_array
box2 = Series if box in [Index, tm.to_array, pd.array] else box
exname = get_expected_name(box, names)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = Series([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1])
expected_add = Series(
[tdi[n] + other[n] for n in range(len(tdi))], name=exname, dtype=object
)
obj = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2).astype(object)
with tm.assert_produces_warning(performance_warning):
res = obj + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(performance_warning):
res2 = other + obj
tm.assert_equal(res2, expected_add)
expected_sub = Series(
[tdi[n] - other[n] for n in range(len(tdi))], name=exname, dtype=object
)
expected_sub = tm.box_expected(expected_sub, box2).astype(object)
with tm.assert_produces_warning(performance_warning):
res3 = obj - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize("obox", [np.array, Index, Series])
def test_td64arr_addsub_anchored_offset_arraylike(
self, performance_warning, obox, box_with_array
):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([offsets.MonthEnd(), offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = "has incorrect type|cannot add the type MonthEnd"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(performance_warning):
tdi + anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(performance_warning):
anchored + tdi
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(performance_warning):
tdi - anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(performance_warning):
anchored - tdi
# ------------------------------------------------------------------
# Unsorted
def test_td64arr_add_sub_object_array(self, performance_warning, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = timedelta_range("1 day", periods=3, freq="D")
tdarr = tm.box_expected(tdi, box)
other = np.array([Timedelta(days=1), offsets.Day(2), Timestamp("2000-01-04")])
with tm.assert_produces_warning(performance_warning):
result = tdarr + other
expected = Index(
[Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")]
)
expected = tm.box_expected(expected, xbox).astype(object)
tm.assert_equal(result, expected)
msg = "unsupported operand type|cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(performance_warning):
tdarr - other
with tm.assert_produces_warning(performance_warning):
result = other - tdarr
expected = Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")])
expected = tm.box_expected(expected, xbox).astype(object)
tm.assert_equal(result, expected)
|
TestTimedeltaArraylikeAddSubOps
|
python
|
openai__openai-python
|
src/openai/types/realtime/session_update_event_param.py
|
{
"start": 579,
"end": 1160
}
|
class ____(TypedDict, total=False):
session: Required[Session]
"""Update the Realtime session.
Choose either a realtime session or a transcription session.
"""
type: Required[Literal["session.update"]]
"""The event type, must be `session.update`."""
event_id: str
"""Optional client-generated ID used to identify this event.
This is an arbitrary string that a client may assign. It will be passed back if
there is an error with the event, but the corresponding `session.updated` event
will not include it.
"""
|
SessionUpdateEventParam
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-collisions-on-a-road.py
|
{
"start": 52,
"end": 465
}
|
class ____(object):
def countCollisions(self, directions):
"""
:type directions: str
:rtype: int
"""
result = cnt = 0
smooth = 1
for x in directions:
if x == 'R':
cnt += 1
elif x == 'S' or (cnt or not smooth):
result += cnt+int(x == 'L')
cnt = smooth = 0
return result
|
Solution
|
python
|
facebook__pyre-check
|
client/language_server/connections.py
|
{
"start": 9427,
"end": 14359
}
|
class ____(AsyncBytesWriter):
"""
An implementation of `AsyncBytesWriter` based on `asyncio.StreamWriter`.
"""
stream_writer: asyncio.StreamWriter
def __init__(self, stream_writer: asyncio.StreamWriter) -> None:
self.stream_writer = stream_writer
async def write(self, data: bytes) -> None:
self.stream_writer.write(data)
await self.stream_writer.drain()
async def close(self) -> None:
self.stream_writer.close()
await self._stream_writer_wait_closed()
async def _stream_writer_wait_closed(self) -> None:
"""
StreamWriter does not have a `wait_closed` method prior to python
3.7. For 3.6 compatibility we have to hack it with an async busy
loop that waits
- first for the transport to be aware that it is closing
- then for the socket to become unmapped
This approach is inspired by the solution in qemu.aqmp.util.
"""
if sys.version_info >= (3, 7):
return await self.stream_writer.wait_closed()
while not self.stream_writer.transport.is_closing():
await asyncio.sleep(0)
transport_socket: sys.IO = self.stream_writer.transport.get_extra_info("socket")
if transport_socket is not None:
while transport_socket.fileno() != -1:
await asyncio.sleep(0)
@contextlib.asynccontextmanager
async def _connect_async_bytes(
socket_path: Path, buffer_size: Optional[int] = None
) -> AsyncIterator[Tuple[AsyncBytesReader, AsyncBytesWriter]]:
"""
Connect to the socket at given path. Once connected, create an input and
an output stream from the socket. Both the input stream and the output
stream are in raw binary mode. The API is intended to be used like this:
```
async with connect(socket_path) as (input_stream, output_stream):
# Read from input_stream and write into output_stream here
...
```
The optional `buffer_size` argument determines the size of the input buffer
used by the returned reader instance. If not specified, a default value of
64kb will be used.
Socket creation, connection, and closure will be automatically handled
inside this context manager. If any of the socket operations fail, raise
`ConnectionFailure`.
"""
writer: Optional[AsyncBytesWriter] = None
try:
limit = buffer_size if buffer_size is not None else 2**16
stream_reader, stream_writer = await asyncio.open_unix_connection(
str(socket_path), limit=limit
)
reader = StreamBytesReader(stream_reader)
writer = StreamBytesWriter(stream_writer)
yield reader, writer
except OSError as error:
raise ConnectionFailure() from error
finally:
if writer is not None:
await writer.close()
@contextlib.asynccontextmanager
async def connect_async(
socket_path: Path, buffer_size: Optional[int] = None
) -> AsyncIterator[Tuple[AsyncTextReader, AsyncTextWriter]]:
"""
This is a line-oriented higher-level API than `connect`. It can be used
when the caller does not want to deal with the complexity of binary I/O.
The behavior is the same as `connect`, except the streams that are created
operates in text mode. Read/write APIs of the streams uses UTF-8 encoded
`str` instead of `bytes`.
"""
async with _connect_async_bytes(socket_path, buffer_size) as (
bytes_reader,
bytes_writer,
):
yield (
AsyncTextReader(bytes_reader, encoding="utf-8"),
AsyncTextWriter(bytes_writer, encoding="utf-8"),
)
async def create_async_stdin_stdout() -> Tuple[AsyncTextReader, AsyncTextWriter]:
"""
By default, `sys.stdin` and `sys.stdout` are synchronous channels: reading
from `sys.stdin` or writing to `sys.stdout` will block until the read/write
succeed, which is very different from the async socket channels created via
`connect_async`.
This function creates wrappers around `sys.stdin` and `sys.stdout` and makes
them behave in the same way as other async socket channels. This makes it
easier to write low-level-I/O-agonstic code, where the high-level logic does
not need to worry about whether the underlying async I/O channel comes from
sockets or from stdin/stdout.
"""
loop = asyncio.get_event_loop()
stream_reader = asyncio.StreamReader(loop=loop)
await loop.connect_read_pipe(
lambda: asyncio.StreamReaderProtocol(stream_reader), sys.stdin
)
w_transport, w_protocol = await loop.connect_write_pipe(
asyncio.streams.FlowControlMixin, sys.stdout
)
stream_writer = asyncio.StreamWriter(w_transport, w_protocol, stream_reader, loop)
return (
AsyncTextReader(StreamBytesReader(stream_reader)),
AsyncTextWriter(StreamBytesWriter(stream_writer)),
)
|
StreamBytesWriter
|
python
|
numba__numba
|
numba/cuda/errors.py
|
{
"start": 416,
"end": 1724
}
|
class ____(LoweringError):
pass
_launch_help_url = ("https://numba.readthedocs.io/en/stable/cuda/"
"kernels.html#kernel-invocation")
missing_launch_config_msg = """
Kernel launch configuration was not specified. Use the syntax:
kernel_function[blockspergrid, threadsperblock](arg0, arg1, ..., argn)
See {} for help.
""".format(_launch_help_url)
def normalize_kernel_dimensions(griddim, blockdim):
"""
Normalize and validate the user-supplied kernel dimensions.
"""
def check_dim(dim, name):
if not isinstance(dim, (tuple, list)):
dim = [dim]
else:
dim = list(dim)
if len(dim) > 3:
raise ValueError('%s must be a sequence of 1, 2 or 3 integers, '
'got %r' % (name, dim))
for v in dim:
if not isinstance(v, numbers.Integral):
raise TypeError('%s must be a sequence of integers, got %r'
% (name, dim))
while len(dim) < 3:
dim.append(1)
return tuple(dim)
if None in (griddim, blockdim):
raise ValueError(missing_launch_config_msg)
griddim = check_dim(griddim, 'griddim')
blockdim = check_dim(blockdim, 'blockdim')
return griddim, blockdim
|
CudaLoweringError
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/rolling.py
|
{
"start": 5794,
"end": 6754
}
|
class ____:
params = (
[({"window": 10}, "rolling"), ({"window": 1000}, "rolling"), ({}, "expanding")],
["corr", "cov"],
[True, False],
)
param_names = ["window_kwargs", "method", "pairwise"]
def setup(self, kwargs_window, method, pairwise):
N = 10**4
n_groups = 20
kwargs, window = kwargs_window
groups = [i for _ in range(N // n_groups) for i in range(n_groups)]
arr = np.random.random(N)
self.df = pd.DataFrame(arr)
self.window = getattr(self.df, window)(**kwargs)
self.window_group = getattr(
pd.DataFrame({"A": groups, "B": arr}).groupby("A"), window
)(**kwargs)
def time_pairwise(self, kwargs_window, method, pairwise):
getattr(self.window, method)(self.df, pairwise=pairwise)
def time_groupby(self, kwargs_window, method, pairwise):
getattr(self.window_group, method)(self.df, pairwise=pairwise)
|
Pairwise
|
python
|
dask__dask
|
dask/_expr.py
|
{
"start": 39384,
"end": 39839
}
|
class ____(Expr):
_parameters = ["expr"]
def _simplify_down(self):
return self.expr.finalize_compute()
def _convert_dask_keys(keys):
    """Recursively wrap every key in *keys* in a ``TaskRef``.

    Nested lists are converted recursively; the result mirrors the input
    structure as a ``List`` of ``TaskRef``/``List`` entries.
    """
    from dask._task_spec import List, TaskRef

    assert isinstance(keys, list)
    converted = [
        _convert_dask_keys(entry) if isinstance(entry, list) else TaskRef(entry)
        for entry in keys
    ]
    return List(*converted)
|
FinalizeCompute
|
python
|
kamyu104__LeetCode-Solutions
|
Python/rotate-array.py
|
{
"start": 1874,
"end": 2314
}
|
class ____(object):
    """Rotate a list to the right by ``k`` steps, in place, via slicing.

    Time:  O(n)
    Space: O(n) for the temporary concatenation
    """

    def rotate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Guard empty input, then reduce k: rotating by len(nums) is a no-op,
        # and the slice arithmetic below is wrong once k > 2 * len(nums)
        # (nums[len(nums) - k:] stops wrapping usefully past -len(nums)).
        if not nums:
            return
        k %= len(nums)
        nums[:] = nums[len(nums) - k:] + nums[:len(nums) - k]
# Time: O(k * n)
# Space: O(1)
|
Solution4
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/kernel_tests/list_files_test.py
|
{
"start": 10954,
"end": 12814
}
|
class ____(
    ListFilesTest,
    checkpoint_test_base.CheckpointTestBase,
    parameterized.TestCase):
  """Checkpoint/restore tests for `Dataset.list_files` under global shuffle."""

  def setUp(self):
    super().setUp()
    # Bypasses the default value for `warm_start`, which is not supported for
    # global shuffling:
    # https://github.com/tensorflow/tensorflow/blob/29561af231863afb3b6b89e3aa8a6a550c2b7bb0/tensorflow/python/data/ops/options.py#L633
    test_mode.toggle_test_mode(False)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          checkpoint_test_base.default_test_combinations(),
          combinations.combine(
              repetitions=[1, 2],
              reshuffle_each_iteration=[True, False],
              symbolic_checkpoint=[True, False])))
  def test(
      self,
      verify_fn: Callable[..., None],
      repetitions: int,
      reshuffle_each_iteration: bool,
      symbolic_checkpoint: bool):
    # One temp file per name so list_files has deterministic matches.
    filenames = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
    self._touchTempFiles(filenames)

    def _build_dataset() -> dataset_ops.Dataset:
      # Listing itself is unshuffled; shuffling is applied globally below.
      dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'),
                                               shuffle=False)
      dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
      if repetitions > 1:
        dataset = dataset.repeat(repetitions)
      dataset = global_shuffle_op._global_shuffle(
          dataset, seed=42, reshuffle_each_iteration=reshuffle_each_iteration)
      options = options_lib.Options()
      options.experimental_symbolic_checkpoint = symbolic_checkpoint
      return dataset.with_options(options)

    verify_fn(
        self,
        _build_dataset,
        num_outputs=len(filenames) * repetitions,
        # Output order is only allowed to differ when reshuffling per iteration.
        assert_items_equal=reshuffle_each_iteration)


if __name__ == '__main__':
  test.main()
|
ListFilesGlobalShuffleCheckpointTest
|
python
|
ray-project__ray
|
python/ray/tests/test_resource_demand_scheduler.py
|
{
"start": 59423,
"end": 134727
}
|
class ____(unittest.TestCase):
def setUp(self):
_NODE_PROVIDERS["mock"] = lambda config: self.create_provider
self.provider = None
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
self.provider = None
del _NODE_PROVIDERS["mock"]
_clear_provider_cache()
shutil.rmtree(self.tmpdir)
ray.shutdown()
def waitForNodes(self, expected, comparison=None, tag_filters=None):
if tag_filters is None:
tag_filters = {}
MAX_ITER = 50
for i in range(MAX_ITER):
n = len(self.provider.non_terminated_nodes(tag_filters))
if comparison is None:
comparison = self.assertEqual
try:
comparison(n, expected)
return
except Exception:
if i == MAX_ITER - 1:
raise
time.sleep(0.1)
def create_provider(self, config, cluster_name):
assert self.provider
return self.provider
def write_config(self, config):
path = self.tmpdir + "/simple.yaml"
with open(path, "w") as f:
f.write(yaml.dump(config))
return path
def testGetOrCreateMultiNodeType(self):
config_path = self.write_config(MULTI_WORKER_CLUSTER)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]"])
get_or_create_head_node(
MULTI_WORKER_CLUSTER,
printable_config_file=config_path,
no_restart=False,
restart_only=False,
yes=True,
override_cluster_name=None,
_provider=self.provider,
_runner=runner,
)
self.waitForNodes(1)
runner.assert_has_call("1.2.3.4", "init_cmd")
runner.assert_has_call("1.2.3.4", "setup_cmd")
runner.assert_has_call("1.2.3.4", "start_ray_head")
self.assertEqual(self.provider.mock_nodes["0"].node_type, "empty_node")
self.assertEqual(
self.provider.mock_nodes["0"].node_config.get("FooProperty"), 42
)
self.assertEqual(self.provider.mock_nodes["0"].node_config.get("TestProp"), 1)
self.assertEqual(
self.provider.mock_nodes["0"].tags.get(TAG_RAY_USER_NODE_TYPE), "empty_node"
)
def testGetOrCreateMultiNodeTypeCustomHeadResources(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["empty_node"]["resources"] = {
"empty_resource_name": 1000
}
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]"])
get_or_create_head_node(
config,
printable_config_file=config_path,
no_restart=False,
restart_only=False,
yes=True,
override_cluster_name=None,
_provider=self.provider,
_runner=runner,
)
self.waitForNodes(1)
runner.assert_has_call("1.2.3.4", "init_cmd")
runner.assert_has_call("1.2.3.4", "setup_cmd")
runner.assert_has_call("1.2.3.4", "start_ray_head")
runner.assert_has_call("1.2.3.4", "empty_resource_name")
self.assertEqual(self.provider.mock_nodes["0"].node_type, "empty_node")
self.assertEqual(
self.provider.mock_nodes["0"].node_config.get("FooProperty"), 42
)
self.assertEqual(self.provider.mock_nodes["0"].node_config.get("TestProp"), 1)
self.assertEqual(
self.provider.mock_nodes["0"].tags.get(TAG_RAY_USER_NODE_TYPE), "empty_node"
)
def testSummary(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["m4.large"]["min_workers"] = 2
config["max_workers"] = 10
config["docker"] = {}
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_USER_NODE_TYPE: "empty_node",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
},
1,
)
head_ip = self.provider.non_terminated_node_ips({})[0]
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
max_launch_batch=1,
max_concurrent_launches=10,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(3)
for ip in self.provider.non_terminated_node_ips({}):
lm.update(ip, mock_node_id(), {"CPU": 2}, {"CPU": 0}, 0)
lm.update(head_ip, mock_node_id(), {"CPU": 16}, {"CPU": 1}, 0)
autoscaler.update()
while True:
if (
len(
self.provider.non_terminated_nodes(
{TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE}
)
)
== 3
):
break
# After this section, the p2.xlarge is now in the setup process.
runner.ready_to_run.clear()
lm.update(
head_ip,
mock_node_id(),
{"CPU": 16},
{"CPU": 1},
0,
waiting_bundles=[{"GPU": 1}],
)
autoscaler.update()
self.waitForNodes(4)
self.provider.ready_to_create.clear()
lm.set_resource_requests([{"CPU": 64}] * 2)
autoscaler.update()
# TODO (Alex): We should find a more robust way of simulating a node
# failure here.
obj = ("172.0.0.4", "m4.4xlarge")
autoscaler.node_tracker._add_node_mapping(4, obj)
print(f"Head ip: {head_ip}")
summary = autoscaler.summary()
assert summary.active_nodes["m4.large"] == 2
assert summary.active_nodes["empty_node"] == 1
assert len(summary.active_nodes) == 2, summary.active_nodes
assert summary.pending_nodes == [
("172.0.0.3", "p2.xlarge", STATUS_WAITING_FOR_SSH)
]
assert summary.pending_launches == {"m4.16xlarge": 2}
assert summary.failed_nodes == [("172.0.0.4", "m4.4xlarge")]
assert summary.pending_resources == {
"GPU": 1,
"CPU": 144,
}, summary.pending_resources
# Check dict conversion
summary_dict = asdict(summary)
assert summary_dict["active_nodes"]["m4.large"] == 2
assert summary_dict["active_nodes"]["empty_node"] == 1
assert len(summary_dict["active_nodes"]) == 2, summary_dict["active_nodes"]
assert summary_dict["pending_nodes"] == [
("172.0.0.3", "p2.xlarge", STATUS_WAITING_FOR_SSH)
]
assert summary_dict["pending_launches"] == {"m4.16xlarge": 2}
assert summary_dict["failed_nodes"] == [("172.0.0.4", "m4.4xlarge")]
assert summary.node_type_mapping == {
"172.0.0.0": "empty_node",
"172.0.0.1": "m4.large",
"172.0.0.2": "m4.large",
"172.0.0.3": "p2.xlarge",
}
# Ensure summary is json-serializable
json.dumps(summary_dict)
# Make sure we return something (and don't throw exceptions). Let's not
# get bogged down with a full cli test here.
assert len(autoscaler.info_string()) > 1
def testScaleUpMinSanity(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["m4.large"]["min_workers"] = 2
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(3)
autoscaler.update()
self.waitForNodes(3)
def testScaleUpMinSanityWithHeadNode(self):
"""Make sure when min_workers is used with head node it does not count
head_node in min_workers."""
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["empty_node"]["min_workers"] = 2
config["available_node_types"]["empty_node"]["max_workers"] = 2
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(3)
autoscaler.update()
self.waitForNodes(3)
def testPlacementGroup(self):
# Note this is mostly an integration test. See
# testPlacementGroupScaling for more comprehensive tests.
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["min_workers"] = 0
config["max_workers"] = 999
config["head_node_type"] = "m4.4xlarge"
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: "head",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "m4.4xlarge",
},
1,
)
head_ip = self.provider.non_terminated_node_ips({})[0]
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
head_ip = self.provider.non_terminated_node_ips({})[0]
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(1)
pending_placement_groups = [
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.STRICT_SPREAD,
bundles=[Bundle(unit_resources={"GPU": 2})] * 3,
),
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.PACK,
bundles=([Bundle(unit_resources={"GPU": 2})] * 5),
),
]
# Since placement groups are implemented with custom resources, this is
# an example of the accompanying resource demands. Note the resource
# demand autoscaler will be unable to fulfill these demands, but we
# should still handle the other infeasible/waiting bundles.
placement_group_resource_demands = [
{
"GPU_group_0_6c2506ac733bc37496295b02c4fad446": 0.0101,
"GPU_group_6c2506ac733bc37496295b02c4fad446": 0.0101,
}
]
lm.update(
head_ip,
mock_node_id(),
{"CPU": 16},
{"CPU": 16},
DUMMY_IDLE_DURATION_S,
infeasible_bundles=placement_group_resource_demands,
waiting_bundles=[{"GPU": 8}],
pending_placement_groups=pending_placement_groups,
)
autoscaler.update()
self.waitForNodes(5)
for i in range(1, 5):
assert self.provider.mock_nodes[str(i)].node_type == "p2.8xlarge"
pending_placement_groups = [
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.STRICT_PACK,
bundles=([Bundle(unit_resources={"GPU": 2})] * 4),
),
PlacementGroupTableData(
state=PlacementGroupTableData.RESCHEDULING,
strategy=PlacementStrategy.SPREAD,
bundles=([Bundle(unit_resources={"GPU": 2})] * 2),
),
]
def testScaleUpMinWorkers(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["max_workers"] = 50
config["idle_timeout_minutes"] = 1
config["available_node_types"]["m4.large"]["min_workers"] = 1
config["available_node_types"]["p2.8xlarge"]["min_workers"] = 1
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(3)
assert len(self.provider.mock_nodes) == 3
assert {
self.provider.mock_nodes["1"].node_type,
self.provider.mock_nodes["2"].node_type,
} == {"p2.8xlarge", "m4.large"}
self.provider.create_node(
{},
{
TAG_RAY_USER_NODE_TYPE: "p2.8xlarge",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
},
2,
)
self.provider.create_node(
{},
{
TAG_RAY_USER_NODE_TYPE: "m4.16xlarge",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
},
2,
)
assert len(self.provider.non_terminated_nodes({})) == 7
# Make sure that after idle_timeout_minutes we don't kill idle
# min workers.
for node_id in self.provider.non_terminated_nodes({}):
lm.ray_nodes_last_used_time_by_ip[self.provider.internal_ip(node_id)] = -60
fill_in_node_ids(self.provider, lm)
autoscaler.update()
self.waitForNodes(3)
cnt = 0
# [1:] skips the head node.
for id in list(self.provider.mock_nodes.keys())[1:]:
if (
self.provider.mock_nodes[id].state == "running"
or self.provider.mock_nodes[id].state == "pending"
):
assert self.provider.mock_nodes[id].node_type in {
"p2.8xlarge",
"m4.large",
}
cnt += 1
assert cnt == 2
def testScaleUpIgnoreUsed(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
# Commenting out this line causes the test case to fail?!?!
config["min_workers"] = 0
config["target_utilization_fraction"] = 1.0
config["head_node_type"] = "p2.xlarge"
config_path = self.write_config(config)
self.provider = MockProvider()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: "head",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "p2.xlarge",
},
1,
)
head_ip = self.provider.non_terminated_node_ips({})[0]
self.provider.finish_starting_nodes()
runner = MockProcessRunner()
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
autoscaler.update()
self.waitForNodes(1)
lm.update(head_ip, mock_node_id(), {"CPU": 4, "GPU": 1}, {}, 0)
self.waitForNodes(1)
lm.update(
head_ip,
mock_node_id(),
{"CPU": 4, "GPU": 1},
{"GPU": 0},
0,
waiting_bundles=[{"GPU": 1}],
)
autoscaler.update()
self.waitForNodes(2)
assert self.provider.mock_nodes["1"].node_type == "p2.xlarge"
def testRequestBundlesAccountsForHeadNode(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["head_node_type"] = "p2.8xlarge"
config["min_workers"] = 0
config["max_workers"] = 50
config_path = self.write_config(config)
self.provider = MockProvider()
self.provider.create_node(
{},
{
TAG_RAY_USER_NODE_TYPE: "p2.8xlarge",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_NODE_KIND: "head",
},
1,
)
runner = MockProcessRunner()
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
# These requests fit on the head node.
autoscaler.update()
self.waitForNodes(1)
autoscaler.load_metrics.set_resource_requests([{"CPU": 1}])
autoscaler.update()
self.waitForNodes(1)
assert len(self.provider.mock_nodes) == 1
autoscaler.load_metrics.set_resource_requests([{"GPU": 8}])
autoscaler.update()
self.waitForNodes(1)
# This request requires an additional worker node.
autoscaler.load_metrics.set_resource_requests([{"GPU": 8}] * 2)
autoscaler.update()
self.waitForNodes(2)
assert self.provider.mock_nodes["1"].node_type == "p2.8xlarge"
def testRequestBundles(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["min_workers"] = 0
config["max_workers"] = 50
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(6)])
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(1)
autoscaler.load_metrics.set_resource_requests([{"CPU": 1}])
autoscaler.update()
self.waitForNodes(2)
assert self.provider.mock_nodes["1"].node_type == "m4.large"
autoscaler.load_metrics.set_resource_requests([{"GPU": 8}])
autoscaler.update()
self.waitForNodes(3)
assert self.provider.mock_nodes["2"].node_type == "p2.8xlarge"
autoscaler.load_metrics.set_resource_requests([{"CPU": 32}] * 4)
autoscaler.update()
self.waitForNodes(5)
assert self.provider.mock_nodes["3"].node_type == "m4.16xlarge"
assert self.provider.mock_nodes["4"].node_type == "m4.16xlarge"
def testResourcePassing(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["min_workers"] = 0
config["max_workers"] = 50
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
self.provider.create_node(
{},
{
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(0, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
autoscaler.load_metrics.set_resource_requests([{"CPU": 1}])
autoscaler.update()
self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
assert self.provider.mock_nodes["1"].node_type == "m4.large"
autoscaler.load_metrics.set_resource_requests([{"GPU": 8}])
autoscaler.update()
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
assert self.provider.mock_nodes["2"].node_type == "p2.8xlarge"
# TODO (Alex): Autoscaler creates the node during one update then
# starts the updater in the next update. The sleep is largely
# unavoidable because the updater runs in its own thread and we have no
# good way of ensuring that the commands are sent in time.
autoscaler.update()
sleep(0.1)
# These checks are done separately because we have no guarantees on the
# order the dict is serialized in.
runner.assert_has_call("172.0.0.1", "RAY_OVERRIDE_RESOURCES=")
runner.assert_has_call("172.0.0.1", '"CPU":2')
runner.assert_has_call("172.0.0.2", "RAY_OVERRIDE_RESOURCES=")
runner.assert_has_call("172.0.0.2", '"CPU":32')
runner.assert_has_call("172.0.0.2", '"GPU":8')
def testScaleUpLoadMetrics(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["min_workers"] = 0
config["max_workers"] = 50
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(0, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
autoscaler.update()
lm.update(
"1.2.3.4",
mock_node_id(),
{},
{},
DUMMY_IDLE_DURATION_S,
waiting_bundles=[{"GPU": 1}],
infeasible_bundles=[{"CPU": 16}],
)
autoscaler.update()
self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
nodes = {
self.provider.mock_nodes["1"].node_type,
}
assert nodes == {"p2.xlarge"}
def testCommandPassing(self):
t = "custom"
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["p2.8xlarge"]["worker_setup_commands"] = [
"new_worker_setup_command"
]
config["available_node_types"]["p2.xlarge"]["initialization_commands"] = [
"new_worker_initialization_cmd"
]
config["available_node_types"]["p2.xlarge"]["resources"][t] = 1
# Commenting out this line causes the test case to fail?!?!
config["min_workers"] = 0
config["max_workers"] = 10
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(4)])
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
lm = LoadMetrics()
lm.update("172.0.0.0", mock_node_id(), {"CPU": 0}, {"CPU": 0}, 0)
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(1)
autoscaler.load_metrics.set_resource_requests([{"CPU": 1}])
autoscaler.update()
self.waitForNodes(2)
assert self.provider.mock_nodes["1"].node_type == "m4.large"
autoscaler.load_metrics.set_resource_requests([{"GPU": 8}])
autoscaler.update()
self.waitForNodes(3)
assert self.provider.mock_nodes["2"].node_type == "p2.8xlarge"
autoscaler.load_metrics.set_resource_requests([{"GPU": 1}] * 9)
autoscaler.update()
self.waitForNodes(4)
assert self.provider.mock_nodes["3"].node_type == "p2.xlarge"
autoscaler.update()
sleep(0.1)
runner.assert_has_call(
self.provider.mock_nodes["2"].internal_ip, "new_worker_setup_command"
)
runner.assert_not_has_call(
self.provider.mock_nodes["2"].internal_ip, "setup_cmd"
)
runner.assert_not_has_call(
self.provider.mock_nodes["2"].internal_ip, "worker_setup_cmd"
)
runner.assert_has_call(
self.provider.mock_nodes["3"].internal_ip, "new_worker_initialization_cmd"
)
runner.assert_not_has_call(
self.provider.mock_nodes["3"].internal_ip, "init_cmd"
)
def testDockerWorkers(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["p2.8xlarge"]["docker"] = {
"worker_image": "p2.8x_image:latest",
"worker_run_options": ["p2.8x-run-options"],
}
config["available_node_types"]["p2.xlarge"]["docker"] = {
"worker_image": "p2x_image:nightly"
}
config["docker"]["run_options"] = ["head-and-worker-run-options"]
config["docker"]["worker_run_options"] = ["standard-run-options"]
config["docker"]["image"] = "default-image:nightly"
config["docker"]["worker_image"] = "default-image:nightly"
# Commenting out this line causes the test case to fail?!?!
config["min_workers"] = 0
config["max_workers"] = 10
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(5)])
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(1)
autoscaler.load_metrics.set_resource_requests([{"CPU": 1}])
autoscaler.update()
self.waitForNodes(2)
assert self.provider.mock_nodes["1"].node_type == "m4.large"
autoscaler.load_metrics.set_resource_requests([{"GPU": 8}])
autoscaler.update()
self.waitForNodes(3)
assert self.provider.mock_nodes["2"].node_type == "p2.8xlarge"
autoscaler.load_metrics.set_resource_requests([{"GPU": 1}] * 9)
autoscaler.update()
self.waitForNodes(4)
assert self.provider.mock_nodes["3"].node_type == "p2.xlarge"
autoscaler.update()
# Fill up m4, p2.8, p2 and request 2 more CPUs
autoscaler.load_metrics.set_resource_requests(
[{"CPU": 2}, {"CPU": 16}, {"CPU": 32}, {"CPU": 2}]
)
autoscaler.update()
self.waitForNodes(5)
assert self.provider.mock_nodes["4"].node_type == "m4.large"
autoscaler.update()
sleep(0.1)
runner.assert_has_call(
self.provider.mock_nodes["2"].internal_ip, "p2.8x-run-options"
)
runner.assert_has_call(
self.provider.mock_nodes["2"].internal_ip, "head-and-worker-run-options"
)
runner.assert_has_call(
self.provider.mock_nodes["2"].internal_ip, "p2.8x_image:latest"
)
runner.assert_not_has_call(
self.provider.mock_nodes["2"].internal_ip, "default-image:nightly"
)
runner.assert_not_has_call(
self.provider.mock_nodes["2"].internal_ip, "standard-run-options"
)
runner.assert_has_call(
self.provider.mock_nodes["3"].internal_ip, "p2x_image:nightly"
)
runner.assert_has_call(
self.provider.mock_nodes["3"].internal_ip, "standard-run-options"
)
runner.assert_has_call(
self.provider.mock_nodes["3"].internal_ip, "head-and-worker-run-options"
)
runner.assert_not_has_call(
self.provider.mock_nodes["3"].internal_ip, "p2.8x-run-options"
)
runner.assert_has_call(
self.provider.mock_nodes["4"].internal_ip, "default-image:nightly"
)
runner.assert_has_call(
self.provider.mock_nodes["4"].internal_ip, "standard-run-options"
)
runner.assert_has_call(
self.provider.mock_nodes["4"].internal_ip, "head-and-worker-run-options"
)
runner.assert_not_has_call(
self.provider.mock_nodes["4"].internal_ip, "p2.8x-run-options"
)
runner.assert_not_has_call(
self.provider.mock_nodes["4"].internal_ip, "p2x_image:nightly"
)
def testUpdateConfig(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["m4.large"]["min_workers"] = 2
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
config["available_node_types"]["m4.large"]["min_workers"] = 0
config["available_node_types"]["m4.large"]["node_config"]["field_changed"] = 1
config_path = self.write_config(config)
fill_in_node_ids(self.provider, lm)
autoscaler.update()
self.waitForNodes(0, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
def testEmptyDocker(self):
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
del config["docker"]
config["min_workers"] = 0
config["max_workers"] = 10
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 1
autoscaler.update()
self.waitForNodes(1)
autoscaler.load_metrics.set_resource_requests([{"CPU": 1}])
autoscaler.update()
self.waitForNodes(2)
assert self.provider.mock_nodes["1"].node_type == "m4.large"
autoscaler.load_metrics.set_resource_requests([{"GPU": 8}])
autoscaler.update()
self.waitForNodes(3)
assert self.provider.mock_nodes["2"].node_type == "p2.8xlarge"
def testRequestResourcesIdleTimeout(self):
"""Test request_resources() with and without idle timeout."""
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["max_workers"] = 4
config["idle_timeout_minutes"] = 0
config["available_node_types"] = {
"empty_node": {
"node_config": {},
"resources": {"CPU": 2},
"max_workers": 1,
},
"def_worker": {
"node_config": {},
"resources": {"CPU": 2, "WORKER": 1},
"max_workers": 3,
},
}
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
lm = LoadMetrics()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(3)])
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
autoscaler.update()
self.waitForNodes(0, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
autoscaler.load_metrics.set_resource_requests([{"CPU": 0.2, "WORKER": 1.0}])
autoscaler.update()
self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
non_terminated_nodes = autoscaler.provider.non_terminated_nodes({})
assert len(non_terminated_nodes) == 2
node_id = non_terminated_nodes[1]
node_ip = autoscaler.provider.non_terminated_node_ips({})[1]
# A hack to check if the node was terminated when it shouldn't.
autoscaler.provider.mock_nodes[node_id].state = "unterminatable"
lm.update(
node_ip,
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
config["available_node_types"]["def_worker"]["resources"],
DUMMY_IDLE_DURATION_S,
waiting_bundles=[{"CPU": 0.2, "WORKER": 1.0}],
)
autoscaler.update()
# this fits on request_resources()!
self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
autoscaler.load_metrics.set_resource_requests([{"CPU": 0.2, "WORKER": 1.0}] * 2)
autoscaler.update()
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
autoscaler.load_metrics.set_resource_requests([{"CPU": 0.2, "WORKER": 1.0}])
lm.update(
node_ip,
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
{},
0,
waiting_bundles=[{"CPU": 0.2, "WORKER": 1.0}],
)
autoscaler.update()
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
lm.update(
node_ip,
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
config["available_node_types"]["def_worker"]["resources"],
DUMMY_IDLE_DURATION_S,
waiting_bundles=[{"CPU": 0.2, "WORKER": 1.0}],
)
autoscaler.update()
# Still 2 as the second node did not show up a heart beat.
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
# If node {node_id} was terminated any time then it's state will be set
# to terminated.
assert autoscaler.provider.mock_nodes[node_id].state == "unterminatable"
lm.update(
"172.0.0.2",
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
config["available_node_types"]["def_worker"]["resources"],
DUMMY_IDLE_DURATION_S,
waiting_bundles=[{"CPU": 0.2, "WORKER": 1.0}],
)
autoscaler.update()
# Now it is 1 because it showed up in last used (heart beat).
# The remaining one is 127.0.0.1.
self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
def testRequestResourcesRaceConditionsLong(self):
"""Test request_resources(), race conditions & demands/min_workers.
Tests when request_resources() is called simultaneously with resource
demands and min_workers constraint in multiple orders upscaling and
downscaling.
"""
# Cluster: one empty head type plus a worker type that must always
# keep min_workers=1 alive; idle_timeout_minutes=0 makes idle nodes
# eligible for termination immediately.
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["max_workers"] = 4
config["idle_timeout_minutes"] = 0
config["available_node_types"] = {
"empty_node": {
"node_config": {},
"resources": {"CPU": 2},
"max_workers": 1,
},
"def_worker": {
"node_config": {},
"resources": {"CPU": 2, "WORKER": 1},
"max_workers": 3,
"min_workers": 1,
},
}
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(3)])
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
autoscaler.load_metrics.set_resource_requests([{"CPU": 0.2, "WORKER": 1.0}])
autoscaler.update()
# 1 min worker for both min_worker and request_resources()
self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
non_terminated_nodes = autoscaler.provider.non_terminated_nodes({})
assert len(non_terminated_nodes) == 2
node_id = non_terminated_nodes[1]
node_ip = autoscaler.provider.non_terminated_node_ips({})[1]
# A hack to check if the node was terminated when it shouldn't.
autoscaler.provider.mock_nodes[node_id].state = "unterminatable"
lm.update(
node_ip,
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
config["available_node_types"]["def_worker"]["resources"],
DUMMY_IDLE_DURATION_S,
waiting_bundles=[{"CPU": 0.2, "WORKER": 1.0}],
)
autoscaler.load_metrics.set_resource_requests([{"CPU": 0.2, "WORKER": 1.0}] * 2)
autoscaler.update()
# 2 requested_resource, 1 min worker, 1 free node -> 2 nodes total
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
autoscaler.load_metrics.set_resource_requests([{"CPU": 0.2, "WORKER": 1.0}])
autoscaler.update()
# Still 2 because the second one is not connected and hence
# request_resources occupies the connected node.
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
autoscaler.load_metrics.set_resource_requests([{"CPU": 0.2, "WORKER": 1.0}] * 3)
lm.update(
node_ip,
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
{},
0,
waiting_bundles=[{"CPU": 0.2, "WORKER": 1.0}] * 3,
)
autoscaler.update()
self.waitForNodes(3, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
# Clear the requests and report all three workers as fully idle so
# downscaling kicks in on the next update.
autoscaler.load_metrics.set_resource_requests([])
lm.update(
"172.0.0.2",
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
config["available_node_types"]["def_worker"]["resources"],
DUMMY_IDLE_DURATION_S,
)
lm.update(
"172.0.0.3",
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
config["available_node_types"]["def_worker"]["resources"],
DUMMY_IDLE_DURATION_S,
)
# The busy (non-idle) node must be the one that survives min_workers.
lm.update(
node_ip,
mock_node_id(),
config["available_node_types"]["def_worker"]["resources"],
{},
0,
)
print("============ Should scale down from here =============", node_id)
autoscaler.update()
self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
# If node {node_id} was terminated any time then it's state will be set
# to terminated.
assert autoscaler.provider.mock_nodes[node_id].state == "unterminatable"
def testRequestResourcesRaceConditionWithMinWorker(self):
"""Test request_resources() with min_workers.
Tests when request_resources() is called simultaneously with adding
min_workers constraint.
"""
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"] = {
"empty_node": {
"node_config": {},
"resources": {"CPU": 2},
"max_workers": 1,
},
"def_worker": {
"node_config": {},
"resources": {"CPU": 2, "WORKER": 1},
"max_workers": 3,
"min_workers": 1,
},
}
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
# Two explicit requests that each fit a def_worker; the min_workers=1
# node counts toward satisfying one of them.
autoscaler.load_metrics.set_resource_requests([{"CPU": 2, "WORKER": 1.0}] * 2)
autoscaler.update()
# 2 min worker for both min_worker and request_resources(), not 3.
self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
def testRequestResourcesRaceConditionWithResourceDemands(self):
"""Test request_resources() with resource_demands.
Tests when request_resources() is called simultaneously with resource
demands in multiple orders.
"""
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"].update(
{
"empty_node": {
"node_config": {},
"resources": {"CPU": 2, "GPU": 1},
"max_workers": 1,
},
"def_worker": {
"node_config": {},
"resources": {"CPU": 2, "GPU": 1, "WORKER": 1},
"max_workers": 3,
},
}
)
config["idle_timeout_minutes"] = 0
config_path = self.write_config(config)
self.provider = MockProvider()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: "head",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "empty_node",
},
1,
)
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
lm = LoadMetrics()
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
# The head reports a waiting {"CPU": 2} task while request_resources()
# asks for two {"CPU": 2, "GPU": 1} bundles at the same time.
lm.update(
"127.0.0.0",
mock_node_id(),
{"CPU": 2, "GPU": 1},
{"CPU": 2},
0,
waiting_bundles=[{"CPU": 2}],
)
autoscaler.load_metrics.set_resource_requests([{"CPU": 2, "GPU": 1}] * 2)
autoscaler.update()
# 1 head, 1 worker.
self.waitForNodes(2)
lm.update(
"127.0.0.0",
mock_node_id(),
{"CPU": 2, "GPU": 1},
{"CPU": 2},
0,
waiting_bundles=[{"CPU": 2}],
)
# make sure it stays consistent.
for _ in range(10):
autoscaler.update()
self.waitForNodes(2)
def format_pg(pg):
    """Render a placement-group demand dict as ``shape * count, ... (STRATEGY)``.

    ``pg`` must have a "strategy" string and a "bundles" list of
    ``(resource_shape_dict, count)`` pairs.
    """
    shapes = ", ".join(f"{shape} * {count}" for shape, count in pg["bundles"])
    return f"{shapes} ({pg['strategy']})"
def test_memory_string_formatting():
"""format_memory() renders byte counts using binary units (B/KiB/.../TiB)."""
assert ray.autoscaler._private.util.format_memory(0) == "0B"
assert (
ray.autoscaler._private.util.format_memory(0.0) == "0B"
), "Bytes aren't decimals"
assert ray.autoscaler._private.util.format_memory(1) == "1B"
assert ray.autoscaler._private.util.format_memory(1023) == "1023B"
# From 1024 up, two-decimal KiB; values just below the next unit stay
# in the smaller unit (e.g. 2**20 - 1 is still KiB).
assert ray.autoscaler._private.util.format_memory(1024) == "1.00KiB"
assert ray.autoscaler._private.util.format_memory(1025) == "1.00KiB"
assert ray.autoscaler._private.util.format_memory(1037) == "1.01KiB"
assert ray.autoscaler._private.util.format_memory(1200) == "1.17KiB"
assert ray.autoscaler._private.util.format_memory(2**20 - 10) == "1023.99KiB"
assert ray.autoscaler._private.util.format_memory(2**20 - 1) == "1024.00KiB"
assert ray.autoscaler._private.util.format_memory(2**20) == "1.00MiB"
assert ray.autoscaler._private.util.format_memory(2**30) == "1.00GiB"
assert ray.autoscaler._private.util.format_memory(5.001 * 2**30) == "5.00GiB"
assert (
ray.autoscaler._private.util.format_memory(5.004 * 2**30) == "5.00GiB"
), "rounds down"
assert (
ray.autoscaler._private.util.format_memory(5.005 * 2**30) == "5.00GiB"
), "rounds down"
assert ray.autoscaler._private.util.format_memory(2**40) == "1.00TiB"
def test_info_string():
"""Golden test for format_info_string() with active/pending/failed nodes."""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"AcceleratorType:V100": (0, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
# Pure accelerator-type markers; verify whether these are meant
# to be hidden from the usage report -- the expected output below
# omits "accelerator_type:T4".
"accelerator_type:T4": (1, 1),
},
resource_demand=[({"CPU": 1}, 150)],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
request_demand=[({"CPU": 16}, 100)],
node_types=[],
)
autoscaler_summary = AutoscalerSummary(
active_nodes={"p3.2xlarge": 2, "m4.4xlarge": 20},
pending_nodes=[
("1.2.3.4", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
("1.2.3.5", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
],
idle_nodes=[],
pending_launches={"m4.4xlarge": 2},
failed_nodes=[("1.2.3.6", "p3.2xlarge")],
)
# NOTE: the expected text must match byte-for-byte, including blank
# sections and ordering of usage lines.
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
Node status
--------------------------------------------------------
Active:
2 p3.2xlarge
20 m4.4xlarge
Idle:
(no idle nodes)
Pending:
m4.4xlarge, 2 launching
1.2.3.4: m4.4xlarge, waiting-for-ssh
1.2.3.5: m4.4xlarge, waiting-for-ssh
Recent failures:
p3.2xlarge: NodeTerminated (ip: 1.2.3.6)
Resources
--------------------------------------------------------
Total Usage:
0/2 AcceleratorType:V100
530.0/544.0 CPU
2/2 GPU
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
Pending Demands:
{'CPU': 1}: 150+ pending tasks/actors
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
""".strip()
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
)
print(actual)
assert expected == actual
def test_info_string_multiple_constraints():
"""format_info_string() lists every request_resources() constraint."""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
},
resource_demand=[({"CPU": 1}, 150)],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
# Two distinct constraints -> two "From request_resources" lines.
request_demand=[({"CPU": 16}, 100), ({"CPU": 1, "GPU": 16}, 10)],
node_types=[],
)
autoscaler_summary = AutoscalerSummary(
active_nodes={"p3.2xlarge": 2},
pending_nodes=[],
idle_nodes=[],
pending_launches={},
failed_nodes=[],
)
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
Node status
--------------------------------------------------------
Active:
2 p3.2xlarge
Idle:
(no idle nodes)
Pending:
(no pending nodes)
Recent failures:
(no failures)
Resources
--------------------------------------------------------
Total Usage:
530.0/544.0 CPU
2/2 GPU
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
{'CPU': 1, 'GPU': 16}: 10 from request_resources()
Pending Demands:
{'CPU': 1}: 150+ pending tasks/actors
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
""".strip()
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
)
print(actual)
assert expected == actual
def test_info_string_verbose():
"""verbose=True adds timing lines plus per-node usage and activity."""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"accelerator_type:V100": (1, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
},
resource_demand=[({"CPU": 1}, 150)],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
request_demand=[({"CPU": 16}, 100)],
node_types=[],
# Per-node breakdown: only rendered in verbose mode.
usage_by_node={
"192.168.1.1": {
"CPU": (5.0, 20.0),
"GPU": (0.7, 1),
"accelerator_type:V100": (0.1, 1),
"memory": (2**30, 2**32),
"object_store_memory": (3.14 * 2**30, 2**32),
},
"192.168.1.2": {
"CPU": (15.0, 20.0),
"GPU": (0.3, 1),
"accelerator_type:V100": (0.9, 1),
"memory": (2**30, 1.5 * 2**33),
"object_store_memory": (0, 2**32),
},
},
)
autoscaler_summary = AutoscalerSummary(
active_nodes=[],
idle_nodes={"p3.2xlarge": 2, "m4.4xlarge": 20},
pending_nodes=[
("1.2.3.4", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
("1.2.3.5", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
],
pending_launches={"m4.4xlarge": 2},
failed_nodes=[("1.2.3.6", "p3.2xlarge")],
node_activities={
"192.168.1.1": (
"m4.4xlarge",
["CPU in use.", "GPU in use.", "Active workers."],
),
"192.168.1.2": ("m4.4xlarge", ["GPU in use.", "Active workers."]),
},
)
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
GCS request time: 3.141500s
Node Provider non_terminated_nodes time: 1.618000s
Node status
--------------------------------------------------------
Active:
(no active nodes)
Idle:
2 p3.2xlarge
20 m4.4xlarge
Pending:
m4.4xlarge, 2 launching
1.2.3.4: m4.4xlarge, waiting-for-ssh
1.2.3.5: m4.4xlarge, waiting-for-ssh
Recent failures:
p3.2xlarge: NodeTerminated (ip: 1.2.3.6)
Resources
--------------------------------------------------------
Total Usage:
530.0/544.0 CPU
2/2 GPU
1/2 accelerator_type:V100
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
Pending Demands:
{'CPU': 1}: 150+ pending tasks/actors
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
Node: 192.168.1.1
Usage:
5.0/20.0 CPU
0.7/1 GPU
0.1/1 accelerator_type:V100
1.00GiB/4.00GiB memory
3.14GiB/4.00GiB object_store_memory
Activity:
CPU in use.
GPU in use.
Active workers.
Node: 192.168.1.2
Usage:
15.0/20.0 CPU
0.3/1 GPU
0.9/1 accelerator_type:V100
1.00GiB/12.00GiB memory
0B/4.00GiB object_store_memory
Activity:
GPU in use.
Active workers.
""".strip()
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
gcs_request_time=3.1415,
non_terminated_nodes_time=1.618,
verbose=True,
)
print(actual)
assert expected == actual
def test_info_string_verbose_node_types():
"""verbose output annotates each node line with its node type name."""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"accelerator_type:V100": (1, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
},
resource_demand=[({"CPU": 1}, 150)],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
request_demand=[({"CPU": 16}, 100)],
node_types=[],
usage_by_node={
"192.168.1.1": {
"CPU": (5.0, 20.0),
"GPU": (0.7, 1),
"accelerator_type:V100": (0.1, 1),
"memory": (2**30, 2**32),
"object_store_memory": (3.14 * 2**30, 2**32),
},
"192.168.1.2": {
"CPU": (15.0, 20.0),
"GPU": (0.3, 1),
"accelerator_type:V100": (0.9, 1),
"memory": (2**30, 1.5 * 2**33),
"object_store_memory": (0, 2**32),
},
},
)
autoscaler_summary = AutoscalerSummary(
active_nodes={"p3.2xlarge": 2, "m4.4xlarge": 20},
pending_nodes=[
("1.2.3.4", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
("1.2.3.5", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
],
idle_nodes=[],
pending_launches={"m4.4xlarge": 2},
failed_nodes=[("1.2.3.6", "p3.2xlarge")],
# ip -> node type; rendered as "Node: <ip> (<type>)" below.
node_type_mapping={
"192.168.1.1": "head-node",
"192.168.1.2": "gpu-worker",
},
)
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
GCS request time: 3.141500s
Node Provider non_terminated_nodes time: 1.618000s
Autoscaler iteration time: 3.141500s
Node status
--------------------------------------------------------
Active:
2 p3.2xlarge
20 m4.4xlarge
Idle:
(no idle nodes)
Pending:
m4.4xlarge, 2 launching
1.2.3.4: m4.4xlarge, waiting-for-ssh
1.2.3.5: m4.4xlarge, waiting-for-ssh
Recent failures:
p3.2xlarge: NodeTerminated (ip: 1.2.3.6)
Resources
--------------------------------------------------------
Total Usage:
530.0/544.0 CPU
2/2 GPU
1/2 accelerator_type:V100
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
Pending Demands:
{'CPU': 1}: 150+ pending tasks/actors
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
Node: 192.168.1.1 (head-node)
Usage:
5.0/20.0 CPU
0.7/1 GPU
0.1/1 accelerator_type:V100
1.00GiB/4.00GiB memory
3.14GiB/4.00GiB object_store_memory
Node: 192.168.1.2 (gpu-worker)
Usage:
15.0/20.0 CPU
0.3/1 GPU
0.9/1 accelerator_type:V100
1.00GiB/12.00GiB memory
0B/4.00GiB object_store_memory
""".strip()
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
gcs_request_time=3.1415,
non_terminated_nodes_time=1.618,
autoscaler_update_time=3.1415,
verbose=True,
)
print(actual)
assert expected == actual
def test_info_string_verbose_no_breakdown():
"""
Test the verbose string but with node reporting feature flagged off.
"""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"AcceleratorType:V100": (1, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
},
resource_demand=[({"CPU": 1}, 150)],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
request_demand=[({"CPU": 16}, 100)],
node_types=[],
# usage_by_node=None simulates the per-node breakdown being
# unavailable; no "Node:" sections should be emitted.
usage_by_node=None,
)
autoscaler_summary = AutoscalerSummary(
active_nodes=[],
idle_nodes={"p3.2xlarge": 2, "m4.4xlarge": 20},
pending_nodes=[
("1.2.3.4", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
("1.2.3.5", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
],
pending_launches={"m4.4xlarge": 2},
failed_nodes=[("1.2.3.6", "p3.2xlarge")],
)
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
GCS request time: 3.141500s
Node Provider non_terminated_nodes time: 1.618000s
Node status
--------------------------------------------------------
Active:
(no active nodes)
Idle:
2 p3.2xlarge
20 m4.4xlarge
Pending:
m4.4xlarge, 2 launching
1.2.3.4: m4.4xlarge, waiting-for-ssh
1.2.3.5: m4.4xlarge, waiting-for-ssh
Recent failures:
p3.2xlarge: NodeTerminated (ip: 1.2.3.6)
Resources
--------------------------------------------------------
Total Usage:
1/2 AcceleratorType:V100
530.0/544.0 CPU
2/2 GPU
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
Pending Demands:
{'CPU': 1}: 150+ pending tasks/actors
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
""".strip()
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
gcs_request_time=3.1415,
non_terminated_nodes_time=1.618,
verbose=True,
)
print(actual)
assert expected == actual
def test_info_string_with_launch_failures():
"""Launch failures appear under "Recent failures", newest first."""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"AcceleratorType:V100": (0, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
},
resource_demand=[({"CPU": 1}, 150)],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
request_demand=[({"CPU": 16}, 100)],
node_types=[],
)
base_timestamp = datetime(
year=2012, month=12, day=21, hour=13, minute=3, second=1
).timestamp()
autoscaler_summary = AutoscalerSummary(
active_nodes={"p3.2xlarge": 2, "m4.4xlarge": 20},
pending_nodes=[
("1.2.3.4", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
("1.2.3.5", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
],
idle_nodes=[],
pending_launches={"m4.4xlarge": 2},
failed_nodes=[("1.2.3.6", "p3.2xlarge")],
# Availability records carry the provider-reported failure category
# and timestamp; non-verbose output shows category but not the
# description text.
node_availability_summary=NodeAvailabilitySummary(
node_availabilities={
"A100": NodeAvailabilityRecord(
node_type="A100",
is_available=False,
last_checked_timestamp=base_timestamp + 1,
unavailable_node_information=UnavailableNodeInformation(
category="InstanceLimitExceeded",
description=":)",
),
),
"Inferentia-Spot": NodeAvailabilityRecord(
node_type="Inferentia-Spot",
is_available=False,
last_checked_timestamp=base_timestamp,
unavailable_node_information=UnavailableNodeInformation(
category="InsufficientInstanceCapacity",
description="mo nodes mo problems",
),
),
}
),
)
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
Node status
--------------------------------------------------------
Active:
2 p3.2xlarge
20 m4.4xlarge
Idle:
(no idle nodes)
Pending:
m4.4xlarge, 2 launching
1.2.3.4: m4.4xlarge, waiting-for-ssh
1.2.3.5: m4.4xlarge, waiting-for-ssh
Recent failures:
A100: InstanceLimitExceeded (latest_attempt: 13:03:02)
Inferentia-Spot: InsufficientInstanceCapacity (latest_attempt: 13:03:01)
p3.2xlarge: NodeTerminated (ip: 1.2.3.6)
Resources
--------------------------------------------------------
Total Usage:
0/2 AcceleratorType:V100
530.0/544.0 CPU
2/2 GPU
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
Pending Demands:
{'CPU': 1}: 150+ pending tasks/actors
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
""".strip()
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
)
print(actual)
assert expected == actual
def test_info_string_with_launch_failures_verbose():
"""In verbose mode, failure lines also include the description text."""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"AcceleratorType:V100": (0, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
},
resource_demand=[({"CPU": 1}, 150)],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
request_demand=[({"CPU": 16}, 100)],
node_types=[],
)
base_timestamp = datetime(
year=2012, month=12, day=21, hour=13, minute=3, second=1
).timestamp()
autoscaler_summary = AutoscalerSummary(
active_nodes=[],
idle_nodes={"p3.2xlarge": 2, "m4.4xlarge": 20},
pending_nodes=[
("1.2.3.4", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
("1.2.3.5", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
],
pending_launches={"m4.4xlarge": 2},
failed_nodes=[("1.2.3.6", "p3.2xlarge")],
node_availability_summary=NodeAvailabilitySummary(
node_availabilities={
"A100": NodeAvailabilityRecord(
node_type="A100",
is_available=False,
last_checked_timestamp=base_timestamp + 1,
unavailable_node_information=UnavailableNodeInformation(
category="InstanceLimitExceeded",
description="you should fix it",
),
),
"Inferentia-Spot": NodeAvailabilityRecord(
node_type="Inferentia-Spot",
is_available=False,
last_checked_timestamp=base_timestamp,
unavailable_node_information=UnavailableNodeInformation(
category="InsufficientInstanceCapacity",
description="desc",
),
),
}
),
)
# Note the " - <description>" suffix on availability failures below;
# that suffix only appears with verbose=True.
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
Node status
--------------------------------------------------------
Active:
(no active nodes)
Idle:
2 p3.2xlarge
20 m4.4xlarge
Pending:
m4.4xlarge, 2 launching
1.2.3.4: m4.4xlarge, waiting-for-ssh
1.2.3.5: m4.4xlarge, waiting-for-ssh
Recent failures:
A100: InstanceLimitExceeded (latest_attempt: 13:03:02) - you should fix it
Inferentia-Spot: InsufficientInstanceCapacity (latest_attempt: 13:03:01) - desc
p3.2xlarge: NodeTerminated (ip: 1.2.3.6)
Resources
--------------------------------------------------------
Total Usage:
0/2 AcceleratorType:V100
530.0/544.0 CPU
2/2 GPU
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
Pending Demands:
{'CPU': 1}: 150+ pending tasks/actors
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
""".strip()
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
verbose=True,
)
print(actual)
assert expected == actual
def test_info_string_failed_node_cap():
"""Failed-node list is capped, and placement-group resources are folded.

100 failed nodes are supplied but the expected output shows only the
most recent ones; CPU_group_*/GPU_group_* demands are merged back into
their base resource names with "(using placement groups)" annotations.
"""
lm_summary = LoadMetricsSummary(
usage={
"CPU": (530.0, 544.0),
"GPU": (2, 2),
"AcceleratorType:V100": (0, 2),
"memory": (2 * 2**30, 2**33),
"object_store_memory": (3.14 * 2**30, 2**34),
"CPU_group_4a82a217aadd8326a3a49f02700ac5c2": (2.0, 2.0),
},
resource_demand=[
({"CPU": 2.0}, 150),
({"CPU_group_4a82a217aadd8326a3a49f02700ac5c2": 2.0}, 3),
({"GPU_group_0_4a82a2add8326a3a49f02700ac5c2": 0.5}, 100),
],
pg_demand=[({"bundles": [({"CPU": 4}, 5)], "strategy": "PACK"}, 420)],
request_demand=[({"CPU": 16}, 100)],
node_types=[],
)
autoscaler_summary = AutoscalerSummary(
active_nodes=[],
idle_nodes={"p3.2xlarge": 2, "m4.4xlarge": 20},
pending_nodes=[
("1.2.3.4", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
("1.2.3.5", "m4.4xlarge", STATUS_WAITING_FOR_SSH),
],
pending_launches={"m4.4xlarge": 2},
failed_nodes=[(f"1.2.3.{i}", "p3.2xlarge") for i in range(100)],
)
expected = """
======== Autoscaler status: 2020-12-28 01:02:03 ========
Node status
--------------------------------------------------------
Active:
(no active nodes)
Idle:
2 p3.2xlarge
20 m4.4xlarge
Pending:
m4.4xlarge, 2 launching
1.2.3.4: m4.4xlarge, waiting-for-ssh
1.2.3.5: m4.4xlarge, waiting-for-ssh
Recent failures:
p3.2xlarge: NodeTerminated (ip: 1.2.3.99)
p3.2xlarge: NodeTerminated (ip: 1.2.3.98)
p3.2xlarge: NodeTerminated (ip: 1.2.3.97)
p3.2xlarge: NodeTerminated (ip: 1.2.3.96)
p3.2xlarge: NodeTerminated (ip: 1.2.3.95)
p3.2xlarge: NodeTerminated (ip: 1.2.3.94)
p3.2xlarge: NodeTerminated (ip: 1.2.3.93)
p3.2xlarge: NodeTerminated (ip: 1.2.3.92)
p3.2xlarge: NodeTerminated (ip: 1.2.3.91)
p3.2xlarge: NodeTerminated (ip: 1.2.3.90)
p3.2xlarge: NodeTerminated (ip: 1.2.3.89)
p3.2xlarge: NodeTerminated (ip: 1.2.3.88)
p3.2xlarge: NodeTerminated (ip: 1.2.3.87)
p3.2xlarge: NodeTerminated (ip: 1.2.3.86)
p3.2xlarge: NodeTerminated (ip: 1.2.3.85)
p3.2xlarge: NodeTerminated (ip: 1.2.3.84)
p3.2xlarge: NodeTerminated (ip: 1.2.3.83)
p3.2xlarge: NodeTerminated (ip: 1.2.3.82)
p3.2xlarge: NodeTerminated (ip: 1.2.3.81)
Resources
--------------------------------------------------------
Total Usage:
0/2 AcceleratorType:V100
530.0/544.0 CPU (2.0 used of 2.0 reserved in placement groups)
2/2 GPU
2.00GiB/8.00GiB memory
3.14GiB/16.00GiB object_store_memory
From request_resources:
{'CPU': 16}: 100 from request_resources()
Pending Demands:
{'CPU': 2.0}: 153+ pending tasks/actors (3+ using placement groups)
{'GPU': 0.5}: 100+ pending tasks/actors (100+ using placement groups)
{'CPU': 4} * 5 (PACK): 420+ pending placement groups
"""
actual = format_info_string(
lm_summary,
autoscaler_summary,
time=datetime(year=2020, month=12, day=28, hour=1, minute=2, second=3),
)
print(actual)
assert expected.strip() == actual
def test_placement_group_match_string():
"""is_placement_group_resource() recognizes *_group_* wildcard names, and
the scheduler does not warn about unschedulable placement-group shapes
(only about truly unknown custom resources)."""
assert (
is_placement_group_resource("bundle_group_ffe7d420752c6e8658638d19ecf2b68c")
is True
)
assert (
is_placement_group_resource("CPU_group_0_625ace126f848864c46f50dced5e0ef7")
is True
)
assert (
is_placement_group_resource("CPU_group_625ace126f848864c46f50dced5e0ef7")
is True
)
assert is_placement_group_resource("CPU") is False
assert is_placement_group_resource("GPU") is False
assert is_placement_group_resource("custom_resource") is False
assert is_placement_group_resource("ip:192.168.1.1") is False
provider = MockProvider()
new_types = copy.deepcopy(TYPES_A)
scheduler = ResourceDemandScheduler(
provider,
new_types,
3,
head_node_type="p2.8xlarge",
upscaling_speed=1,
)
provider.create_node(
{},
{
TAG_RAY_USER_NODE_TYPE: "p2.8xlarge",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
},
1,
)
nodes = provider.non_terminated_nodes({})
ips = provider.non_terminated_node_ips({})
utilizations = {ip: {"GPU": 8} for ip in ips}
# A placement-group resource demand that cannot be satisfied should be
# returned as "remaining" without emitting a warning.
with mock.patch(
"ray.autoscaler._private.resource_demand_scheduler.logger"
) as logger_mock:
to_launch, rem = scheduler.get_nodes_to_launch(
nodes,
{},
[{"CPU_group_0_625ace126f848864c46f50dced5e0ef7": 8}],
utilizations,
[],
{},
[],
EMPTY_AVAILABILITY_SUMMARY,
)
logger_mock.warning.assert_not_called()
assert to_launch == {}
assert rem == [{"CPU_group_0_625ace126f848864c46f50dced5e0ef7": 8}]
# A genuinely unknown custom resource, by contrast, does warn.
with mock.patch(
"ray.autoscaler._private.resource_demand_scheduler.logger"
) as logger_mock:
to_launch, rem = scheduler.get_nodes_to_launch(
nodes,
{},
[{"non-existent-custom": 8}],
utilizations,
[],
{},
[],
EMPTY_AVAILABILITY_SUMMARY,
)
logger_mock.warning.assert_called()
assert to_launch == {}
assert rem == [{"non-existent-custom": 8}]
def _launch_nothing_utilization_scorer_plugin(
node_resources, # noqa
resources, # noqa
node_type, # noqa
*,
node_availability_summary, # noqa
):
assert node_availability_summary is not None
return None
@pytest.fixture
def launch_nothing_utilization_score_plugin():
"""Point the autoscaler's scorer env var at the launch-nothing stub;
the env var is removed again on teardown."""
os.environ[AUTOSCALER_UTILIZATION_SCORER_KEY] = (
"ray.tests.test_resource_demand_scheduler."
"_launch_nothing_utilization_scorer_plugin"
)
try:
yield None
finally:
# Always clean up so other tests see the default scorer.
del os.environ[AUTOSCALER_UTILIZATION_SCORER_KEY]
def test_utilization_score_plugin_1(launch_nothing_utilization_score_plugin):
"""With the launch-nothing scorer installed, no nodes are ever launched."""
assert launch_nothing_utilization_score_plugin is None, "Keep mypy happy."
provider = MockProvider()
new_types = copy.deepcopy(TYPES_A)
scheduler = ResourceDemandScheduler(
provider,
new_types,
3,
head_node_type="p2.8xlarge",
upscaling_speed=1,
)
provider.create_node(
{},
{
TAG_RAY_USER_NODE_TYPE: "p2.8xlarge",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
},
1,
)
nodes = provider.non_terminated_nodes({})
ips = provider.non_terminated_node_ips({})
utilizations = {ip: {"GPU": 8} for ip in ips}
# Two GPU demands would normally trigger a launch; the plugin's None
# score suppresses it.
to_launch, rem = scheduler.get_nodes_to_launch(
nodes,
{},
[{"GPU": 8}] * 2,
utilizations,
[],
{},
[],
EMPTY_AVAILABILITY_SUMMARY,
)
assert to_launch == {}
def _lexical_scorer_plugin(
    node_resources, resources, node_type, *, node_availability_summary
):
    """Scorer that ranks feasible node types by their name (lexically).

    Delegates feasibility to the default resource-based scorer and, when
    that scorer accepts the node, returns the node type name itself so
    that comparisons happen on the string.
    """
    assert node_availability_summary is not None
    default_score = _resource_based_utilization_scorer(
        node_resources,
        resources,
        node_availability_summary=node_availability_summary,
    )
    # Infeasible node types stay infeasible (None); feasible ones score
    # by name.
    return node_type if default_score is not None else None
@pytest.fixture
def lexical_score_plugin():
"""Point the autoscaler's scorer env var at the lexical scorer;
the env var is removed again on teardown."""
os.environ[
AUTOSCALER_UTILIZATION_SCORER_KEY
] = "ray.tests.test_resource_demand_scheduler._lexical_scorer_plugin"
try:
yield None
finally:
# Always clean up so other tests see the default scorer.
del os.environ[AUTOSCALER_UTILIZATION_SCORER_KEY]
def test_utilization_score_plugin_2(lexical_score_plugin):
"""With the lexical scorer, the alphabetically-last feasible type wins."""
assert lexical_score_plugin is None, "Keep mypy happy."
provider = MockProvider()
new_types = copy.deepcopy(TYPES_A)
# "z2.8xlarge" is a clone of "p2.8xlarge"; the lexical scorer should
# prefer it purely because "z" > "p".
new_types["z2.8xlarge"] = new_types["p2.8xlarge"]
scheduler = ResourceDemandScheduler(
provider,
new_types,
3,
head_node_type="p2.8xlarge",
upscaling_speed=1,
)
provider.create_node(
{},
{
TAG_RAY_USER_NODE_TYPE: "p2.8xlarge",
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
},
1,
)
nodes = provider.non_terminated_nodes({})
ips = provider.non_terminated_node_ips({})
utilizations = {ip: {"GPU": 8} for ip in ips}
to_launch, rem = scheduler.get_nodes_to_launch(
nodes,
{},
[{"GPU": 8}] * 2,
utilizations,
[],
{},
[],
EMPTY_AVAILABILITY_SUMMARY,
)
assert to_launch == {"z2.8xlarge": 1}
# Allow running this test module directly, forwarding pytest's exit code.
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
AutoscalingTest
|
python
|
pytest-dev__pytest
|
src/_pytest/logging.py
|
{
"start": 14030,
"end": 23308
}
|
class ____:
"""Provides access and control of log capturing."""
def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None:
"""Initialize the fixture for *item*; private — built by pytest itself."""
check_ispytest(_ispytest)
self._item = item
# Saved state so _finalize() can restore whatever set_level() changed.
self._initial_handler_level: int | None = None
# Dict of log name -> log level.
self._initial_logger_levels: dict[str | None, int] = {}
self._initial_disabled_logging_level: int | None = None
def _finalize(self) -> None:
"""Finalize the fixture.
This restores the log levels and the disabled logging levels changed by :meth:`set_level`.
"""
# Restore log levels.
if self._initial_handler_level is not None:
self.handler.setLevel(self._initial_handler_level)
for logger_name, level in self._initial_logger_levels.items():
logger = logging.getLogger(logger_name)
logger.setLevel(level)
# Disable logging at the original disabled logging level.
if self._initial_disabled_logging_level is not None:
logging.disable(self._initial_disabled_logging_level)
# Clear so a second _finalize() call is a no-op for this part.
self._initial_disabled_logging_level = None
@property
def handler(self) -> LogCaptureHandler:
"""Get the logging handler used by the fixture."""
# The handler is stored on the test item's stash by the logging plugin.
return self._item.stash[caplog_handler_key]
def get_records(
self, when: Literal["setup", "call", "teardown"]
) -> list[logging.LogRecord]:
"""Get the logging records for one of the possible test phases.
:param when:
Which test phase to obtain the records from.
Valid values are: "setup", "call" and "teardown".
:returns: The list of captured records at the given stage.
.. versionadded:: 3.4
"""
# Phases that have not run yet yield an empty list rather than KeyError.
return self._item.stash[caplog_records_key].get(when, [])
@property
def text(self) -> str:
"""The formatted log text."""
# ANSI escape sequences are stripped so assertions are not affected
# by color codes (see _remove_ansi_escape_sequences).
return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
@property
def records(self) -> list[logging.LogRecord]:
"""The list of log records."""
return self.handler.records
@property
def record_tuples(self) -> list[tuple[str, int, str]]:
"""A list of a stripped down version of log records intended
for use in assertion comparison.
The format of the tuple is:
(logger_name, log_level, message)
"""
# getMessage() applies %-style argument interpolation.
return [(r.name, r.levelno, r.getMessage()) for r in self.records]
@property
def messages(self) -> list[str]:
"""A list of format-interpolated log messages.
Unlike 'records', which contains the format string and parameters for
interpolation, log messages in this list are all interpolated.
Unlike 'text', which contains the output from the handler, log
messages in this list are unadorned with levels, timestamps, etc,
making exact comparisons more reliable.
Note that traceback or stack info (from :func:`logging.exception` or
the `exc_info` or `stack_info` arguments to the logging functions) is
not included, as this is added by the formatter in the handler.
.. versionadded:: 3.7
"""
return [r.getMessage() for r in self.records]
def clear(self) -> None:
    """Reset the list of log records and the captured log text."""
    self.handler.clear()
def _force_enable_logging(
self, level: int | str, logger_obj: logging.Logger
) -> int:
"""Enable the desired logging level if the global level was disabled via ``logging.disabled``.
Only enables logging levels greater than or equal to the requested ``level``.
Does nothing if the desired ``level`` wasn't disabled.
:param level:
The logger level caplog should capture.
All logging is enabled if a non-standard logging level string is supplied.
Valid level strings are in :data:`logging._nameToLevel`.
:param logger_obj: The logger object to check.
:return: The original disabled logging level.
"""
original_disable_level: int = logger_obj.manager.disable
if isinstance(level, str):
# Try to translate the level string to an int for `logging.disable()`
level = logging.getLevelName(level) # type: ignore[deprecated]
if not isinstance(level, int):
# The level provided was not valid, so just un-disable all logging.
logging.disable(logging.NOTSET)
elif not logger_obj.isEnabledFor(level):
# Each level is `10` away from other levels.
# https://docs.python.org/3/library/logging.html#logging-levels
disable_level = max(level - 10, logging.NOTSET)
logging.disable(disable_level)
return original_disable_level
def set_level(self, level: int | str, logger: str | None = None) -> None:
    """Set the threshold level of a logger for the duration of a test.

    Logging messages which are less severe than this level will not be captured.

    .. versionchanged:: 3.4
        The levels of the loggers changed by this function will be
        restored to their initial values at the end of the test.

    Will enable the requested logging level if it was disabled via :func:`logging.disable`.

    :param level: The level.
    :param logger: The logger to update. If not given, the root logger.
    """
    logger_obj = logging.getLogger(logger)
    # Save the original log-level to restore it during teardown.
    # setdefault: only the level from before the *first* call is kept.
    self._initial_logger_levels.setdefault(logger, logger_obj.level)
    logger_obj.setLevel(level)
    # Likewise, remember the handler level only on the first call so
    # repeated calls still restore the true original.
    if self._initial_handler_level is None:
        self._initial_handler_level = self.handler.level
    self.handler.setLevel(level)
    initial_disabled_logging_level = self._force_enable_logging(level, logger_obj)
    if self._initial_disabled_logging_level is None:
        self._initial_disabled_logging_level = initial_disabled_logging_level
@contextmanager
def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]:
    """Context manager that sets the level for capturing of logs. After
    the end of the 'with' statement the level is restored to its original
    value.

    Will enable the requested logging level if it was disabled via :func:`logging.disable`.

    :param level: The level.
    :param logger: The logger to update. If not given, the root logger.
    """
    logger_obj = logging.getLogger(logger)
    orig_level = logger_obj.level
    logger_obj.setLevel(level)
    handler_orig_level = self.handler.level
    self.handler.setLevel(level)
    original_disable_level = self._force_enable_logging(level, logger_obj)
    try:
        yield
    finally:
        # Restore everything even if the body raised.
        logger_obj.setLevel(orig_level)
        self.handler.setLevel(handler_orig_level)
        logging.disable(original_disable_level)
@contextmanager
def filtering(self, filter_: logging.Filter) -> Generator[None]:
    """Context manager that temporarily adds the given filter to the caplog's
    :meth:`handler` for the 'with' statement block, and removes that filter at the
    end of the block.

    :param filter_: A custom :class:`logging.Filter` object.

    .. versionadded:: 7.5
    """
    self.handler.addFilter(filter_)
    try:
        yield
    finally:
        self.handler.removeFilter(filter_)
@fixture
def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture]:
    """Access and control log capturing.

    Captured logs are available through the following properties/methods::

    * caplog.messages        -> list of format-interpolated log messages
    * caplog.text            -> string containing formatted log output
    * caplog.records         -> list of logging.LogRecord instances
    * caplog.record_tuples   -> list of (logger_name, level, message) tuples
    * caplog.clear()         -> clear captured records and formatted log output string
    """
    result = LogCaptureFixture(request.node, _ispytest=True)
    yield result
    # Teardown: restore any levels/disable thresholds changed via set_level().
    result._finalize()
def get_log_level_for_setting(config: Config, *setting_names: str) -> int | None:
    """Return the numeric log level for the first configured setting, or None.

    For each name, the command-line option takes precedence over the ini value.

    :raises UsageError: If the configured value is not a known level name or number.
    """
    for setting_name in setting_names:
        log_level = config.getoption(setting_name)
        if log_level is None:
            log_level = config.getini(setting_name)
        if log_level:
            break
    else:
        # for/else: no setting produced a truthy level.
        return None

    if isinstance(log_level, str):
        log_level = log_level.upper()
    try:
        # Accept both level names ("INFO" -> logging.INFO) and numeric strings.
        return int(getattr(logging, log_level, log_level))
    except ValueError as e:
        # Python logging does not recognise this as a logging level
        raise UsageError(
            f"'{log_level}' is not recognized as a logging level name for "
            f"'{setting_name}'. Please consider passing the "
            "logging level num instead."
        ) from e
# run after terminalreporter/capturemanager are configured
@hookimpl(trylast=True)
def pytest_configure(config: Config) -> None:
    """Register the logging plugin once the core output plugins exist."""
    config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
|
LogCaptureFixture
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterternary/marker/_gradient.py
|
{
"start": 233,
"end": 4928
}
|
class Gradient(_BaseTraceHierarchyType):
    # NOTE(review): this module appears auto-generated from the Plotly schema;
    # prefer regenerating over hand-editing — TODO confirm against the codegen.

    # Location of this node in the figure property hierarchy.
    _parent_path_str = "scatterternary.marker"
    _path_str = "scatterternary.marker.gradient"
    _valid_props = {"color", "colorsrc", "type", "typesrc"}

    @property
    def color(self):
        """
        Sets the final color of the gradient fill: the center color for
        radial, the right for horizontal, or the bottom for vertical.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    @property
    def type(self):
        """
        Sets the type of gradient used to fill the markers

        The 'type' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['radial', 'horizontal', 'vertical', 'none']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["type"]

    @type.setter
    def type(self, val):
        self["type"] = val

    @property
    def typesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `type`.

        The 'typesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["typesrc"]

    @typesrc.setter
    def typesrc(self, val):
        self["typesrc"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the final color of the gradient fill: the center
            color for radial, the right for horizontal, or the
            bottom for vertical.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        type
            Sets the type of gradient used to fill the markers
        typesrc
            Sets the source reference on Chart Studio Cloud for
            `type`.
        """

    def __init__(
        self, arg=None, color=None, colorsrc=None, type=None, typesrc=None, **kwargs
    ):
        """
        Construct a new Gradient object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatterternary
            .marker.Gradient`
        color
            Sets the final color of the gradient fill: the center
            color for radial, the right for horizontal, or the
            bottom for vertical.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        type
            Sets the type of gradient used to fill the markers
        typesrc
            Sets the source reference on Chart Studio Cloud for
            `type`.

        Returns
        -------
        Gradient
        """
        super().__init__("gradient")
        # Internal construction path: adopt the parent and skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so mutating the input dict does not affect this object.
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.marker.Gradient
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.Gradient`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values in `arg`.
        self._set_property("color", arg, color)
        self._set_property("colorsrc", arg, colorsrc)
        self._set_property("type", arg, type)
        self._set_property("typesrc", arg, typesrc)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
|
Gradient
|
python
|
openai__openai-python
|
src/openai/_module_client.py
|
{
"start": 3805,
"end": 5033
}
|
class ConversationsProxy(LazyProxy["Conversations"]):
    """Lazily resolves to the default client's ``conversations`` resource."""

    @override
    def __load__(self) -> Conversations:
        # Client construction is deferred until the attribute is first used.
        return _load_client().conversations
# Module-level resource accessors: each proxy defers building the default
# client until the attribute is actually used.
chat: Chat = ChatProxy().__as_proxied__()
beta: Beta = BetaProxy().__as_proxied__()
files: Files = FilesProxy().__as_proxied__()
audio: Audio = AudioProxy().__as_proxied__()
evals: Evals = EvalsProxy().__as_proxied__()
images: Images = ImagesProxy().__as_proxied__()
models: Models = ModelsProxy().__as_proxied__()
videos: Videos = VideosProxy().__as_proxied__()
batches: Batches = BatchesProxy().__as_proxied__()
uploads: Uploads = UploadsProxy().__as_proxied__()
webhooks: Webhooks = WebhooksProxy().__as_proxied__()
realtime: Realtime = RealtimeProxy().__as_proxied__()
responses: Responses = ResponsesProxy().__as_proxied__()
embeddings: Embeddings = EmbeddingsProxy().__as_proxied__()
containers: Containers = ContainersProxy().__as_proxied__()
completions: Completions = CompletionsProxy().__as_proxied__()
moderations: Moderations = ModerationsProxy().__as_proxied__()
fine_tuning: FineTuning = FineTuningProxy().__as_proxied__()
vector_stores: VectorStores = VectorStoresProxy().__as_proxied__()
conversations: Conversations = ConversationsProxy().__as_proxied__()
|
ConversationsProxy
|
python
|
huggingface__transformers
|
src/transformers/models/chinese_clip/modeling_chinese_clip.py
|
{
"start": 2098,
"end": 4152
}
|
class ChineseCLIPOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The text embeddings obtained by applying the projection layer to the pooled output of
        [`ChineseCLIPTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The image embeddings obtained by applying the projection layer to the pooled output of
        [`ChineseCLIPVisionModel`].
    text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
        The output of the [`ChineseCLIPTextModel`].
    vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
        The output of the [`ChineseCLIPVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
    vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None

    def to_tuple(self) -> tuple[Any, ...]:
        """Convert to a tuple, recursively flattening the two nested model outputs."""
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
# Copied from transformers.models.align.modeling_align.AlignTextEmbeddings with Align->ChineseCLIP
|
ChineseCLIPOutput
|
python
|
pytorch__pytorch
|
test/profiler/test_profiler_tree.py
|
{
"start": 2141,
"end": 2763
}
|
class TorchDispatchTensor(torch.Tensor):
    """Tensor subclass that unwraps its payload around every dispatched op
    and re-wraps tensor results, exercising the ``__torch_dispatch__`` path."""

    @staticmethod
    def __new__(cls, elem):
        wrapper = torch.Tensor._make_subclass(cls, elem, elem.requires_grad)
        wrapper.elem = elem
        return wrapper

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        def to_plain(value):
            if isinstance(value, TorchDispatchTensor):
                return value.elem
            return value

        def to_wrapped(value):
            if isinstance(value, torch.Tensor):
                return TorchDispatchTensor(value)
            return value

        plain_args = tree_map(to_plain, args)
        plain_kwargs = tree_map(to_plain, kwargs or {})
        result = func(*plain_args, **plain_kwargs)
        return tree_map(to_wrapped, result)
|
TorchDispatchTensor
|
python
|
tensorflow__tensorflow
|
tensorflow/python/checkpoint/checkpoint_management_test.py
|
{
"start": 1601,
"end": 5192
}
|
class LatestCheckpointWithRelativePaths(test.TestCase):
    """Tests for latest_checkpoint() when savers use relative paths."""

    @staticmethod
    @contextlib.contextmanager
    def tempWorkingDir(temppath):
        """Temporarily chdir into *temppath*, restoring the cwd afterwards."""
        cwd = os.getcwd()
        os.chdir(temppath)
        try:
            yield
        finally:
            os.chdir(cwd)

    @staticmethod
    @contextlib.contextmanager
    def tempDir():
        """Yield a fresh temporary directory, deleting it afterwards."""
        tempdir = tempfile.mkdtemp()
        try:
            yield tempdir
        finally:
            shutil.rmtree(tempdir)

    @test_util.run_deprecated_v1
    def testNameCollision(self):
        """Saving to a path named 'checkpoint' must not clobber the state file."""
        # Make sure we have a clean directory to work in.
        with self.tempDir() as tempdir:
            # Jump to that directory until this test is done.
            with self.tempWorkingDir(tempdir):
                # Save training snapshots to a relative path.
                traindir = "train"
                os.mkdir(traindir)
                # Collides with the default name of the checkpoint state file.
                filepath = os.path.join(traindir, "checkpoint")

                with self.cached_session() as sess:
                    unused_a = variables.Variable(0.0)  # So that Saver saves something.
                    self.evaluate(variables.global_variables_initializer())

                    # Should fail.
                    saver = saver_module.Saver(sharded=False)
                    with self.assertRaisesRegex(ValueError, "collides with"):
                        saver.save(sess, filepath)

                    # Succeeds: the file will be named "checkpoint-<step>".
                    saver.save(sess, filepath, global_step=1)
                    self.assertIsNotNone(
                        checkpoint_management.latest_checkpoint(traindir))

                    # Succeeds: the file will be named "checkpoint-<i>-of-<n>".
                    saver = saver_module.Saver(sharded=True)
                    saver.save(sess, filepath)
                    self.assertIsNotNone(
                        checkpoint_management.latest_checkpoint(traindir))

                    # Succeeds: the file will be named "checkpoint-<step>-<i>-of-<n>".
                    saver = saver_module.Saver(sharded=True)
                    saver.save(sess, filepath, global_step=1)
                    self.assertIsNotNone(
                        checkpoint_management.latest_checkpoint(traindir))

    @test_util.run_deprecated_v1
    def testRelativePath(self):
        """A checkpoint written via a relative path can be found and restored."""
        # Make sure we have a clean directory to work in.
        with self.tempDir() as tempdir:
            # Jump to that directory until this test is done.
            with self.tempWorkingDir(tempdir):
                # Save training snapshots to a relative path.
                traindir = "train"
                os.mkdir(traindir)

                filename = "snapshot"
                filepath = os.path.join(traindir, filename)

                with self.cached_session() as sess:
                    # Build a simple graph.
                    v0 = variables.Variable(0.0)
                    inc = v0.assign_add(1.0)

                    save = saver_module.Saver({"v0": v0})

                    # Record a short training history.
                    self.evaluate(variables.global_variables_initializer())
                    save.save(sess, filepath, global_step=0)
                    self.evaluate(inc)
                    save.save(sess, filepath, global_step=1)
                    self.evaluate(inc)
                    save.save(sess, filepath, global_step=2)

                with self.cached_session() as sess:
                    # Build a new graph with different initialization.
                    v0 = variables.Variable(-1.0)

                    # Create a new saver.
                    save = saver_module.Saver({"v0": v0})
                    self.evaluate(variables.global_variables_initializer())

                    # Get the most recent checkpoint name from the training history file.
                    name = checkpoint_management.latest_checkpoint(traindir)
                    self.assertIsNotNone(name)

                    # Restore "v0" from that checkpoint.
                    save.restore(sess, name)
                    # Two increments were recorded, so the restored value is 2.0.
                    self.assertEqual(self.evaluate(v0), 2.0)
|
LatestCheckpointWithRelativePaths
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyflakes/F821_26.py
|
{
"start": 1352,
"end": 1483
}
|
# Lint fixture: subclassing list with a forward-referenced element type is
# valid in a .py file because the annotation string is never evaluated.
class ____(list["Tree | Leaf"]): ...  # always okay
# Annotations are treated as assignments in .pyi files, but not in .py files
|
Tree2
|
python
|
zarr-developers__zarr-python
|
tests/package_with_entrypoint/__init__.py
|
{
"start": 1149,
"end": 1588
}
|
class TestEntrypointCodecPipeline(CodecPipeline):
    """Minimal CodecPipeline stub used to test entry-point registration."""

    def __init__(self, batch_size: int = 1) -> None:
        # Intentionally a no-op; only registration/dispatch is under test.
        pass

    async def encode(
        self, chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]]
    ) -> Iterable[Buffer | None]:
        # Fixed sentinel output regardless of input.
        return [None]

    async def decode(
        self, chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]]
    ) -> Iterable[NDBuffer | None]:
        # Fixed sentinel output regardless of input.
        return np.array(1)
|
TestEntrypointCodecPipeline
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tpu_embedding_for_serving.py
|
{
"start": 2055,
"end": 23825
}
|
class TPUEmbeddingForServing(tpu_embedding_base.TPUEmbeddingBase):
  """The TPUEmbedding mid level API running on CPU for serving.

  Note: This class is intended to be used for embedding tables that are trained
  on TPU and to be served on CPU. Therefore the class should be only initialized
  under non-TPU strategy. Otherwise an error will be raised.

  You can first train your model using the TPUEmbedding class and save the
  checkpoint. Then use this class to restore the checkpoint to do serving.

  First train a model and save the checkpoint.

  ```python
  model = model_fn(...)
  strategy = tf.distribute.TPUStrategy(...)
  with strategy.scope():
    embedding = tf.tpu.experimental.embedding.TPUEmbedding(
        feature_config=feature_config,
        optimizer=tf.tpu.experimental.embedding.SGD(0.1))

  # Your custom training code.

  checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)
  checkpoint.save(...)
  ```

  Then restore the checkpoint and do serving.

  ```python

  # Restore the model on CPU.
  model = model_fn(...)
  embedding = tf.tpu.experimental.embedding.TPUEmbeddingForServing(
      feature_config=feature_config,
      optimizer=tf.tpu.experimental.embedding.SGD(0.1))

  checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)
  checkpoint.restore(...)

  result = embedding(...)
  table = embedding.embedding_table
  ```

  NOTE: This class can also be used to do embedding training on CPU. But it
  requires the conversion between keras optimizer and embedding optimizers so
  that the slot variables can stay consistent between them.
  """

  def __init__(
      self,
      feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable],  # pylint:disable=g-bare-generic
      optimizer: Optional[tpu_embedding_v2_utils._Optimizer],
      experimental_sparsecore_restore_info: Optional[Dict[str, Any]] = None,
  ):  # pylint:disable=protected-access
    """Creates the TPUEmbeddingForServing mid level API object.

    ```python
    embedding = tf.tpu.experimental.embedding.TPUEmbeddingForServing(
        feature_config=tf.tpu.experimental.embedding.FeatureConfig(
            table=tf.tpu.experimental.embedding.TableConfig(
                dim=...,
                vocabulary_size=...)))
    ```

    Args:
      feature_config: A nested structure of
        `tf.tpu.experimental.embedding.FeatureConfig` configs.
      optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`,
        `tf.tpu.experimental.embedding.Adagrad` or
        `tf.tpu.experimental.embedding.Adam`. When not created under TPUStrategy
        may be set to None to avoid the creation of the optimizer slot
        variables, useful for optimizing memory consumption when exporting the
        model for serving where slot variables aren't needed.
      experimental_sparsecore_restore_info: Information from the sparse core
        training, required to restore from checkpoint for serving (like number
        of TPU devices used `num_tpu_devices`.)

    Raises:
      RuntimeError: If created under TPUStrategy.
    """
    super(TPUEmbeddingForServing, self).__init__(feature_config, optimizer)
    # Serving must run under a non-TPU strategy; reject TPU strategies early.
    self._strategy = distribute_lib.get_strategy()
    if isinstance(
        self._strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2)
    ):
      raise RuntimeError("Serving on TPU is not yet supported.")

  @property
  def variables(
      self,
  ) -> Dict[
      tpu_embedding_v2_utils.TableConfig, Dict[str, tf_variables.Variable]
  ]:
    """Returns a dict of variables, keyed by `TableConfig`, then by slot name."""
    self._maybe_build()
    return self._variables

  @property
  def embedding_tables(
      self,
  ) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:
    """Returns a dict of embedding tables, keyed by `TableConfig`."""
    self._maybe_build()
    # Only return the tables and not the slot variables.
    return {
        table: self._variables[table.name]["parameters"]
        for table in self._table_config
    }

  def _maybe_build(self):
    """Build variables lazily, exactly once, outside any traced function."""
    if not self._built:
      # This can be called while tracing a function, so we wrap the
      # initialization code with init_scope so it runs eagerly, this means that
      # it will not be included the function graph generated by tracing so that
      # we can be sure that we only initialize the TPU for embeddings exactly
      # once.
      with ops.init_scope():
        self.build()

  # TODO(silkyarora) Update the tests for all TPU embedding to expect this
  # possibly empty information in checkpoints.
  def _maybe_delete_sc_layouts_from_checkpoint(self):
    """Drop the sparsecore layout attribute when it is empty (CPU-only path)."""
    # Remove the sparse_core_table_layouts from the checkpoint, it is only
    # required for sparsecore.
    if (
        hasattr(
            self,
            tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY,
        )
        and not self._get_sparse_core_table_layouts_str()
    ):
      delattr(
          self,
          tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY,
      )

  def build(self):
    """Create variables and slots variables for TPU embeddings."""
    super().build()
    self._maybe_delete_sc_layouts_from_checkpoint()

  def _track_restore_info_for_cpu(self) -> None:
    """Track a string variable holding sparsecore layouts for checkpoint restore."""

    def getter(name, shape, dtype, initializer, trainable):
      del shape
      # _add_variable_with_custom_getter clears the shape sometimes, so we
      # take the global shape from outside the getter.
      initial_value = functools.partial(initializer, dtype=dtype)
      return tf_variables.Variable(
          name=name,
          initial_value=initial_value,
          shape=None,
          dtype=dtype,
          trainable=trainable,
      )

    def empty_string(dtype: dtypes.DType):
      return tf_constant("", dtype=dtype)

    # _add_variable_with_custom_getter is used here to restore from checkpoint
    # at creation time. The layouts from sparse core must be restored from
    # checkpoint and before any other tables are restored
    setattr(
        self,
        tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY,
        self._add_variable_with_custom_getter(
            name=tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY,
            initializer=empty_string,
            dtype=dtypes.string,
            getter=getter,
            trainable=False,
        ),
    )

  def _get_sparse_core_table_layouts_str(self) -> bytes:
    """Return the serialized sparsecore layouts proto bytes (may be empty)."""
    layouts_str = getattr(
        self,
        tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY,
    )
    return layouts_str.read_value().numpy()

  def _trackable_children(
      self, save_type=trackable_base.SaveType.CHECKPOINT, **kwargs: Any
  ):
    """Hide sparsecore-restore trackables when exporting a SavedModel."""
    # Remove the trackables added to make sparsecore checkpoint restore work.
    # These are not required for serializing the model.
    tc = super()._trackable_children(save_type, **kwargs)
    if save_type == trackable_base.SaveType.SAVEDMODEL:
      if tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY in tc:
        tc.pop(tpu_embedding_v3_utils.SPARSECORE_LAYOUTS_CHECKPOINT_KEY, None)
      sclt = [
          k
          for k, v in tc.items()
          if isinstance(
              v, tpu_embedding_v3_utils.SparseCoreStackedTableTrackable
          )
      ]
      for k in sclt:
        tc.pop(k, None)
    return tc

  def _create_variables_from_stacked_tables(self):
    """Create per-table variables from sparsecore stacked-table layouts.

    Returns a dict of table name -> variable for tables that were stacked
    during sparsecore training.
    """
    sc_layouts = sparse_core_layout_pb2.SparseCoreTableLayouts()
    sc_layouts.ParseFromString(self._get_sparse_core_table_layouts_str())
    # Group the layouts by the stacked table they belong to.
    stacked_table_name_to_layouts = {}
    for layout in sc_layouts.tables:
      stacked_tables_list = stacked_table_name_to_layouts.setdefault(
          layout.stacked_table_name, []
      )
      stacked_tables_list.append(layout)
    table_to_config = {table.name: table for table in self._table_config}
    variables = {}
    for stacked_table_name, layouts in stacked_table_name_to_layouts.items():
      logging.info(
          "Loading stacked table state variables(%s) for %s tables",
          stacked_table_name,
          len(layouts),
      )
      stacked_var_trackable = (
          tpu_embedding_v3_utils.SparseCoreStackedTableTrackable(
              layouts, table_to_config
          )
      )
      # The stacked table is added as trackable to the embedding so that the
      # checkpoint key corresponsing to stacked table is read.
      self._track_trackable(stacked_var_trackable, stacked_table_name)
      variables.update(stacked_var_trackable.get_vars())
    return variables

  def _create_variables_and_slots(
      self,
  ) -> Dict[str, Dict[str, tf_variables.Variable]]:
    """Create variables for TPU embeddings.

    Returns:
      A dict of dicts. The outer dict is keyed by the table names and the inner
      dicts are keyed by 'parameters' and the slot variable names.
    """
    self._track_restore_info_for_cpu()
    variables = {}
    # If there are stacked variables from SC checkpoint process those
    # first
    stacked_variables = self._create_variables_from_stacked_tables()
    for table in self._table_config:
      if table.name in stacked_variables:
        variables[table.name] = {"parameters": stacked_variables[table.name]}
      else:
        variables[table.name] = self._create_variables(table, trainable=True)
    return variables

  def embedding_lookup(
      self, features: Any, weights: Optional[Any] = None
  ) -> Any:
    """Apply standard lookup ops on CPU.

    Args:
      features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or
        `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs
        will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`
        or `tf.RaggedTensor` is supported per call.
      weights: If not `None`, a nested structure of `tf.Tensor`s,
        `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except
        that the tensors should be of float type (and they will be downcast to
        `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the
        same for the parallel entries from `features` and similarly for
        `tf.RaggedTensor`s we assume the row_splits are the same.

    Returns:
      A nested structure of Tensors with the same structure as input features.
    """
    return cpu_embedding_lookup(
        features, weights, self.embedding_tables, self._feature_config
    )
def _ragged_embedding_lookup_with_reduce(
    table: tf_variables.Variable,
    ragged: ragged_tensor.RaggedTensor,
    weights: ragged_tensor.RaggedTensor,
    combiner: str,
) -> core.Tensor:
  """Compute a ragged lookup followed by a reduce on axis 1.

  Args:
    table: The embedding table.
    ragged: A RaggedTensor of ids to look up.
    weights: A RaggedTensor of weights (or None).
    combiner: One of "mean", "sum", "sqrtn".

  Returns:
    A Tensor.
  """
  if weights is None:
    # Unweighted lookup: treat every id as weight 1.
    weights = array_ops.ones_like(ragged, dtype=table.dtype)
  # Add an embedding-dim axis so weights broadcast against the lookups.
  weights = array_ops.expand_dims(weights, axis=2)
  ragged_result = embedding_ops.embedding_lookup(table, ragged)
  # "sum" is the base case; "mean"/"sqrtn" rescale it below.
  ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1)
  if combiner == "mean":
    # div_no_nan keeps empty rows at zero instead of producing NaN.
    ragged_result = math_ops.div_no_nan(
        ragged_result, math_ops.reduce_sum(weights, axis=1)
    )
  elif combiner == "sqrtn":
    ragged_result = math_ops.div_no_nan(
        ragged_result,
        math_ops.sqrt(math_ops.reduce_sum(weights * weights, axis=1)),
    )
  return ragged_result
@tf_export("tpu.experimental.embedding.serving_embedding_lookup")
def cpu_embedding_lookup(
    inputs: Any,
    weights: Optional[Any],
    tables: Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable],
    feature_config: Union[
        tpu_embedding_v2_utils.FeatureConfig, Iterable  # pylint:disable=g-bare-generic
    ],
) -> Any:
  """Apply standard lookup ops with `tf.tpu.experimental.embedding` configs.

  This function is a utility which allows using the
  `tf.tpu.experimental.embedding` config objects with standard lookup functions.
  This can be used when exporting a model which uses
  `tf.tpu.experimental.embedding.TPUEmbedding` for serving on CPU. In particular
  `tf.tpu.experimental.embedding.TPUEmbedding` only supports lookups on TPUs and
  should not be part of your serving graph.

  Note that TPU specific options (such as `max_sequence_length`) in the
  configuration objects will be ignored.

  In the following example we take a trained model (see the documentation for
  `tf.tpu.experimental.embedding.TPUEmbedding` for the context) and create a
  saved model with a serving function that will perform the embedding lookup and
  pass the results to your model:

  ```python
  model = model_fn(...)
  embedding = tf.tpu.experimental.embedding.TPUEmbedding(
      feature_config=feature_config,
      batch_size=1024,
      optimizer=tf.tpu.experimental.embedding.SGD(0.1))
  checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)
  checkpoint.restore(...)

  @tf.function(input_signature=[{'feature_one': tf.TensorSpec(...),
                                 'feature_two': tf.TensorSpec(...),
                                 'feature_three': tf.TensorSpec(...)}])
  def serve_tensors(embedding_features):
    embedded_features = tf.tpu.experimental.embedding.serving_embedding_lookup(
        embedding_features, None, embedding.embedding_tables,
        feature_config)
    return model(embedded_features)

  model.embedding_api = embedding
  tf.saved_model.save(model,
                      export_dir=...,
                      signatures={'serving_default': serve_tensors})

  ```

  NOTE: It's important to assign the embedding API object to a member of your
  model as `tf.saved_model.save` only supports saving variables as one
  `Trackable` object. Since the model's weights are in `model` and the
  embedding table are managed by `embedding`, we assign `embedding` to an
  attribute of `model` so that tf.saved_model.save can find the embedding
  variables.

  NOTE: The same `serve_tensors` function and `tf.saved_model.save` call will
  work directly from training.

  Args:
    inputs: a nested structure of Tensors, SparseTensors or RaggedTensors.
    weights: a nested structure of Tensors, SparseTensors or RaggedTensors or
      None for no weights. If not None, structure must match that of inputs, but
      entries are allowed to be None.
    tables: a dict of mapping TableConfig objects to Variables.
    feature_config: a nested structure of FeatureConfig objects. The keys of
      feature_config is a superset of inputs.

  Returns:
    A nested structure of Tensors with the same structure as inputs.
  """
  flat_inputs = nest.flatten_with_joined_string_paths(inputs)
  flat_weights = [None] * len(flat_inputs)
  if weights is not None:
    nest.assert_same_structure(inputs, weights)
    flat_weights = nest.flatten(weights)
  flat_features = dict(nest.flatten_with_joined_string_paths(feature_config))

  # Inputs are matched to features by their flattened structure path.
  input_keys = {key for key, _ in flat_inputs}
  if not input_keys.issubset(flat_features.keys()):
    raise ValueError(
        "Inputs are not a subset of feature_config. Inputs keys are {}, but"
        " feature_config keys are {}".format(input_keys, flat_features.keys())
    )

  outputs = []
  for (path, inp), weight in zip(flat_inputs, flat_weights):
    feature = flat_features[path]
    table = tables[feature.table]

    # Validate the weight against the input type before dispatching.
    if weight is not None:
      if isinstance(inp, tensor.Tensor):
        raise ValueError(
            "Weight specified for {}, but input is dense.".format(path)
        )
      elif type(weight) is not type(inp):
        raise ValueError(
            "Weight for {} is of type {} but it does not match type of the "
            "input which is {}.".format(path, type(weight), type(inp))
        )
      elif feature.max_sequence_length > 0:
        raise ValueError(
            "Weight specified for {}, but this is a sequence feature.".format(
                path
            )
        )

    # Dispatch by input kind: dense, sparse, or ragged.
    if isinstance(inp, tensor.Tensor):
      if feature.max_sequence_length > 0:
        raise ValueError(
            "Feature {} is a sequence feature but a dense tensor "
            "was passed.".format(path)
        )
      outputs.append(embedding_ops.embedding_lookup_v2(table, inp))

    elif isinstance(inp, sparse_tensor.SparseTensor):
      outputs.append(
          _embedding_lookup_for_sparse_tensor(inp, weight, table, feature)
      )
    elif isinstance(inp, ragged_tensor.RaggedTensor):
      outputs.append(
          _embedding_lookup_for_ragged_tensor(inp, weight, table, feature)
      )
    else:
      raise ValueError(
          "Input {} is type {}. Tensor, SparseTensor or "
          "RaggedTensor expected.".format(path, type(inp))
      )
  return nest.pack_sequence_as(inputs, outputs)
def _embedding_lookup_for_sparse_tensor(
inp: sparse_tensor.SparseTensor,
weight: Optional[sparse_tensor.SparseTensor],
table: tf_variables.Variable,
feature: tpu_embedding_v2_utils.FeatureConfig,
) -> tensor.Tensor:
"""Embedding lookup for sparse tensor based on its feature config.
Args:
inp: a single SparseTensor input.
weight: None or SparseTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result.
"""
inp_rank = inp.shape.rank
# The input rank can be None for sequence input tensor.
if (
not feature.output_shape
and feature.max_sequence_length > 0
and (inp_rank is None or inp_rank == 2)
):
batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64)
sparse_shape = array_ops_stack.stack(
[batch_size, feature.max_sequence_length], axis=0
)
# TPU Embedding truncates sequences to max_sequence_length, and if we
# don't truncate, scatter_nd will error out if the index was out of
# bounds.
truncated_inp = sparse_ops.sparse_slice(
inp, start=[0, 0], size=sparse_shape
)
dense_output_shape = array_ops_stack.stack(
[batch_size, feature.max_sequence_length, feature.table.dim], axis=0
)
return array_ops.scatter_nd(
truncated_inp.indices,
array_ops.gather(table.read_value(), truncated_inp.values),
dense_output_shape,
)
else:
if feature.max_sequence_length > 0:
logging.warning(
(
"max_sequence_length setting will be ignored because the rank of"
" the input tensor is %d which is not 2."
),
inp_rank,
)
if (
not feature.validate_weights_and_indices
and inp_rank is not None
and inp_rank <= 2
):
return embedding_ops.embedding_lookup_sparse_v2(
table, inp, sp_weights=weight, combiner=feature.table.combiner
)
else:
return embedding_ops.safe_embedding_lookup_sparse_v2(
table, inp, sparse_weights=weight, combiner=feature.table.combiner
)
def _embedding_lookup_for_ragged_tensor(
inp: ragged_tensor.RaggedTensor,
weight: Optional[ragged_tensor.RaggedTensor],
table: tf_variables.Variable,
feature: tpu_embedding_v2_utils.FeatureConfig,
) -> tensor.Tensor:
"""Embedding lookup for ragged tensor based on its feature config.
Args:
inp: a single rank 2 RaggedTensor input.
weight: None or RaggedTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result.
Raises:
ValueError: if input ragged tensor is not rank 2 or output shape set in the
feature config doesn't match with the first dim size of the input.
"""
if inp.shape.rank != 2:
raise ValueError(
"Only rank 2 ragged tensor is supported, but got rank {}".format(
inp.shape.rank
)
)
batch_size = inp.shape[0]
if feature.output_shape:
output_batch_size = math_ops.reduce_prod(feature.output_shape)
# If the output batch size matches the data batch size, treat it as
# normal ragged input.
if output_batch_size == batch_size:
ragged_output = _ragged_embedding_lookup_with_reduce(
table, inp, weight, feature.table.combiner
)
ragged_output = array_ops.reshape(
ragged_output, shape=feature.output_shape + [feature.table.dim]
)
# If the data batch size is a factor of the output batch size, the
# divide result will be the sequence length. Ignore the weights and
# combiner.
elif output_batch_size > batch_size and output_batch_size % batch_size == 0:
ragged_output = embedding_ops.embedding_lookup_v2(table, inp)
# Pad or truncate in the sequence dimension
ragged_output = ragged_output.to_tensor(
shape=[batch_size, output_batch_size // batch_size, feature.table.dim]
)
# Reshape to desire output shape.
ragged_output = array_ops.reshape(
ragged_output, feature.output_shape + [feature.table.dim]
)
else:
raise ValueError(
"Output shape set in the FeatureConfig should be the factor of "
"the input data batch size. But instead got output shape {}, "
"input data batch size {}".format(feature.output_shape, batch_size)
)
else:
if feature.max_sequence_length > 0:
output_shape = [
batch_size,
feature.max_sequence_length,
feature.table.dim,
]
ragged_lookup = embedding_ops.embedding_lookup_v2(table, inp)
# Unlike scatter_nd, RaggedTensor.to_tensor truncates to the given
# shape.
ragged_output = ragged_lookup.to_tensor(shape=output_shape)
else:
ragged_output = _ragged_embedding_lookup_with_reduce(
table, inp, weight, feature.table.combiner
)
return ragged_output
|
TPUEmbeddingForServing
|
python
|
ray-project__ray
|
python/ray/tune/utils/log.py
|
{
"start": 163,
"end": 1462
}
|
class ____(Enum):
V0_MINIMAL = 0
V1_EXPERIMENT = 1
V2_TRIAL_NORM = 2
V3_TRIAL_DETAILS = 3
def __int__(self):
return self.value
verbosity: Union[int, Verbosity] = Verbosity.V3_TRIAL_DETAILS
@DeveloperAPI
def set_verbosity(level: Union[int, Verbosity]):
global verbosity
if isinstance(level, int):
verbosity = Verbosity(level)
else:
verbosity = level
@DeveloperAPI
def has_verbosity(level: Union[int, Verbosity]) -> bool:
"""Return True if passed level exceeds global verbosity level."""
global verbosity
log_level = int(level)
verbosity_level = int(verbosity)
return verbosity_level >= log_level
@DeveloperAPI
def disable_ipython():
"""Disable output of IPython HTML objects."""
try:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.clear_instance()
except Exception:
pass
_log_cache_count: Dict[str, Tuple[str, float]] = {}
def _dedup_logs(domain: str, value: str, repeat_after_s: int = 5) -> bool:
cur_val, ts = _log_cache_count.get(domain, (None, None))
if value == cur_val and time.monotonic() - repeat_after_s < ts:
return False
else:
_log_cache_count[domain] = value, time.monotonic()
return True
|
Verbosity
|
python
|
facebook__pyre-check
|
client/libcst_vendored_visitors/_apply_type_annotations.py
|
{
"start": 17792,
"end": 18331
}
|
class ____:
global_annotations: int = 0
attribute_annotations: int = 0
parameter_annotations: int = 0
return_annotations: int = 0
classes_added: int = 0
typevars_and_generics_added: int = 0
def any_changes_applied(self) -> bool:
return (
self.global_annotations
+ self.attribute_annotations
+ self.parameter_annotations
+ self.return_annotations
+ self.classes_added
+ self.typevars_and_generics_added
) > 0
|
AnnotationCounts
|
python
|
pennersr__django-allauth
|
allauth/account/stages.py
|
{
"start": 1576,
"end": 4317
}
|
class ____:
def __init__(self, request, login):
self.request = request
self.login = login
self.state = self.login.state.setdefault("stages", {})
@classmethod
def enter(cls, request, stage_key):
from allauth.account.internal.stagekit import unstash_login
login = unstash_login(request, peek=True)
if not login:
return None
ctrl = LoginStageController(request, login)
if ctrl.state.get("current") != stage_key:
return None
stages = ctrl.get_stages()
for stage in stages:
if stage.key == stage_key:
return stage
return None
def set_current(self, stage_key):
self.state["current"] = stage_key
def is_handled(self, stage_key):
return self.state.get(stage_key, {}).get("handled", False)
def set_handled(self, stage_key):
stage_state = self.state.setdefault(stage_key, {})
stage_state["handled"] = True
def get_pending_stage(self) -> Optional[LoginStage]:
ret = None
stages = self.get_stages()
for stage in stages:
if self.is_handled(stage.key):
continue
ret = stage
break
return ret
def get_stage(self, key: str) -> Optional[LoginStage]:
try:
return next(iter(stage for stage in self.get_stages() if stage.key == key))
except StopIteration:
return None
def get_stages(self) -> List[LoginStage]:
stages = []
adapter = get_adapter(self.request)
paths = adapter.get_login_stages()
for path in paths:
cls = import_callable(path)
stage = cls(self, self.request, self.login)
stages.append(stage)
return stages
def handle(self):
from allauth.account.internal.stagekit import clear_login, stash_login
stages = self.get_stages()
for stage in stages:
if self.is_handled(stage.key):
continue
self.set_current(stage.key)
response, cont = stage.handle()
if response:
if cont:
stash_login(self.request, self.login)
else:
clear_login(self.request)
return response
else:
if not cont:
# So, on our stages is aborting without actually giving
# a response.
logger.error("Login stage aborted, redirecting to login")
return headed_redirect_response("account_login")
self.set_handled(stage.key)
clear_login(self.request)
|
LoginStageController
|
python
|
arrow-py__arrow
|
tests/test_locales.py
|
{
"start": 51710,
"end": 55022
}
|
class ____:
def test_couple_of_timeframe(self):
# Now
assert self.locale._format_timeframe("now", 0) == "הרגע"
# Second(s)
assert self.locale._format_timeframe("second", 1) == "שנייה"
assert self.locale._format_timeframe("seconds", 2) == "2 שניות"
assert self.locale._format_timeframe("seconds", 10) == "10 שניות"
assert self.locale._format_timeframe("seconds", 11) == "11 שניות"
# Minute(s)
assert self.locale._format_timeframe("minute", 1) == "דקה"
assert self.locale._format_timeframe("minutes", 2) == "2 דקות"
assert self.locale._format_timeframe("minutes", 10) == "10 דקות"
assert self.locale._format_timeframe("minutes", 11) == "11 דקות"
# Day(s)
assert self.locale._format_timeframe("day", 1) == "יום"
assert self.locale._format_timeframe("days", 2) == "יומיים"
assert self.locale._format_timeframe("days", 3) == "3 ימים"
assert self.locale._format_timeframe("days", 80) == "80 יום"
# Hour(s)
assert self.locale._format_timeframe("hour", 1) == "שעה"
assert self.locale._format_timeframe("hours", 2) == "שעתיים"
assert self.locale._format_timeframe("hours", 3) == "3 שעות"
assert self.locale._format_timeframe("hours", 11) == "11 שעות"
# Week(s)
assert self.locale._format_timeframe("week", 1) == "שבוע"
assert self.locale._format_timeframe("weeks", 2) == "שבועיים"
assert self.locale._format_timeframe("weeks", 3) == "3 שבועות"
assert self.locale._format_timeframe("weeks", 11) == "11 שבועות"
# Month(s)
assert self.locale._format_timeframe("month", 1) == "חודש"
assert self.locale._format_timeframe("months", 2) == "חודשיים"
assert self.locale._format_timeframe("months", 4) == "4 חודשים"
assert self.locale._format_timeframe("months", 11) == "11 חודשים"
# Year(s)
assert self.locale._format_timeframe("year", 1) == "שנה"
assert self.locale._format_timeframe("years", 2) == "שנתיים"
assert self.locale._format_timeframe("years", 5) == "5 שנים"
assert self.locale._format_timeframe("years", 15) == "15 שנה"
def test_describe_multi(self):
describe = self.locale.describe_multi
fulltest = [("years", 5), ("week", 1), ("hour", 1), ("minutes", 6)]
assert describe(fulltest) == "בעוד 5 שנים, שבוע, שעה ו־6 דקות"
seconds4000_0days = [("days", 0), ("hour", 1), ("minutes", 6)]
assert describe(seconds4000_0days) == "בעוד 0 ימים, שעה ו־6 דקות"
seconds4000 = [("hour", 1), ("minutes", 6)]
assert describe(seconds4000) == "בעוד שעה ו־6 דקות"
assert describe(seconds4000, only_distance=True) == "שעה ו־6 דקות"
seconds3700 = [("hour", 1), ("minute", 1)]
assert describe(seconds3700) == "בעוד שעה ודקה"
seconds300_0hours = [("hours", 0), ("minutes", 5)]
assert describe(seconds300_0hours) == "בעוד 0 שעות ו־5 דקות"
seconds300 = [("minutes", 5)]
assert describe(seconds300) == "בעוד 5 דקות"
seconds60 = [("minute", 1)]
assert describe(seconds60) == "בעוד דקה"
assert describe(seconds60, only_distance=True) == "דקה"
@pytest.mark.usefixtures("lang_locale")
|
TestHebrewLocale
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_stackdriver.py
|
{
"start": 8613,
"end": 9147
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.stackdriver.StackdriverHook")
def test_execute(self, mock_hook):
operator = StackdriverDisableNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
operator.execute(context=mock.MagicMock())
mock_hook.return_value.disable_notification_channels.assert_called_once_with(
project_id=None, filter_=TEST_FILTER, retry=DEFAULT, timeout=None, metadata=()
)
|
TestStackdriverDisableNotificationChannelsOperator
|
python
|
mlflow__mlflow
|
mlflow/prompt/promptlab_model.py
|
{
"start": 132,
"end": 7095
}
|
class ____:
import pandas as pd
def __init__(self, prompt_template, prompt_parameters, model_parameters, model_route):
self.prompt_parameters = prompt_parameters
self.model_parameters = model_parameters
self.model_route = model_route
self.prompt_template = prompt_template
def predict(self, inputs: pd.DataFrame) -> list[str]:
from mlflow.deployments import MlflowDeploymentClient, get_deploy_client
client = MlflowDeploymentClient(get_deploy_client())
results = []
for idx in inputs.index:
prompt_parameters_as_dict = {
param.key: inputs[param.key][idx] for param in self.prompt_parameters
}
# copy replacement logic from PromptEngineering.utils.ts for consistency
prompt = self.prompt_template
for key, value in prompt_parameters_as_dict.items():
prompt = re.sub(r"\{\{\s*" + key + r"\s*\}\}", value, prompt)
model_parameters_as_dict = {param.key: param.value for param in self.model_parameters}
query_data = self._construct_query_data(prompt)
response = client.predict(
endpoint=self.model_route, inputs={**query_data, **model_parameters_as_dict}
)
results.append(self._parse_gateway_response(response))
return results
def _construct_query_data(self, prompt):
from mlflow.deployments import MlflowDeploymentClient, get_deploy_client
client = MlflowDeploymentClient(get_deploy_client())
route_type = client.get_endpoint(self.model_route).endpoint_type
if route_type == "llm/v1/completions":
return {"prompt": prompt}
elif route_type == "llm/v1/chat":
return {"messages": [{"content": prompt, "role": "user"}]}
else:
raise MlflowException(
"Error when constructing gateway query: "
f"Unsupported route type for _PromptlabModel: {route_type}"
)
def _parse_gateway_response(self, response):
from mlflow.deployments import MlflowDeploymentClient, get_deploy_client
client = MlflowDeploymentClient(get_deploy_client())
route_type = client.get_endpoint(self.model_route).endpoint_type
if route_type == "llm/v1/completions":
return response["choices"][0]["text"]
elif route_type == "llm/v1/chat":
return response["choices"][0]["message"]["content"]
else:
raise MlflowException(
"Error when parsing gateway response: "
f"Unsupported route type for _PromptlabModel: {route_type}"
)
def _load_pyfunc(path):
from mlflow import pyfunc
from mlflow.entities.param import Param
from mlflow.utils.model_utils import (
_get_flavor_configuration,
)
pyfunc_flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=pyfunc.FLAVOR_NAME)
parameters_path = os.path.join(path, pyfunc_flavor_conf["parameters_path"])
with open(parameters_path) as f:
parameters = yaml.safe_load(f)
prompt_parameters_as_params = [
Param(key=key, value=value) for key, value in parameters["prompt_parameters"].items()
]
model_parameters_as_params = [
Param(key=key, value=value) for key, value in parameters["model_parameters"].items()
]
return _PromptlabModel(
prompt_template=parameters["prompt_template"],
prompt_parameters=prompt_parameters_as_params,
model_parameters=model_parameters_as_params,
model_route=parameters["model_route"],
)
def save_model(
path,
conda_env=None,
code_paths=None,
mlflow_model=None,
signature=None,
input_example=None,
pip_requirements=None,
prompt_template=None,
prompt_parameters=None,
model_parameters=None,
model_route=None,
):
from mlflow import pyfunc
from mlflow.models import Model
from mlflow.models.model import MLMODEL_FILE_NAME, Model
from mlflow.models.utils import _save_example
from mlflow.utils.environment import (
_CONDA_ENV_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
_PYTHON_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_process_conda_env,
_process_pip_requirements,
_PythonEnv,
_validate_env_arguments,
infer_pip_requirements,
)
from mlflow.utils.file_utils import write_to
from mlflow.utils.model_utils import (
_validate_and_copy_code_paths,
_validate_and_prepare_target_save_path,
)
_validate_env_arguments(conda_env, pip_requirements, None)
_validate_and_prepare_target_save_path(path)
code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
parameters_sub_path = "parameters.yaml"
parameters_path = os.path.join(path, parameters_sub_path)
# dump prompt_template, prompt_parameters, model_parameters, model_route to parameters_path
parameters = {
"prompt_template": prompt_template,
"prompt_parameters": {param.key: param.value for param in prompt_parameters},
"model_parameters": {param.key: param.value for param in model_parameters},
"model_route": model_route,
}
with open(parameters_path, "w") as f:
yaml.safe_dump(parameters, stream=f, default_flow_style=False)
pyfunc.add_to_model(
mlflow_model,
loader_module="mlflow.prompt.promptlab_model",
parameters_path=parameters_sub_path,
conda_env=_CONDA_ENV_FILE_NAME,
python_env=_PYTHON_ENV_FILE_NAME,
code=code_dir_subpath,
)
mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))
if conda_env is None:
if pip_requirements is None:
inferred_reqs = infer_pip_requirements(
path, "mlflow._promptlab", [f"mlflow[gateway]=={__version__}"]
)
default_reqs = sorted(inferred_reqs)
else:
default_reqs = None
conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
default_reqs, pip_requirements, None
)
else:
conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)
with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
if pip_constraints:
write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))
write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))
_PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
|
_PromptlabModel
|
python
|
encode__django-rest-framework
|
tests/test_routers.py
|
{
"start": 26360,
"end": 26509
}
|
class ____(BasenameTestCase, TestCase):
def setUp(self):
self.router = SimpleRouter(trailing_slash=False)
|
TestDuplicateBasenameSimpleRouter
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/traversals.py
|
{
"start": 5929,
"end": 6358
}
|
class ____(HasShallowCopy):
"""Supplies Generative behavior but making use of traversals to shallow
copy.
.. seealso::
:class:`sqlalchemy.sql.base.Generative`
"""
__slots__ = ()
def _generate(self) -> Self:
cls = self.__class__
s = cls.__new__(cls)
self._shallow_copy_to(s)
return s
def _clone(element, **kw):
return element._clone()
|
GenerativeOnTraversal
|
python
|
numpy__numpy
|
numpy/_core/tests/test_unicode.py
|
{
"start": 12671,
"end": 12830
}
|
class ____(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
|
TestByteorder_1009_UCS2
|
python
|
FactoryBoy__factory_boy
|
tests/test_regression.py
|
{
"start": 356,
"end": 467
}
|
class ____(T.NamedTuple):
book: Book
published_on: datetime.date
countries: T.List[str]
|
PublishedBook
|
python
|
fluentpython__example-code
|
21-class-metaprog/bulkfood/model_v8.py
|
{
"start": 1023,
"end": 1309
}
|
class ____(Validated):
"""a string with at least one non-space character"""
def validate(self, instance, value):
value = value.strip()
if len(value) == 0:
raise ValueError('value cannot be empty or blank')
return value
# BEGIN MODEL_V8
|
NonBlank
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_vl/modular_qwen3_vl.py
|
{
"start": 13609,
"end": 14121
}
|
class ____(PatchEmbed):
def __init__(self, config) -> None:
super().__init__()
self.patch_size = config.patch_size
self.temporal_patch_size = config.temporal_patch_size
self.in_channels = config.in_channels
self.embed_dim = config.hidden_size
kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size]
self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=True)
|
Qwen3VLVisionPatchEmbed
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 23179,
"end": 23305
}
|
class ____(models.Model):
name = models.CharField(max_length=150)
log = HistoricalRecords(related_name="history")
|
Street
|
python
|
numpy__numpy
|
numpy/lib/tests/test_function_base.py
|
{
"start": 23432,
"end": 23943
}
|
class ____:
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
assert_equal(np.ptp(a, axis=0), 15.0)
b = np.array([[3, 6.0, 9.0],
[4, 10.0, 5.0],
[8, 3.0, 2.0]])
assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0])
assert_equal(np.ptp(b, axis=-1), [6.0, 6.0, 6.0])
assert_equal(np.ptp(b, axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
assert_equal(np.ptp(b, axis=(0, 1), keepdims=True), [[8.0]])
|
TestPtp
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tests/tpu_embedding_v2_enqueue_mode_test.py
|
{
"start": 1259,
"end": 8496
}
|
class ____(tpu_embedding_base_test.TPUEmbeddingBaseTest):
@parameterized.parameters([True, False])
def test_enqueue_with_outside_compilation(self, use_mlir):
if use_mlir:
config.enable_mlir_bridge()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
mid_level_api.build([
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 3))
])
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def enqueue_with_outside_compilation(data):
def get_activations(features):
mid_level_api.enqueue(features, training=False)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(data,))
@def_function.function
def enqueue_without_outside_compilation(data):
def get_activations():
return mid_level_api.dequeue()
mid_level_api.enqueue(data, training=False)
return strategy.run(get_activations)
features = next(dataset_iter)
activations_oc = enqueue_with_outside_compilation(features)
activations = enqueue_without_outside_compilation(features)
# Extact per core numpy arrays.
activations_oc0 = self._get_replica_numpy(activations_oc, strategy, 0)
activations0 = self._get_replica_numpy(activations, strategy, 0)
self.assertAllClose(activations_oc0, activations0)
@parameterized.parameters(True, False)
def test_enqueue_with_outside_compilation_in_control_flow(self, use_mlir):
self.skip_if_oss()
if use_mlir:
config.enable_mlir_bridge()
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
# This is one way to force the enqueue in some control flow. @tf.functions
# aren't inlined in the calling tf.function. An alternative would be to
# place the enqueue in a switch_v2 or something similar.
@def_function.function
def enqueue_fn(features):
mid_level_api.enqueue(features, training=False)
@def_function.function
def enqueue_with_outside_compilation():
def get_activations(features):
enqueue_fn(features)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(next(dataset_iter),))
with self.assertRaisesRegex(
RuntimeError,
'does not match graph which contains TPUReplicateContext'):
enqueue_with_outside_compilation()
def test_enqueue_with_outside_compilation_non_direct_input(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
mid_level_api.build([
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 3))
])
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def enqueue_with_outside_compilation():
def get_activations(features):
# This inserts a mul operation on the TPU to trigger the direct input
# error.
features = (features[0]*2, features[1]*2, features[2]*2)
mid_level_api.enqueue(features, training=False)
return mid_level_api.dequeue()
return strategy.run(get_activations, args=(next(dataset_iter),))
with self.assertRaisesRegex(
ValueError, 'which does not have the `_tpu_input_identity` attr'):
enqueue_with_outside_compilation()
def test_enqueue_with_outside_compilation_auto_mode(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
mid_level_api.build([
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 2)),
TensorShape((self.batch_size, 3))
])
dataset = self._create_sparse_dataset(strategy)
dataset_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def enqueue_with_no_gradient_apply(data):
def get_activations(features):
# Note the lack of setting training=False, so training defaults to true
# here even though we don't have apply gradients.
# We detect the correct mode based on which ops exist that share the
# same 'name'.
mid_level_api.enqueue(features, name='call1')
return mid_level_api.dequeue(name='call1')
return strategy.run(get_activations, args=(data,))
@def_function.function
def enqueue_with_gradient_apply(data):
def get_activations(features):
mid_level_api.enqueue(features, name='call2')
activations = mid_level_api.dequeue(name='call2')
# Apply an all ones gradient
gradients = nest.map_structure(array_ops.ones_like, activations)
mid_level_api.apply_gradients(gradients, name='call2')
return activations
return strategy.run(get_activations, args=(data,))
data = next(dataset_iter)
before_gradient_apply = enqueue_with_gradient_apply(data)
after_gradient_apply = enqueue_with_no_gradient_apply(data)
before_gradient_apply0 = self._get_replica_numpy(before_gradient_apply,
strategy, 0)
after_gradient_apply0 = self._get_replica_numpy(after_gradient_apply,
strategy, 0)
num_replicas = strategy.num_replicas_in_sync
# We are passing a gradient of 1 for all lookups, optimizer is SGD with a
# learning rate of 0.1. Feature 0 and 1 are looked up with a sum combiner
# with the following ids:
# Feature 0: [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
# Feature 1: [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
# i.e. Row 0 and 1 were looked up 3*num_replicas times over all cores and as
# the gradient is 1, the accumulated gradient is 3*num_replicas for each
# position in row 0 and 1 in table.
#
# See comments in test_pass_none_to_apply_gradients for the update to
# Feature 2 and its table.
# The *2 in the next tests are because those rows have 2 lookups vs
# the 1 lookup in the other row.
update = ([[0.3 * num_replicas], [0.3 * num_replicas * 2]],
[[0.3 * num_replicas * 2], [0.3 * num_replicas]],
[[0.1 * num_replicas], [0.1 / 3 * num_replicas]])
golden = tuple([before - np.array(up) for before, up in
zip(before_gradient_apply0, update)])
self.assertAllClose(golden, after_gradient_apply0)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
TPUEmbeddingTest
|
python
|
Netflix__metaflow
|
metaflow/_vendor/click/core.py
|
{
"start": 53560,
"end": 54653
}
|
class ____(MultiCommand):
"""A command collection is a multi command that merges multiple multi
commands together into one. This is a straightforward implementation
that accepts a list of different multi commands as sources and
provides all the commands for each of them.
"""
def __init__(self, name=None, sources=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: The list of registered multi commands.
self.sources = sources or []
def add_source(self, multi_cmd):
"""Adds a new multi command to the chain dispatcher."""
self.sources.append(multi_cmd)
def get_command(self, ctx, cmd_name):
for source in self.sources:
rv = source.get_command(ctx, cmd_name)
if rv is not None:
if self.chain:
_check_multicommand(self, cmd_name, rv)
return rv
def list_commands(self, ctx):
rv = set()
for source in self.sources:
rv.update(source.list_commands(ctx))
return sorted(rv)
|
CommandCollection
|
python
|
huggingface__transformers
|
tests/models/video_llama_3/test_video_processing_video_llama_3.py
|
{
"start": 1345,
"end": 4936
}
|
class ____:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=IMAGENET_STANDARD_MEAN,
image_std=IMAGENET_STANDARD_STD,
do_convert_rgb=True,
temporal_patch_size=2,
patch_size=14,
min_pixels=20 * 20,
max_pixels=100 * 100 * 8,
merge_size=2,
):
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
self.temporal_patch_size = temporal_patch_size
self.patch_size = patch_size
self.min_pixels = min_pixels
self.max_pixels = max_pixels
self.merge_size = merge_size
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
"temporal_patch_size": self.temporal_patch_size,
"patch_size": self.patch_size,
"min_pixels": self.min_pixels,
"max_pixels": self.max_pixels,
"merge_size": self.merge_size,
}
@require_vision
def expected_output_video_shape(self, videos, num_frames=None):
num_frames = num_frames if num_frames is not None else self.num_frames
grid_t = num_frames // self.temporal_patch_size
hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
seq_len = 0
for video in videos:
if isinstance(video[0], Image.Image):
video = np.stack([np.array(frame) for frame in video])
height, width = get_image_size(video)
resized_height, resized_width = smart_resize(
height,
width,
factor=self.patch_size * self.merge_size,
min_pixels=self.min_pixels,
max_pixels=self.max_pixels,
)
grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
seq_len += grid_t * grid_h * grid_w
return [seq_len, hidden_dim]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
|
VideoLlama3VideoProcessingTester
|
python
|
django__django
|
tests/backends/postgresql/test_operations.py
|
{
"start": 355,
"end": 2836
}
|
class ____(SimpleTestCase):
def test_sql_flush(self):
self.assertEqual(
connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
),
['TRUNCATE "backends_person", "backends_tag";'],
)
def test_sql_flush_allow_cascade(self):
self.assertEqual(
connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
allow_cascade=True,
),
['TRUNCATE "backends_person", "backends_tag" CASCADE;'],
)
def test_sql_flush_sequences(self):
self.assertEqual(
connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
reset_sequences=True,
),
['TRUNCATE "backends_person", "backends_tag" RESTART IDENTITY;'],
)
def test_sql_flush_sequences_allow_cascade(self):
self.assertEqual(
connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
reset_sequences=True,
allow_cascade=True,
),
['TRUNCATE "backends_person", "backends_tag" RESTART IDENTITY CASCADE;'],
)
def test_prepare_join_on_clause_same_type(self):
author_table = Author._meta.db_table
author_id_field = Author._meta.get_field("id")
lhs_expr, rhs_expr = connection.ops.prepare_join_on_clause(
author_table,
author_id_field,
author_table,
author_id_field,
)
self.assertEqual(lhs_expr, Col(author_table, author_id_field))
self.assertEqual(rhs_expr, Col(author_table, author_id_field))
def test_prepare_join_on_clause_different_types(self):
author_table = Author._meta.db_table
author_id_field = Author._meta.get_field("id")
book_table = Book._meta.db_table
book_fk_field = Book._meta.get_field("author")
lhs_expr, rhs_expr = connection.ops.prepare_join_on_clause(
author_table,
author_id_field,
book_table,
book_fk_field,
)
self.assertEqual(lhs_expr, Col(author_table, author_id_field))
self.assertEqual(
rhs_expr, Cast(Col(book_table, book_fk_field), author_id_field)
)
|
PostgreSQLOperationsTests
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py
|
{
"start": 1512,
"end": 2046
}
|
class ____(Enum):
EMAIL = 0
PAGERDUTY = 1
SLACK = 2
MSTEAMS = 3
SENTRY_APP = 4
SENTRY_NOTIFICATION = 5 # Use personal notification platform (src/sentry/notifications)
OPSGENIE = 6
DISCORD = 7
MAX_ACTIONS = 3
ACTION_TYPE_TO_STRING = {
AlertRuleTriggerActionType.PAGERDUTY.value: "PagerDuty",
AlertRuleTriggerActionType.SLACK.value: "Slack",
AlertRuleTriggerActionType.MSTEAMS.value: "Microsoft Teams",
AlertRuleTriggerActionType.OPSGENIE.value: "Opsgenie",
}
|
AlertRuleTriggerActionType
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/zoho/provider.py
|
{
"start": 310,
"end": 1194
}
|
class ____(OAuth2Provider):
id = "zoho"
name = "Zoho"
account_class = ZohoAccount
oauth2_adapter_class = ZohoOAuth2Adapter
def get_default_scope(self):
return ["aaaserver.profile.READ"]
def extract_uid(self, data):
return str(data["ZUID"])
def extract_common_fields(self, data):
return dict(
email=data["Email"],
username=data["Display_Name"],
first_name=data["First_Name"],
last_name=data["Last_Name"],
)
def extract_email_addresses(self, data):
ret = []
email = data.get("Email")
if email:
ret.append(
EmailAddress(
email=email,
verified=False,
primary=True,
)
)
return ret
provider_classes = [ZohoProvider]
|
ZohoProvider
|
python
|
python__mypy
|
mypy/nodes.py
|
{
"start": 134397,
"end": 136393
}
|
class ____(TypeInfo):
__slots__ = ("msg",)
# types.py defines a single instance of this class, called types.NOT_READY.
# This instance is used as a temporary placeholder in the process of de-serialization
# of 'Instance' types. The de-serialization happens in two steps: In the first step,
# Instance.type is set to NOT_READY. In the second step (in fixup.py) it is replaced by
# an actual TypeInfo. If you see the assertion error below, then most probably something
# went wrong during the second step and an 'Instance' that raised this error was not fixed.
# Note:
# 'None' is not used as a dummy value for two reasons:
# 1. This will require around 80-100 asserts to make 'mypy --strict-optional mypy'
# pass cleanly.
# 2. If NOT_READY value is accidentally used somewhere, it will be obvious where the value
# is from, whereas a 'None' value could come from anywhere.
#
# Additionally, this serves as a more general-purpose placeholder
# for missing TypeInfos in a number of places where the excuses
# for not being Optional are a little weaker.
#
# TypeInfo defines a __bool__ method that returns False for FakeInfo
# so that it can be conveniently tested against in the same way that it
# would be if things were properly optional.
def __init__(self, msg: str) -> None:
self.msg = msg
def __getattribute__(self, attr: str) -> type:
# Handle __class__ so that isinstance still works...
if attr == "__class__":
return object.__getattribute__(self, attr) # type: ignore[no-any-return]
raise AssertionError(object.__getattribute__(self, "msg"))
VAR_NO_INFO: Final[TypeInfo] = FakeInfo("Var is lacking info")
CLASSDEF_NO_INFO: Final[TypeInfo] = FakeInfo("ClassDef is lacking info")
FUNC_NO_INFO: Final[TypeInfo] = FakeInfo("FuncBase for non-methods lack info")
MISSING_FALLBACK: Final = FakeInfo("fallback can't be filled out until semanal")
|
FakeInfo
|
python
|
pdm-project__pdm
|
src/pdm/models/session.py
|
{
"start": 1272,
"end": 4480
}
|
class ____(PyPIClient):
def __init__(self, *, sources: list[RepositoryConfig], cache_dir: Path | None = None, **kwargs: Any) -> None:
from httpx._utils import URLPattern
from unearth.fetchers.sync import LocalFSTransport
if cache_dir is None:
def cache_transport(transport: httpx.BaseTransport) -> httpx.BaseTransport:
return transport
else:
storage = hishel.FileStorage(serializer=MsgPackSerializer(), base_path=cache_dir, ttl=CACHES_TTL)
controller = hishel.Controller()
def cache_transport(transport: httpx.BaseTransport) -> httpx.BaseTransport:
return hishel.CacheTransport(transport, storage, controller)
mounts: dict[str, httpx.BaseTransport] = {"file://": LocalFSTransport()}
self._trusted_host_ports: set[tuple[str, int | None]] = set()
self._proxy_map = {
URLPattern(key): proxy for key, proxy in self._get_proxy_map(None, allow_env_proxies=True).items()
}
self._proxy_map = dict(sorted(self._proxy_map.items()))
for s in sources:
assert s.url is not None
url = httpx.URL(s.url)
if s.verify_ssl is False:
self._trusted_host_ports.add((url.host, url.port))
if s.name == "pypi":
kwargs["transport"] = self._transport_for(s)
continue
mounts[f"{url.scheme}://{url.netloc.decode('ascii')}/"] = cache_transport(self._transport_for(s))
mounts.update(kwargs.pop("mounts", None) or {})
kwargs.update(follow_redirects=True)
httpx.Client.__init__(self, mounts=mounts, **kwargs)
self.headers["User-Agent"] = self._make_user_agent()
self.event_hooks["response"].append(self.on_response)
self._transport = cache_transport(self._transport) # type: ignore[has-type]
def _transport_for(self, source: RepositoryConfig) -> httpx.BaseTransport:
if source.verify_ssl is False:
verify: str | bool | SSLContext = False
elif source.ca_certs:
verify = source.ca_certs
else:
verify = os.getenv("REQUESTS_CA_BUNDLE") or os.getenv("CURL_CA_BUNDLE") or _ssl_context or True
if source.client_cert:
cert = (source.client_cert, source.client_key)
else:
cert = None
source_url = httpx.URL(cast(str, source.url))
proxy = next((proxy for pattern, proxy in self._proxy_map.items() if pattern.matches(source_url)), None)
return _get_transport(verify=verify, cert=cert, proxy=proxy)
def _make_user_agent(self) -> str:
import platform
return f"pdm/{__version__} {platform.python_implementation()}/{platform.python_version()} {platform.system()}/{platform.release()}"
def on_response(self, response: httpx.Response) -> None:
from unearth.utils import ARCHIVE_EXTENSIONS
if response.extensions.get("from_cache"):
response.from_cache = True # type: ignore[attr-defined]
if response.url.path.endswith(ARCHIVE_EXTENSIONS):
logger.info("Using cached response for %s", response.url)
|
PDMPyPIClient
|
python
|
psf__requests
|
src/requests/auth.py
|
{
"start": 3095,
"end": 10186
}
|
class ____(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, "init"):
self._thread_local.init = True
self._thread_local.last_nonce = ""
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal["realm"]
nonce = self._thread_local.chal["nonce"]
qop = self._thread_local.chal.get("qop")
algorithm = self._thread_local.chal.get("algorithm")
opaque = self._thread_local.chal.get("opaque")
hash_utf8 = None
if algorithm is None:
_algorithm = "MD5"
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == "MD5" or _algorithm == "MD5-SESS":
def md5_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == "SHA":
def sha_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
elif _algorithm == "SHA-256":
def sha256_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.sha256(x).hexdigest()
hash_utf8 = sha256_utf8
elif _algorithm == "SHA-512":
def sha512_utf8(x):
if isinstance(x, str):
x = x.encode("utf-8")
return hashlib.sha512(x).hexdigest()
hash_utf8 = sha512_utf8
KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += f"?{p_parsed.query}"
A1 = f"{self.username}:{realm}:{self.password}"
A2 = f"{method}:{path}"
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = f"{self._thread_local.nonce_count:08x}"
s = str(self._thread_local.nonce_count).encode("utf-8")
s += nonce.encode("utf-8")
s += time.ctime().encode("utf-8")
s += os.urandom(8)
cnonce = hashlib.sha1(s).hexdigest()[:16]
if _algorithm == "MD5-SESS":
HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}")
if not qop:
respdig = KD(HA1, f"{nonce}:{HA2}")
elif qop == "auth" or "auth" in qop.split(","):
noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}"
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = (
f'username="{self.username}", realm="{realm}", nonce="{nonce}", '
f'uri="{path}", response="{respdig}"'
)
if opaque:
base += f', opaque="{opaque}"'
if algorithm:
base += f', algorithm="{algorithm}"'
if entdig:
base += f', digest="{entdig}"'
if qop:
base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"'
return f"Digest {base}"
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
# If response is not 4xx, do not auth
# See https://github.com/psf/requests/issues/3772
if not 400 <= r.status_code < 500:
self._thread_local.num_401_calls = 1
return r
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get("www-authenticate", "")
if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r"digest ", flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers["Authorization"] = self.build_digest_header(
prep.method, prep.url
)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook("response", self.handle_401)
r.register_hook("response", self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all(
[
self.username == getattr(other, "username", None),
self.password == getattr(other, "password", None),
]
)
def __ne__(self, other):
return not self == other
|
HTTPDigestAuth
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/worker_group/worker.py
|
{
"start": 1566,
"end": 2519
}
|
class ____:
hostname: str
node_id: str
node_ip: str
pid: int
accelerator_ids: Dict[str, List[Union[int, str]]]
@property
def gpu_ids(self) -> List[Union[int, str]]:
return self.accelerator_ids.get("GPU", [])
@cached_property
def _repr(self) -> str:
indent = " "
repr_lines = [
"ActorMetadata(",
f"{indent}hostname={repr(self.hostname)},",
f"{indent}node_id={repr(self.node_id)},",
f"{indent}node_ip={repr(self.node_ip)},",
f"{indent}pid={repr(self.pid)},",
]
non_empty_accelerator_ids = {k: v for k, v in self.accelerator_ids.items() if v}
if non_empty_accelerator_ids:
repr_lines.append(f"{indent}accelerator_ids={non_empty_accelerator_ids},")
repr_lines.append(")")
return "\n".join(repr_lines)
def __repr__(self) -> str:
return self._repr
@dataclass
|
ActorMetadata
|
python
|
plotly__plotly.py
|
plotly/graph_objs/choropleth/colorbar/_tickformatstop.py
|
{
"start": 233,
"end": 8529
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "choropleth.colorbar"
_path_str = "choropleth.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.choropleth.col
orbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choropleth.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Tickformatstop
|
python
|
PyCQA__pylint
|
tests/functional/d/dataclass/dataclass_parameter.py
|
{
"start": 179,
"end": 333
}
|
class ____:
"""Simple dataclass with a KW_ONLY parameter."""
_: dataclasses.KW_ONLY
data: str
MyDataClass(data="test")
@dataclass
|
MyDataClass
|
python
|
tensorflow__tensorflow
|
tensorflow/python/checkpoint/checkpoint.py
|
{
"start": 9599,
"end": 16368
}
|
class ____:
"""Holds the status of an object-based checkpoint load."""
def __init__(self, object_graph_proto, save_path, save_path_tensor, reader,
restore_op_cache, graph_view, options, saveables_cache):
"""Specify the checkpoint being loaded.
Args:
object_graph_proto: The TrackableObjectGraph protocol buffer associated
with this checkpoint.
save_path: A string, the path to the checkpoint, as returned by
`tf.train.latest_checkpoint`.
save_path_tensor: A string `Tensor` which contains or will be fed the save
path.
reader: A `CheckpointReader` for `save_path`. If None,
`_CheckpointRestoreCoordinator` will initialize one itself.
restore_op_cache: A dictionary shared between
`_CheckpointRestoreCoordinator`s for the same Python objects, used to
look up restore ops by name to avoid re-creating them across multiple
`restore()` calls.
graph_view: A graph_view_lib.ObjectGraphView object for the restored
objects.
options: A CheckpointOptions object.
saveables_cache: An optional cache storing previously created
SaveableObjects created for each Trackable. Maps Trackables to a
dictionary of attribute names to Trackable.
"""
self.options = options
self.object_graph_proto = object_graph_proto
self.restore_uid = ops.uid()
# Maps from proto ids to lists of attributes which were in the checkpoint
# but not loaded into any object, for error checking.
self.unused_attributes = {}
# Dictionary mapping from an id in the protocol buffer flat array to
# Trackable Python objects. This mapping may be deferred if a
# checkpoint is restored before all dependencies have been tracked. Uses
# weak references so that partial restorations don't create reference cycles
# (as objects with deferred dependencies will generally have references to
# this object).
self.object_by_proto_id = weakref.WeakValueDictionary()
self.matched_proto_ids = set()
# A set of all Python objects we've seen as dependencies, even if we didn't
# use them (for example because of inconsistent references when
# loading). Used to make status assertions fail when loading checkpoints
# that don't quite match.
self.all_python_objects = object_identity.ObjectIdentityWeakSet()
self.save_path_tensor = save_path_tensor
self.save_path_string = save_path
self.dtype_map = reader.get_variable_to_dtype_map()
self.shape_map = reader.get_variable_to_shape_map()
# A NewCheckpointReader for the most recent checkpoint, for streaming Python
# state restoration.
# When graph building, contains a list of ops to run to restore objects from
# this checkpoint.
self.restore_ops = []
self.restore_ops_by_name = restore_op_cache
self.graph_view = graph_view
self.new_restore_ops_callback = None
# A mapping from optimizer proto ids to lists of slot variables to be
# restored when the optimizer is tracked. Only includes slot variables whose
# regular variables have already been created, and only for optimizer
# objects which have not yet been created/tracked.
self.deferred_slot_restorations = {}
# A mapping from variable proto ids to lists of slot variables to be
# restored when the variable is created/tracked. These get shifted over to
# deferred_slot_restorations if the optimizer hasn't been created when that
# happens.
self.slot_restorations = collections.defaultdict(list)
# Controls whether errors are printed in __del__ if some objects did not
# match.
self.expect_partial_attr = False
if not self.options.experimental_skip_slot_variables:
for node_index, node in enumerate(self.object_graph_proto.nodes):
for slot_reference in node.slot_variables:
# `node` refers to an `Optimizer`, since only these have slot
# variables.
self.slot_restorations[
slot_reference.original_variable_node_id
].append(
base._SlotVariableRestoration( # pylint: disable=protected-access
optimizer_id=node_index,
slot_variable_id=slot_reference.slot_variable_node_id,
slot_name=slot_reference.slot_name,
)
)
self._deleter = _CheckpointRestoreCoordinatorDeleter(
self.expect_partial_attr,
self.object_graph_proto,
self.matched_proto_ids,
self.unused_attributes)
self.saveables_cache = saveables_cache
@property
def expect_partial(self):
return self.expect_partial_attr
@expect_partial.setter
def expect_partial(self, expect_partial):
self.expect_partial_attr = expect_partial
self._deleter.set_expect_partial(expect_partial)
def new_restore_ops(self, new_ops):
self.restore_ops.extend(new_ops)
if self.new_restore_ops_callback:
self.new_restore_ops_callback(new_ops) # pylint: disable=not-callable
def restore_saveables(
self,
tensor_saveables,
python_positions,
registered_savers=None,
reader=None,
):
"""Run or build restore operations for SaveableObjects.
Args:
tensor_saveables: `SaveableObject`s which correspond to Tensors.
python_positions: List of CheckpointPositions bound to `PythonState`
objects which must be restored eagerly.
registered_savers: a dict mapping saver names-> object name -> Trackable.
reader: A `CheckpointReader`. If None, a new instance will be created.
Returns:
When graph building, a list of restore operations, either cached or newly
created, to restore `tensor_saveables`.
"""
if reader is None:
reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)
restore_ops = []
# Eagerly run restorations for Python state.
for position in python_positions:
key = position.object_proto.attributes[0].checkpoint_key
position.trackable.deserialize(reader.get_tensor(key))
# If we have new SaveableObjects, extract and cache restore ops.
if tensor_saveables or registered_savers:
flat_saveables = saveable_object_util.validate_and_slice_inputs(
tensor_saveables)
new_restore_ops = functional_saver.MultiDeviceSaver.from_saveables(
flat_saveables,
registered_savers).restore(self.save_path_tensor, self.options)
if not context.executing_eagerly():
for name, restore_op in sorted(new_restore_ops.items()):
restore_ops.append(restore_op)
assert name not in self.restore_ops_by_name
self.restore_ops_by_name[name] = restore_op
return restore_ops
|
_CheckpointRestoreCoordinator
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/data_preprocessing/rescaling/abstract_rescaling.py
|
{
"start": 423,
"end": 1444
}
|
class ____(object):
# Rescaling does not support fit_transform (as of 0.19.1)!
def __init__(
self, random_state: Optional[Union[int, np.random.RandomState]] = None
) -> None:
self.preprocessor: Optional[BaseEstimator] = None
def fit(
self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE] = None
) -> "AutoSklearnPreprocessingAlgorithm":
if self.preprocessor is None:
raise NotFittedError()
self.preprocessor.fit(X)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
if self.preprocessor is None:
raise NotFittedError()
transformed_X = self.preprocessor.transform(X)
return transformed_X
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> ConfigurationSpace:
cs = ConfigurationSpace()
return cs
|
Rescaling
|
python
|
scrapy__scrapy
|
tests/test_request_attribute_binding.py
|
{
"start": 341,
"end": 492
}
|
class ____:
def process_response(self, request, response):
return response.replace(request=Request(OVERRIDDEN_URL))
|
ProcessResponseMiddleware
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-microsoft-sharepoint/source_microsoft_sharepoint/spec.py
|
{
"start": 2624,
"end": 5751
}
|
class ____(AbstractFileBasedSpec, BaseModel):
"""
SourceMicrosoftSharePointSpec class for Microsoft SharePoint Source Specification.
This class combines the authentication details with additional configuration for the SharePoint API.
"""
class Config:
title = "Microsoft SharePoint Source Spec"
# Union type for credentials, allowing for either OAuth or Service Key authentication
credentials: Union[OAuthCredentials, ServiceCredentials] = Field(
title="Authentication",
description="Credentials for connecting to the One Drive API",
discriminator="auth_type",
type="object",
order=0,
)
delivery_method: DeliverRecords | DeliverRawFiles = Field(
title="Delivery Method",
discriminator="delivery_type",
type="object",
order=1,
display_type="radio",
group="advanced",
default="use_records_transfer",
)
search_scope: str = Field(
title="Search Scope",
description="Specifies the location(s) to search for files. Valid options are 'ACCESSIBLE_DRIVES' for all SharePoint drives the user can access, 'SHARED_ITEMS' for shared items the user has access to, and 'ALL' to search both.",
default="ALL",
enum=["ACCESSIBLE_DRIVES", "SHARED_ITEMS", "ALL"],
order=3,
)
folder_path: str = Field(
title="Folder Path",
description="Path to a specific folder within the drives to search for files. Leave empty to search all folders of the drives. This does not apply to shared items.",
order=4,
default=".",
)
site_url: str = Field(
title="Site URL",
description="Url of SharePoint site to search for files. Leave empty to search in the main site. Use 'https://<tenant_name>.sharepoint.com/sites/' to iterate over all sites.",
order=5,
default="",
)
@classmethod
def documentation_url(cls) -> str:
"""Provides the URL to the documentation for this specific source."""
return "https://docs.airbyte.com/integrations/sources/microsoft-sharepoint"
@classmethod
def schema(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]:
"""
Generates the schema mapping for configuration fields.
It also cleans up the schema by removing legacy settings and discriminators.
"""
schema = super().schema(*args, **kwargs)
# Remove legacy settings related to streams
dpath.util.delete(schema, "properties/streams/items/properties/legacy_prefix")
dpath.util.delete(schema, "properties/streams/items/properties/format/oneOf/*/properties/inference_type")
# Hide API processing option until https://github.com/airbytehq/airbyte-platform-internal/issues/10354 is fixed
processing_options = dpath.util.get(schema, "properties/streams/items/properties/format/oneOf/4/properties/processing/oneOf")
dpath.util.set(schema, "properties/streams/items/properties/format/oneOf/4/properties/processing/oneOf", processing_options[:1])
return schema
|
SourceMicrosoftSharePointSpec
|
python
|
coleifer__peewee
|
playhouse/apsw_ext.py
|
{
"start": 4813,
"end": 4861
}
|
class ____(_DateField):
db_value = nh
|
DateField
|
python
|
numba__numba
|
numba/cuda/cudadrv/driver.py
|
{
"start": 36598,
"end": 36854
}
|
class ____(int):
"""
Dummy object for _PendingDeallocs when *size* is not set.
"""
def __new__(cls, *args, **kwargs):
return super().__new__(cls, 0)
def __str__(self):
return '?'
_SizeNotSet = _SizeNotSet()
|
_SizeNotSet
|
python
|
realpython__materials
|
python-microservices-with-grpc/marketplace/recommendations_pb2_grpc.py
|
{
"start": 656,
"end": 1644
}
|
class ____(object):
"""Missing associated documentation comment in .proto file"""
def Recommend(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_RecommendationsServicer_to_server(servicer, server):
rpc_method_handlers = {
"Recommend": grpc.unary_unary_rpc_method_handler(
servicer.Recommend,
request_deserializer=recommendations__pb2.RecommendationRequest.FromString,
response_serializer=recommendations__pb2.RecommendationResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"Recommendations", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
|
RecommendationsServicer
|
python
|
ansible__ansible
|
test/units/_internal/templating/test_jinja_bits.py
|
{
"start": 20956,
"end": 22043
}
|
class ____(NotifiableAccessContextBase):
def __init__(self) -> None:
self._type_interest = frozenset(Marker._concrete_subclasses)
self._markers: list[Marker] = []
def _notify(self, o: Marker) -> None:
self._markers.append(o)
@pytest.mark.parametrize("template", (
'{{ adict["bogus"] | default("ok") }}',
'{{ adict.bogus | default("ok") }}',
))
def test_marker_access_getattr_and_getitem(template: str) -> None:
"""Ensure that getattr and getitem always access markers."""
# the absence of a JinjaCallContext should cause the access done by getattr and getitem not to trip when a marker is encountered
assert TemplateEngine(variables=dict(adict={})).template(TRUST.tag(template)) == "ok"
with ExampleMarkerAccessTracker() as tracker: # the access done by getattr and getitem should immediately trip when a marker is encountered
TemplateEngine(variables=dict(adict={})).template(TRUST.tag(template))
assert type(tracker._markers[0]) is UndefinedMarker # pylint: disable=unidiomatic-typecheck
|
ExampleMarkerAccessTracker
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/envs/phys2d/pendulum.py
|
{
"start": 1013,
"end": 7549
}
|
class ____(
FuncEnv[StateType, jax.Array, int, float, bool, RenderStateType, PendulumParams]
):
"""Pendulum but in jax and functional structure."""
max_torque: float = 2.0
observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(3,), dtype=np.float32)
action_space = gym.spaces.Box(-max_torque, max_torque, shape=(1,), dtype=np.float32)
def initial(
self, rng: PRNGKeyType, params: PendulumParams = PendulumParams
) -> StateType:
"""Initial state generation."""
high = jnp.array([params.high_x, params.high_y])
return jax.random.uniform(key=rng, minval=-high, maxval=high, shape=high.shape)
def transition(
self,
state: StateType,
action: int | jax.Array,
rng: None = None,
params: PendulumParams = PendulumParams,
) -> StateType:
"""Pendulum transition."""
th, thdot = state # th := theta
u = action
g = params.g
m = params.m
l = params.l
dt = params.dt
u = jnp.clip(u, -self.max_torque, self.max_torque)[0]
newthdot = thdot + (3 * g / (2 * l) * jnp.sin(th) + 3.0 / (m * l**2) * u) * dt
newthdot = jnp.clip(newthdot, -params.max_speed, params.max_speed)
newth = th + newthdot * dt
new_state = jnp.array([newth, newthdot])
return new_state
def observation(
self, state: StateType, rng: Any, params: PendulumParams = PendulumParams
) -> jax.Array:
"""Generates an observation based on the state."""
theta, thetadot = state
return jnp.array([jnp.cos(theta), jnp.sin(theta), thetadot])
def reward(
self,
state: StateType,
action: ActType,
next_state: StateType,
rng: Any,
params: PendulumParams = PendulumParams,
) -> float:
"""Generates the reward based on the state, action and next state."""
th, thdot = state # th := theta
u = action
u = jnp.clip(u, -self.max_torque, self.max_torque)[0]
th_normalized = ((th + jnp.pi) % (2 * jnp.pi)) - jnp.pi
costs = th_normalized**2 + 0.1 * thdot**2 + 0.001 * (u**2)
return -costs
def terminal(
self, state: StateType, rng: Any, params: PendulumParams = PendulumParams
) -> bool:
"""Determines if the state is a terminal state."""
return False
def render_image(
self,
state: StateType,
render_state: RenderStateType,
params: PendulumParams = PendulumParams,
) -> tuple[RenderStateType, np.ndarray]:
"""Renders an RGB image."""
try:
import pygame
from pygame import gfxdraw
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
screen, clock, last_u = render_state
surf = pygame.Surface((params.screen_dim, params.screen_dim))
surf.fill((255, 255, 255))
bound = 2.2
scale = params.screen_dim / (bound * 2)
offset = params.screen_dim // 2
rod_length = 1 * scale
rod_width = 0.2 * scale
l, r, t, b = 0, rod_length, rod_width / 2, -rod_width / 2
coords = [(l, b), (l, t), (r, t), (r, b)]
transformed_coords = []
for c in coords:
c = pygame.math.Vector2(c).rotate_rad(state[0] + np.pi / 2)
c = (c[0] + offset, c[1] + offset)
transformed_coords.append(c)
gfxdraw.aapolygon(surf, transformed_coords, (204, 77, 77))
gfxdraw.filled_polygon(surf, transformed_coords, (204, 77, 77))
gfxdraw.aacircle(surf, offset, offset, int(rod_width / 2), (204, 77, 77))
gfxdraw.filled_circle(surf, offset, offset, int(rod_width / 2), (204, 77, 77))
rod_end = (rod_length, 0)
rod_end = pygame.math.Vector2(rod_end).rotate_rad(state[0] + np.pi / 2)
rod_end = (int(rod_end[0] + offset), int(rod_end[1] + offset))
gfxdraw.aacircle(
surf, rod_end[0], rod_end[1], int(rod_width / 2), (204, 77, 77)
)
gfxdraw.filled_circle(
surf, rod_end[0], rod_end[1], int(rod_width / 2), (204, 77, 77)
)
fname = path.join(path.dirname(__file__), "assets/clockwise.png")
img = pygame.image.load(fname)
if last_u is not None:
scale_img = pygame.transform.smoothscale(
img,
(scale * np.abs(last_u) / 2, scale * np.abs(last_u) / 2),
)
is_flip = bool(last_u > 0)
scale_img = pygame.transform.flip(scale_img, is_flip, True)
surf.blit(
scale_img,
(
offset - scale_img.get_rect().centerx,
offset - scale_img.get_rect().centery,
),
)
gfxdraw.aacircle(surf, offset, offset, int(0.05 * scale), (0, 0, 0))
gfxdraw.filled_circle(surf, offset, offset, int(0.05 * scale), (0, 0, 0))
surf = pygame.transform.flip(surf, False, True)
screen.blit(surf, (0, 0))
return (screen, clock, last_u), np.transpose(
np.array(pygame.surfarray.pixels3d(screen)), axes=(1, 0, 2)
)
def render_init(
self,
screen_width: int = 600,
screen_height: int = 400,
params: PendulumParams = PendulumParams,
) -> RenderStateType:
"""Initialises the render state."""
try:
import pygame
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
pygame.init()
screen = pygame.Surface((screen_width, screen_height))
clock = pygame.time.Clock()
return screen, clock, None
def render_close(
self,
render_state: RenderStateType,
params: PendulumParams = PendulumParams,
):
"""Closes the render state."""
try:
import pygame
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
pygame.display.quit()
pygame.quit()
def get_default_params(self, **kwargs) -> PendulumParams:
"""Returns the default parameters for the environment."""
return PendulumParams(**kwargs)
|
PendulumFunctional
|
python
|
pytorch__pytorch
|
test/export/test_export.py
|
{
"start": 611297,
"end": 640391
}
|
class ____(torch.nn.Module):
def forward(self, x: "f32[2, 4]", y: "f32[4]"):
add: "f32[2, 4]" = torch.ops.aten.add.Tensor(x, y); x = None
hints_wrapper_body_graph_0 = self.hints_wrapper_body_graph_0
hints_wrapper = torch.ops.higher_order.hints_wrapper(hints_wrapper_body_graph_0, (add, y), {}, hints = {'outer_body': True}); hints_wrapper_body_graph_0 = add = y = None
getitem: "f32[2, 4]" = hints_wrapper[0]; hints_wrapper = None
return (getitem,)
class hints_wrapper_body_graph_0(torch.nn.Module):
def forward(self, arg0_1: "f32[2, 4]", arg1_1: "f32[4]"):
hints_wrapper_body_graph_0 = self.hints_wrapper_body_graph_0
hints_wrapper = torch.ops.higher_order.hints_wrapper(hints_wrapper_body_graph_0, (arg0_1, arg1_1), {}, hints = {'inner_body': True}); hints_wrapper_body_graph_0 = arg0_1 = arg1_1 = None
getitem: "f32[2, 4]" = hints_wrapper[0]; hints_wrapper = None
abs_1: "f32[2, 4]" = torch.ops.aten.abs.default(getitem); getitem = None
return (abs_1,)
class hints_wrapper_body_graph_0(torch.nn.Module):
def forward(self, arg0_1: "f32[2, 4]", arg1_1: "f32[4]"):
relu: "f32[2, 4]" = torch.ops.aten.relu.default(arg0_1); arg0_1 = None
add: "f32[2, 4]" = torch.ops.aten.add.Tensor(relu, arg1_1); relu = arg1_1 = None
return (add,)
""",
ignore_empty_lines=True,
)
@testing.expectedFailureStrict # test_hop doesn't have a dynamo implementation
@testing.expectedFailureStrictV2 # test_hop doesn't have a dynamo implementation
@testing.expectedFailureRetraceability # test_hop doesn't have a dynamo implementation
@testing.expectedFailureTrainingIRToRunDecomp # test_hop doesn't have a dynamo implementation
@testing.expectedFailureSerDerNonStrict # TODO: serde torch.FunctionSchema is not implemented yet
@testing.expectedFailureSerDer # TODO: serde torch.FunctionSchema is not implemented yet
def test_export_function_schema(self):
import torch.utils._pytree as pytree
from torch._higher_order_ops.utils import (
_maybe_run_with_interpreter,
autograd_not_implemented,
reenter_make_fx,
unique_graph_id,
)
from torch._ops import HigherOrderOperator
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import (
ProxyTorchDispatchMode,
track_tensor_tree,
)
pytree.register_constant(torch.FunctionSchema)
class TestFunctionSchemaHop(HigherOrderOperator):
def __init__(self):
super().__init__("test_function_schema")
def __call__(
self,
fn,
x: torch.Tensor,
schema: Union[torch.FunctionSchema, pytree.TreeSpec],
):
if isinstance(schema, torch.FunctionSchema):
_, schema = pytree.tree_flatten(schema)
return super().__call__(fn, x, schema)
def trace_hop(proxy_mode, fn, x, schema):
sub_gm = reenter_make_fx(fn)(x)
i, gm_name = unique_graph_id(proxy_mode, prefix="_sub_gm")
proxy_mode.tracer.root.register_module(gm_name, sub_gm)
out_proxy = proxy_mode.tracer.create_proxy(
"call_function",
test_hop,
tuple(
proxy_mode.tracer.unwrap_proxy(arg) for arg in (sub_gm, x, schema)
),
{},
)
example_out = test_hop(sub_gm, x, schema)
return track_tensor_tree(
example_out, out_proxy, constant=None, tracer=proxy_mode.tracer
)
def dense_hop(fn, x, schema):
assert isinstance(schema, pytree.TreeSpec)
schema = pytree.tree_unflatten([], schema)
assert (
isinstance(schema, torch.FunctionSchema)
and schema == torch.ops.aten.sin.default._schema
)
return fn(x)
def fake_hop(mode, fn, x, schema):
with mode:
return dense_hop(fn, x, schema)
def func_hop(ctx, fn, x, schema):
unwrapped_x = ctx.unwrap_tensors(x)
functional_fn = ctx.functionalize(_maybe_run_with_interpreter(fn))
return ctx.wrap_tensors(test_hop(functional_fn, unwrapped_x, schema))
test_hop = TestFunctionSchemaHop()
test_hop.py_impl(ProxyTorchDispatchMode)(trace_hop)
test_hop.py_impl(torch._C.DispatchKey.CompositeExplicitAutograd)(dense_hop)
test_hop.py_impl(FakeTensorMode)(fake_hop)
test_hop.py_autograd_impl(
autograd_not_implemented(test_hop, deferred_error=True)
)
test_hop.py_functionalize_impl(func_hop)
class Model(torch.nn.Module):
def forward(self, x):
def fn(x):
return x.sin()
return test_hop(fn, x, torch.ops.aten.sin.default._schema)
mod = Model()
x = torch.randn(3, 4)
ep = export(mod, (x,))
self.assertEqual(x.sin(), ep.module()(x))
pytree._deregister_pytree_node(torch.FunctionSchema)
@unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA.")
def test_exception(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.embedding = torch.nn.Embedding(num_embeddings=10, embedding_dim=8)
self.register_buffer("buffer", torch.ones(4, 4))
self.register_buffer("param", torch.ones(4, 4))
def forward(self, x):
token_ids = torch.randint(0, 10, (4,), device=x.device)
embedded = self.embedding(token_ids).sum()
return self.buffer.sum() + self.param.sum() + x.sum() + embedded
class BarModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.mod = Model()
def forward(self, x):
if "cuda" in str(x.device):
mod = self.mod.to(x.device)
return mod(x)
else:
return x.sum()
class BarBar(torch.nn.Module):
def __init__(self):
super().__init__()
self.mod = BarModel()
def forward(self, x):
with torch.amp.autocast(device_type="cuda"):
y = self.mod(x)
return y
with torch.no_grad():
with self.assertRaisesRegex(RuntimeError, "Couldn't swap Embedding.weight"):
_ = torch.export.export(
BarBar(),
(),
{"x": torch.randn(4, 4, 4, device="cuda")},
strict=False,
).module()
def test_export_for_training_with_state_dict_hooks(self):
def _state_dict_pre_hook(mod, prefix, keep_vars):
mod._buffers["test"] = torch.Tensor([1])
def _state_dict_hook(mod, state_dict, prefix, *args, **kwargs):
keys = list(state_dict.keys())
for key in keys:
local_key = key[len(prefix) :]
if local_key.startswith("layer"):
new_key = prefix + local_key.replace("layer.", "")
state_dict[new_key] = state_dict[key]
if new_key != key:
del state_dict[key]
class Layer(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(2, 2)
self.linear2 = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.linear1(x)
x = torch.relu(x)
x = self.linear2(x)
return x
class CustomModule(torch.nn.Module):
def __init__(self):
super().__init__()
self._register_state_dict_hook(_state_dict_hook)
self.register_state_dict_pre_hook(_state_dict_pre_hook)
# non-persistent buffer in named_buffers()
self.foo = torch.nn.Buffer(torch.rand(2, 3), persistent=False)
# non-persistent buffer not in named_buffers()
self.register_buffer("buf", None, persistent=False)
self.layer = Layer()
def forward(self, x):
x = self.layer(x)
return x
M = CustomModule()
inp = (torch.randn(2, 2),)
ep = export(M, inp)
export_res = ep.module()(*inp)
ref_res = M(*inp)
self.assertEqual(export_res, ref_res)
# we want to store the unprocessed keys
self.assertTrue(
{
"layer.linear1.weight",
"layer.linear1.bias",
"layer.linear2.weight",
"layer.linear2.bias",
}.issubset({spec.target for spec in ep.graph_signature.input_specs})
)
unflattened = torch.export.unflatten(ep)
export_res = unflattened(*inp)
self.assertEqual(export_res, ref_res)
with torch._export.utils._disable_load_state_dict_hooks(M):
state_dict = M.state_dict()
self.assertEqual(
{
"layer.linear1.weight",
"layer.linear1.bias",
"layer.linear2.weight",
"layer.linear2.bias",
},
state_dict.keys(),
)
state_dict = M.state_dict()
self.assertEqual(
{
"linear1.weight",
"linear1.bias",
"linear2.weight",
"linear2.bias",
"test",
},
state_dict.keys(),
)
@testing.expectedFailureSerDer # T202237665
@testing.expectedFailureSerDerNonStrict
def test_dynamic_sym_round(self):
class ModuleWithSymRound(torch.nn.Module):
def forward(self, x):
out_size = round(x.shape[0] / 2.0)
return x[:out_size]
dim_min = 5
dim_max = 10
dynamic_shapes = {"x": {0: Dim("n", min=dim_min, max=dim_max)}}
module = ModuleWithSymRound()
inp = (torch.randn(8),)
ep = export(module, inp, dynamic_shapes=dynamic_shapes)
# Expect builtin round in the export graph
round_nodes = [
n for n in ep.graph.nodes if n.op == "call_function" and n.target is round
]
self.assertEqual(len(round_nodes), 1)
# Check pre/post-export equality
for i in range(dim_min, dim_max + 1):
dyn_inp = (torch.randn(i),)
export_res = ep.module()(*dyn_inp)
ref_res = module(*dyn_inp)
self.assertEqual(export_res, ref_res)
@testing.expectedFailureSerDer
@testing.expectedFailureSerDerNonStrict
def test_dynamic_lr_shift(self):
class Module(torch.nn.Module):
def forward(self, x):
rshift = x.shape[0] >> 1
lshift = x.shape[0] << 1
return x[:rshift], x[:lshift]
dynamic_shapes = {"x": {0: Dim("N", min=5, max=10)}}
inp = (torch.randn(8),)
ep = export(Module(), inp, dynamic_shapes=dynamic_shapes)
for op in (operator.lshift, operator.rshift):
shift_op = [
n for n in ep.graph.nodes if n.op == "call_function" and n.target == op
]
self.assertEqual(len(shift_op), 1)
def test_export_rnn_variants_with_warning(self):
"""
Test that when exporting RNN, LSTM, and GRU models in non-strict mode, it:
1. Produces expected warnings about tensor attributes being assigned during export
2. Does not leak fake tensors in the model's flat weights
3. Does not produce extra tensor constants in the graph signature
"""
rnn_types = [
(torch.nn.RNN, "RNN"),
(torch.nn.LSTM, "LSTM"),
(torch.nn.GRU, "GRU"),
]
for rnn_class, rnn_name in rnn_types:
with self.subTest(rnn_type=rnn_name):
m = rnn_class(
input_size=2, hidden_size=4, num_layers=1, batch_first=True
)
sample_inputs = (torch.randn(1, 2, 2),)
eager_out = m(*sample_inputs)
# Verify that export produces the expected warning about tensor attributes
with self.assertWarnsRegex(
UserWarning,
r"The tensor attributes self\._flat_weights\[0\], self\._flat_weights\[1\], "
r"self\._flat_weights\[2\], self\._flat_weights\[3\] were assigned during export.*",
):
ep = torch.export.export(m, sample_inputs, strict=False)
ep_out = ep.module()(*sample_inputs)
self.assertEqual(eager_out, ep_out)
# Verify no fake tensor leakage: flat weights should be real tensors
for flat_weight in m._flat_weights:
self.assertTrue(
not isinstance(
flat_weight, torch._subclasses.fake_tensor.FakeTensor
)
)
# Verify no tensor constants in graph signature
self.assertEqual(len(ep.graph_signature.lifted_tensor_constants), 0)
@contextmanager
def distributed_env(self, world_size):
try:
torch.distributed.init_process_group(
backend="fake",
world_size=world_size,
rank=0,
)
yield
finally:
torch.distributed.destroy_process_group()
@xfailIfDistributedNotSupported
def test_distributed_all_reduce(self):
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4, 3)
def forward(self, x):
y = self.linear(x).abs().clamp(max=1.0) * 2
torch.distributed.all_reduce(y)
return y
with self.distributed_env(world_size=2):
m = Foo()
ep = export(m, (torch.randn(4, 4),))
inp = (torch.randn(4, 4),)
self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp)))
@xfailIfDistributedNotSupported
def test_distributed_all_gather(self):
class Foo(torch.nn.Module):
def forward(self, x):
ys = [torch.empty_like(x) for _ in range(2)]
torch.distributed.all_gather(ys, x)
return ys
with self.distributed_env(world_size=2):
m = Foo()
ep = export(m, (torch.randn(2),))
inp = (torch.randn(2),)
self.assertTrue(
torch.allclose(a, b) for a, b in zip(ep.module()(*inp), m(*inp))
)
@xfailIfDistributedNotSupported
def test_distributed_all_gather_into_tensor(self):
class Foo(torch.nn.Module):
def forward(self, x):
y = torch.empty(2 * 2)
torch.distributed.all_gather_into_tensor(y, x)
return y
with self.distributed_env(world_size=2):
m = Foo()
ep = export(m, (torch.randn(2),))
inp = (torch.randn(2),)
self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp)))
@xfailIfDistributedNotSupported
@testing.expectedFailureCppRuntime
def test_distributed_all_to_all_single(self):
class Foo(torch.nn.Module):
def forward(self, x):
y = torch.empty(4)
torch.distributed.all_to_all_single(y, x)
return y
with self.distributed_env(world_size=4):
m = Foo()
ep = export(m, (torch.randn(4),))
nodes = ep.graph.find_nodes(
op="call_function",
target=torch.ops._c10d_functional.all_to_all_single.default,
)
self.assertEqual(len(nodes), 1)
@xfailIfDistributedNotSupported
@testing.expectedFailureCppRuntime
def test_distributed_reduce_scatter_tensor(self):
class Foo(torch.nn.Module):
def forward(self, x):
y = torch.empty(2)
torch.distributed.reduce_scatter_tensor(y, x)
return y
with self.distributed_env(world_size=2):
m = Foo()
ep = export(m, (torch.randn(2 * 2),))
nodes = ep.graph.find_nodes(
op="call_function",
target=torch.ops._c10d_functional.reduce_scatter_tensor.default,
)
self.assertEqual(len(nodes), 1)
def test_default_decomposition_core_cia_ops(self):
"""
Verify that core ATen ops with Composite Implicit Autograd dispatch are not
decomposed by default.
"""
# TODO Add avg_pool1d, and adaptive_avg_pool1d when ready.
# See issue #116684.
core_cia_ops = {
"torch.ops.aten.upsample_bilinear2d.vec": (
torch.ops.aten.upsample_bilinear2d.vec,
{
"align_corners": False,
"scale_factors": [2, 2],
"output_size": None,
},
),
"torch.ops.aten.upsample_nearest2d.vec": (
torch.ops.aten.upsample_nearest2d.vec,
{
"scale_factors": [2, 2],
"output_size": None,
},
),
}
for op_name, (op, kwargs) in core_cia_ops.items():
class M(torch.nn.Module):
def forward(self, x):
return op(x, **kwargs)
ep = export(M(), (torch.randn(2, 3, 4, 5),))
FileCheck().check_count(op_name, 1, exactly=True).run(ep.graph_module.code)
decomp_table = default_decompositions()
ep = ep.run_decompositions(
decomp_table=decomp_table,
)
FileCheck().check_count(op_name, 1, exactly=True).run(ep.graph_module.code)
def test_wrapper_module(self):
def f(x):
return torch.abs(x)
from torch.export import _wrapper_utils
model = _wrapper_utils._WrapperModule(f)
ep = export(
model,
(
torch.randn(
8,
),
),
)
self.assertExpectedInline(
str(ep.graph_module.code).strip(),
"""\
def forward(self, args_0):
abs_1 = torch.ops.aten.abs.default(args_0); args_0 = None
return (abs_1,)""",
)
@testing.expectedFailureStrictV2
def test_sdpa_gqa(self):
from torch.nn.attention import sdpa_kernel, SDPBackend
class Foo(torch.nn.Module):
def forward(self, q, k, v):
return F.scaled_dot_product_attention(q, k, v, enable_gqa=True)
q = torch.randn(1, 32, 256, 128)
k = torch.randn(1, 8, 256, 128)
v = torch.randn(1, 8, 256, 128)
with sdpa_kernel(SDPBackend.MATH):
ep_math = export(Foo(), (q, k, v))
ep_math = ep_math.run_decompositions()
self.assertExpectedInline(
ep_math.graph_module.code.strip(),
"""\
def forward(self, q, k, v):
mul = torch.ops.aten.mul.Scalar(q, 0.29730177875068026); q = None
unsqueeze = torch.ops.aten.unsqueeze.default(k, 2); k = None
expand = torch.ops.aten.expand.default(unsqueeze, [1, 8, 4, 256, 128]); unsqueeze = None
clone = torch.ops.aten.clone.default(expand, memory_format = torch.contiguous_format); expand = None
view = torch.ops.aten.view.default(clone, [1, 32, 256, 128]); clone = None
unsqueeze_1 = torch.ops.aten.unsqueeze.default(v, 2); v = None
expand_1 = torch.ops.aten.expand.default(unsqueeze_1, [1, 8, 4, 256, 128]); unsqueeze_1 = None
clone_1 = torch.ops.aten.clone.default(expand_1, memory_format = torch.contiguous_format); expand_1 = None
view_1 = torch.ops.aten.view.default(clone_1, [1, 32, 256, 128]); clone_1 = None
permute = torch.ops.aten.permute.default(view, [0, 1, 3, 2]); view = None
mul_1 = torch.ops.aten.mul.Scalar(permute, 0.29730177875068026); permute = None
expand_2 = torch.ops.aten.expand.default(mul, [1, 32, 256, 128]); mul = None
view_2 = torch.ops.aten.view.default(expand_2, [32, 256, 128]); expand_2 = None
expand_3 = torch.ops.aten.expand.default(mul_1, [1, 32, 128, 256]); mul_1 = None
view_3 = torch.ops.aten.view.default(expand_3, [32, 128, 256]); expand_3 = None
bmm = torch.ops.aten.bmm.default(view_2, view_3); view_2 = view_3 = None
view_4 = torch.ops.aten.view.default(bmm, [1, 32, 256, 256]); bmm = None
_softmax = torch.ops.aten._softmax.default(view_4, -1, False)
eq = torch.ops.aten.eq.Scalar(view_4, -inf); view_4 = None
logical_not = torch.ops.aten.logical_not.default(eq); eq = None
any_1 = torch.ops.aten.any.dim(logical_not, -1, True); logical_not = None
logical_not_1 = torch.ops.aten.logical_not.default(any_1); any_1 = None
full_like = torch.ops.aten.full_like.default(_softmax, 0, pin_memory = False, memory_format = torch.preserve_format)
where = torch.ops.aten.where.self(logical_not_1, full_like, _softmax); logical_not_1 = full_like = _softmax = None
expand_4 = torch.ops.aten.expand.default(where, [1, 32, 256, 256]); where = None
view_5 = torch.ops.aten.view.default(expand_4, [32, 256, 256]); expand_4 = None
expand_5 = torch.ops.aten.expand.default(view_1, [1, 32, 256, 128]); view_1 = None
view_6 = torch.ops.aten.view.default(expand_5, [32, 256, 128]); expand_5 = None
bmm_1 = torch.ops.aten.bmm.default(view_5, view_6); view_5 = view_6 = None
view_7 = torch.ops.aten.view.default(bmm_1, [1, 32, 256, 128]); bmm_1 = None
return (view_7,)""",
)
with sdpa_kernel(SDPBackend.FLASH_ATTENTION):
ep_flash = export(Foo(), (q, k, v))
ep_flash = ep_flash.run_decompositions()
self.assertExpectedInline(
ep_flash.graph_module.code.strip(),
"""\
def forward(self, q, k, v):
mul = torch.ops.aten.mul.Scalar(q, 0.29730177875068026); q = None
unsqueeze = torch.ops.aten.unsqueeze.default(k, 2); k = None
expand = torch.ops.aten.expand.default(unsqueeze, [1, 8, 4, 256, 128]); unsqueeze = None
clone = torch.ops.aten.clone.default(expand, memory_format = torch.contiguous_format); expand = None
view = torch.ops.aten.view.default(clone, [1, 32, 256, 128]); clone = None
unsqueeze_1 = torch.ops.aten.unsqueeze.default(v, 2); v = None
expand_1 = torch.ops.aten.expand.default(unsqueeze_1, [1, 8, 4, 256, 128]); unsqueeze_1 = None
clone_1 = torch.ops.aten.clone.default(expand_1, memory_format = torch.contiguous_format); expand_1 = None
view_1 = torch.ops.aten.view.default(clone_1, [1, 32, 256, 128]); clone_1 = None
permute = torch.ops.aten.permute.default(view, [0, 1, 3, 2]); view = None
mul_1 = torch.ops.aten.mul.Scalar(permute, 0.29730177875068026); permute = None
expand_2 = torch.ops.aten.expand.default(mul, [1, 32, 256, 128]); mul = None
view_2 = torch.ops.aten.view.default(expand_2, [32, 256, 128]); expand_2 = None
expand_3 = torch.ops.aten.expand.default(mul_1, [1, 32, 128, 256]); mul_1 = None
view_3 = torch.ops.aten.view.default(expand_3, [32, 128, 256]); expand_3 = None
bmm = torch.ops.aten.bmm.default(view_2, view_3); view_2 = view_3 = None
view_4 = torch.ops.aten.view.default(bmm, [1, 32, 256, 256]); bmm = None
_softmax = torch.ops.aten._softmax.default(view_4, -1, False)
eq = torch.ops.aten.eq.Scalar(view_4, -inf); view_4 = None
logical_not = torch.ops.aten.logical_not.default(eq); eq = None
any_1 = torch.ops.aten.any.dim(logical_not, -1, True); logical_not = None
logical_not_1 = torch.ops.aten.logical_not.default(any_1); any_1 = None
full_like = torch.ops.aten.full_like.default(_softmax, 0, pin_memory = False, memory_format = torch.preserve_format)
where = torch.ops.aten.where.self(logical_not_1, full_like, _softmax); logical_not_1 = full_like = _softmax = None
expand_4 = torch.ops.aten.expand.default(where, [1, 32, 256, 256]); where = None
view_5 = torch.ops.aten.view.default(expand_4, [32, 256, 256]); expand_4 = None
expand_5 = torch.ops.aten.expand.default(view_1, [1, 32, 256, 128]); view_1 = None
view_6 = torch.ops.aten.view.default(expand_5, [32, 256, 128]); expand_5 = None
bmm_1 = torch.ops.aten.bmm.default(view_5, view_6); view_5 = view_6 = None
view_7 = torch.ops.aten.view.default(bmm_1, [1, 32, 256, 128]); bmm_1 = None
permute_1 = torch.ops.aten.permute.default(view_7, [2, 0, 1, 3]); view_7 = None
clone_2 = torch.ops.aten.clone.default(permute_1, memory_format = torch.contiguous_format); permute_1 = None
permute_2 = torch.ops.aten.permute.default(clone_2, [1, 2, 0, 3]); clone_2 = None
return (permute_2,)""",
)
# test backend check for invalid inputs
error_type = (
RuntimeError
if is_non_strict_test(self._testMethodName)
else torch._dynamo.exc.TorchRuntimeError
)
with self.assertRaisesRegex(
error_type,
r"Number of heads in key and value must divide the number of heads",
):
export(Foo(), (torch.randn(1, 33, 256, 128), k, v))
def test_namedtuple_input_export(self):
# test for NamedTuple inputs with both strict and non-strict export modes
from collections import namedtuple
PointNT = namedtuple("PointNT", ["x", "y"])
class M(torch.nn.Module):
def forward(self, x, y):
return x + y
inp = PointNT(torch.ones(3), torch.ones(3))
ep_non_strict = export(M(), inp)
result_non_strict = ep_non_strict.module()(*inp)
ep_strict = export(M(), inp, strict=True)
result_strict = ep_strict.module()(*inp)
self.assertEqual(result_non_strict, result_strict)
def test_tril_dynamic_diagonal(self):
class Module(torch.nn.Module):
def forward(self, x, y):
x_len = x.shape[0]
y_len = y.shape[0]
mask = torch.ones(x_len, y_len, dtype=torch.bool, device=x.device)
mask = mask.tril(diagonal=y_len - x_len)
return mask
x = torch.randn(3, 4)
y = torch.randn(5, 4)
x_len = Dim("x_len", min=1, max=64)
y_len = Dim("y_len", min=1, max=64)
ep = export(
Module(),
(x, y),
dynamic_shapes={
"x": {0: x_len},
"y": {0: y_len},
},
)
eager_out = Module()(x, y)
exported_out = ep.module()(x, y)
self.assertEqual(eager_out, exported_out)
self.assertEqual(exported_out.shape, (3, 5))
x2 = torch.randn(4, 4)
y2 = torch.randn(7, 4)
eager_out2 = Module()(x2, y2)
exported_out2 = ep.module()(x2, y2)
self.assertEqual(eager_out2, exported_out2)
self.assertEqual(exported_out2.shape, (4, 7))
expected_mask = torch.ones(3, 5, dtype=torch.bool).tril(diagonal=2)
self.assertEqual(eager_out, expected_mask)
def test_triu_dynamic_diagonal(self):
class Module(torch.nn.Module):
def forward(self, x, y):
x_len = x.shape[0]
y_len = y.shape[0]
mask = torch.ones(x_len, y_len, dtype=torch.bool, device=x.device)
mask = mask.triu(diagonal=y_len - x_len)
return mask
x = torch.randn(3, 4)
y = torch.randn(5, 4)
x_len = Dim("x_len", min=1, max=64)
y_len = Dim("y_len", min=1, max=64)
ep = export(
Module(),
(x, y),
dynamic_shapes={
"x": {0: x_len},
"y": {0: y_len},
},
)
eager_out = Module()(x, y)
exported_out = ep.module()(x, y)
self.assertEqual(eager_out, exported_out)
self.assertEqual(exported_out.shape, (3, 5))
x2 = torch.randn(4, 4)
y2 = torch.randn(7, 4)
eager_out2 = Module()(x2, y2)
exported_out2 = ep.module()(x2, y2)
self.assertEqual(eager_out2, exported_out2)
self.assertEqual(exported_out2.shape, (4, 7))
expected_mask = torch.ones(3, 5, dtype=torch.bool).triu(diagonal=2)
self.assertEqual(eager_out, expected_mask)
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo isn't support")
|
GraphModule
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/structured/structured_tensor_test.py
|
{
"start": 3183,
"end": 70739
}
|
class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllEqual(self, a, b, msg=None):
if not (isinstance(a, structured_tensor.StructuredTensor) or
isinstance(b, structured_tensor.StructuredTensor)):
return super(StructuredTensorTest, self).assertAllEqual(a, b, msg)
if not isinstance(a, structured_tensor.StructuredTensor):
a = structured_tensor.StructuredTensor.from_pyval(a)
self._assertStructuredEqual(a, b, msg, False)
elif not isinstance(b, structured_tensor.StructuredTensor):
b = structured_tensor.StructuredTensor.from_pyval(b)
self._assertStructuredEqual(a, b, msg, False)
else:
self._assertStructuredEqual(a, b, msg, True)
def _assertStructuredEqual(self, a, b, msg, check_shape):
if check_shape:
self.assertEqual(repr(a.shape), repr(b.shape))
self.assertEqual(set(a.field_names()), set(b.field_names()))
for field in a.field_names():
a_value = a.field_value(field)
b_value = b.field_value(field)
self.assertIs(type(a_value), type(b_value))
if isinstance(a_value, structured_tensor.StructuredTensor):
self._assertStructuredEqual(a_value, b_value, msg, check_shape)
else:
self.assertAllEqual(a_value, b_value, msg)
@parameterized.named_parameters([
# Scalar (rank=0) StructuredTensors.
{
"testcase_name": "Rank0_WithTensorFields",
"rank": 0,
"fields": {"Foo": 5, "Bar": [1, 2, 3]},
"expected_shape": []
},
{
"testcase_name": "Rank0_WithRaggedFields",
"fields": {
# note: fields have varying rank & ragged_rank.
"p": ragged_factory_ops.constant_value([[1, 2], [3]]),
"q": ragged_factory_ops.constant_value([[[4]], [], [[5, 6]]]),
"r": ragged_factory_ops.constant_value([[[4]], [], [[5]]],
ragged_rank=1),
"s": ragged_factory_ops.constant_value([[[4]], [], [[5]]],
ragged_rank=2),
},
"rank": 0,
"expected_shape": [],
},
{
"testcase_name": "Rank0_WithStructuredFields",
"fields": lambda: {
"foo": StructuredTensor.from_pyval({"a": 1, "b": [1, 2, 3]}),
"bar": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
"rank": 0,
"expected_shape": [],
},
{
"testcase_name": "Rank0_WithMixedFields",
"fields": lambda: {
# TODO(martinz): should handle this, but can't.
"f1": 5,
"f2": [1, 2, 3],
"f3": ragged_factory_ops.constant_value([[1, 2], [3]]),
"f4": StructuredTensor.from_pyval({"a": 1, "b": [1, 2, 3]}),
},
"rank": 0,
"expected_shape": [],
},
# Vector (rank=1) StructuredTensors.
{
"testcase_name": "Rank1_WithExplicitNrows",
"fields": {"x": [1, 2], "y": [[1, 2], [3, 4]]},
"rank": 1,
"expected_shape": [2],
},
{
"testcase_name": "Rank1_WithTensorFields",
"fields": {"x": [1, 2], "y": [[1, 2], [3, 4]]},
"rank": 1,
"expected_shape": [2],
},
{
"testcase_name": "Rank1_WithRaggedFields",
"fields": {
# note: fields have varying rank & ragged_rank.
"p": ragged_factory_ops.constant_value([[1, 2], [3]]),
"q": ragged_factory_ops.constant_value([[[4]], [[5, 6], [7]]]),
"r": ragged_factory_ops.constant_value([[], [[[12]], [[13]]]]),
"s": ragged_factory_ops.constant_value([[], [[[12]], [[13]]]],
ragged_rank=1),
"t": ragged_factory_ops.constant_value([[], [[[12]], [[13]]]],
ragged_rank=2),
},
"rank": 1,
"expected_shape": [2],
},
{
"testcase_name": "Rank1_WithStructuredFields",
"fields": lambda: {
"foo": StructuredTensor.from_pyval(
[{"a": 1, "b": [1, 2, 3]}, {"a": 2, "b": []}]),
"bar": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
"rank": 1,
"expected_shape": [2],
},
{
"testcase_name": "Rank1_WithMixedFields",
"fields": lambda: {
"x": [1, 2],
"y": [[1, 2], [3, 4]],
"r": ragged_factory_ops.constant_value([[1, 2], [3]]),
"s": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
"rank": 1,
"expected_shape": [2],
},
{
"testcase_name": "Rank1_WithNoElements",
"fields": lambda: {
"x": [],
"y": np.zeros([0, 8]),
"r": ragged_factory_ops.constant([], ragged_rank=1),
"s": StructuredTensor.from_pyval([]),
},
"rank": 1,
"expected_shape": [0], # Note: could also be [None] (?)
},
{
"testcase_name": "Rank1_InferDimSize",
"fields": lambda: {
"x": [1, 2],
"y": [[1, 2], [3, 4]],
"r": ragged_factory_ops.constant_value([[1, 2], [3]]),
"p": ragged_factory_ops.constant_value([[4], [5, 6, 7]]),
"foo": StructuredTensor.from_pyval(
[{"a": 1, "b": [1, 2, 3]}, {"a": 2, "b": []}]),
"bar": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
"rank": 1,
"expected_shape": [2], # inferred from field values.
},
# Matrix (rank=2) StructuredTensors.
{
"testcase_name": "Rank2_WithTensorFields",
"fields": {
"x": [[1, 2, 3], [4, 5, 6]],
"y": np.ones([2, 3, 8])
},
"rank": 2,
"expected_shape": [2, 3], # inferred from field values.
},
{
"testcase_name": "Rank2_WithRaggedFields",
"fields": {
# Note: fields must have identical row_splits.
"a": ragged_factory_ops.constant_value([[1, 2], [3]]),
"b": ragged_factory_ops.constant_value([[4, 5], [6]]),
"c": ragged_factory_ops.constant_value([[[1, 2], [3]], [[4, 5]]]),
"d": ragged_factory_ops.constant_value(
[[[[1, 2], [3]], [[4], [], [5]]], [[[6, 7, 8], []]]]),
},
"rank": 2,
"expected_shape": [2, None],
},
{
"testcase_name": "Rank2_WithStructuredFields",
"fields": lambda: {
# Note: fields must have identical row_splits.
"a": StructuredTensor.from_pyval(
[[{"x": 1}], [{"x": 2}, {"x": 3}]]),
"b": StructuredTensor.from_pyval(
[[[{"y": 1}]], [[], [{"y": 2}, {"y": 3}]]]),
},
"rank": 2,
"expected_shape": [2, None], # ragged shape = [[*], [*, *]]
},
{
"testcase_name": "Rank2_WithDiffDTypes",
"fields": lambda: {
# Note: fields must have identical row_splits.
"a": ragged_factory_ops.constant_value(
[[1], [2, 3]], row_splits_dtype=dtypes.int32),
"b": ragged_factory_ops.constant_value(
[["a"], ["b", "c"]], row_splits_dtype=dtypes.int64),
},
"rank": 2,
"expected_shape": [2, None], # ragged shape = [[*], [*, *]]
},
{
"testcase_name": "Rank2_WithMixedFields",
"fields": lambda: {
"a": [[1, 2], [3, 4]],
"b": ragged_factory_ops.constant_value([[1, 2], [3, 4]]),
"c": StructuredTensor.from_pyval(
[[[{"y": 1}], []], [[], [{"y": 2}, {"y": 3}]]]),
"d": ragged_factory_ops.constant_value(
[[[1, 2], []], [[3], [4]]]),
},
"rank": 2,
"expected_shape": [2, 2],
},
# Rank=4 StructuredTensors.
{
"testcase_name": "Rank4_WithMixedFields",
"fields": lambda: {
"a": np.ones([1, 2, 3, 1]),
"b": np.ones([1, 2, 3, 1, 5]),
"c": ragged_factory_ops.constant(np.zeros([1, 2, 3, 1])),
"d": ragged_factory_ops.constant(
np.zeros([1, 2, 3, 1, 3]).tolist(), ragged_rank=1),
"e": ragged_factory_ops.constant(
np.zeros([1, 2, 3, 1, 2, 2]).tolist(), ragged_rank=2),
"f": ragged_factory_ops.constant(np.zeros([1, 2, 3, 1, 3])),
"g": StructuredTensor.from_pyval(
[[[[{"x": j, "y": k}] for k in range(3)]
for j in range(2)]]),
"h": StructuredTensor.from_pyval(
[[[[[{"x": j, "y": k, "z": z} for z in range(j)]]
for k in range(3)]
for j in range(2)]]),
},
"rank": 4,
"expected_shape": [1, 2, 3, 1], # inferred from field values.
},
]) # pyformat: disable
  def testFromFieldsAndRank(self, fields, rank, expected_shape):
    """Checks `from_fields_and_rank` infers the expected static shape."""
    if callable(fields):
      fields = fields()  # deferred construction: fields may include tensors.
    struct = StructuredTensor.from_fields_and_rank(fields, rank)
    self.assertEqual(struct.shape.as_list(), expected_shape)
@parameterized.named_parameters([
{
"testcase_name": "NoFields",
"rank": 1,
"fields": {},
"msg": "Must provide at least one field"
},
{
"testcase_name": "IntegerRank",
"rank": 0.5,
"fields": {
"foo": [1]
},
"msg": "rank must be an integer"
},
{
"testcase_name": "NonNegativeRank",
"rank": -1,
"fields": {
"bar": [1, 2, 3]
},
"msg": "rank must be nonnegative"
},
])
  def testFromFieldsAndRankError(self, fields, rank, msg):
    """Checks invalid fields/rank combinations raise ValueError matching msg."""
    if callable(fields):
      fields = fields()  # deferred construction: fields may include tensors.
    with self.assertRaisesRegex(ValueError, msg):
      StructuredTensor.from_fields_and_rank(fields, rank)
@parameterized.named_parameters([
# Scalar (rank=0) StructuredTensors.
{
"testcase_name": "Rank0_WithNoFields",
"shape": [],
"fields": {},
},
{
"testcase_name": "Rank0_WithTensorFields",
"shape": [],
"fields": {"Foo": 5, "Bar": [1, 2, 3]},
},
{
"testcase_name": "Rank0_WithRaggedFields",
"shape": [],
"fields": {
# note: fields have varying rank & ragged_rank.
"p": ragged_factory_ops.constant_value([[1, 2], [3]]),
"q": ragged_factory_ops.constant_value([[[4]], [], [[5, 6]]]),
"r": ragged_factory_ops.constant_value([[[4]], [], [[5]]],
ragged_rank=1),
"s": ragged_factory_ops.constant_value([[[4]], [], [[5]]],
ragged_rank=2),
},
},
{
"testcase_name": "Rank0_WithStructuredFields",
"shape": [],
"fields": lambda: {
"foo": StructuredTensor.from_pyval({"a": 1, "b": [1, 2, 3]}),
"bar": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
},
{
"testcase_name": "Rank0_WithMixedFields",
"shape": [],
"fields": lambda: {
"f1": 5,
"f2": [1, 2, 3],
"f3": ragged_factory_ops.constant_value([[1, 2], [3]]),
"f4": StructuredTensor.from_pyval({"a": 1, "b": [1, 2, 3]}),
},
},
# Vector (rank=1) StructuredTensors.
{
"testcase_name": "Rank1_WithNoFields",
"shape": [2],
"fields": {},
},
{
"testcase_name": "Rank1_WithExplicitNrows",
"shape": [None],
"nrows": 2,
"fields": {"x": [1, 2], "y": [[1, 2], [3, 4]]},
"expected_shape": [2],
},
{
"testcase_name": "Rank1_WithTensorFields",
"shape": [2],
"fields": {"x": [1, 2], "y": [[1, 2], [3, 4]]},
},
{
"testcase_name": "Rank1_WithRaggedFields",
"shape": [2],
"fields": {
# note: fields have varying rank & ragged_rank.
"p": ragged_factory_ops.constant_value([[1, 2], [3]]),
"q": ragged_factory_ops.constant_value([[[4]], [[5, 6], [7]]]),
"r": ragged_factory_ops.constant_value([[], [[[12]], [[13]]]]),
"s": ragged_factory_ops.constant_value([[], [[[12]], [[13]]]],
ragged_rank=1),
"t": ragged_factory_ops.constant_value([[], [[[12]], [[13]]]],
ragged_rank=2),
},
},
{
"testcase_name": "Rank1_WithStructuredFields",
"shape": [2],
"fields": lambda: {
"foo": StructuredTensor.from_pyval(
[{"a": 1, "b": [1, 2, 3]}, {"a": 2, "b": []}]),
"bar": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
},
{
"testcase_name": "Rank1_WithMixedFields",
"shape": [2],
"fields": lambda: {
"x": [1, 2],
"y": [[1, 2], [3, 4]],
"r": ragged_factory_ops.constant_value([[1, 2], [3]]),
"s": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
},
{
"testcase_name": "Rank1_WithNoElements",
"shape": [0],
"fields": lambda: {
"x": [],
"y": np.zeros([0, 8]),
"r": ragged_factory_ops.constant([], ragged_rank=1),
"s": StructuredTensor.from_pyval([]),
},
},
{
"testcase_name": "Rank1_InferDimSize",
"shape": [None],
"fields": lambda: {
"x": [1, 2],
"y": [[1, 2], [3, 4]],
"r": ragged_factory_ops.constant_value([[1, 2], [3]]),
"p": ragged_factory_ops.constant_value([[4], [5, 6, 7]]),
"foo": StructuredTensor.from_pyval(
[{"a": 1, "b": [1, 2, 3]}, {"a": 2, "b": []}]),
"bar": StructuredTensor.from_pyval(
[[{"x": 12}], [{"x": 13}, {"x": 14}]]),
},
"expected_shape": [2], # inferred from field values.
},
# Matrix (rank=2) StructuredTensors.
{
"testcase_name": "Rank2_WithNoFields",
"shape": [2, 8],
"fields": {},
},
{
"testcase_name": "Rank2_WithNoFieldsAndExplicitRowPartitions",
"shape": [2, None],
"row_partitions":
lambda: [row_partition.RowPartition.from_row_lengths([3, 7])],
"fields": {},
},
{
"testcase_name": "Rank2_WithTensorFields",
"shape": [None, None],
"fields": {
"x": [[1, 2, 3], [4, 5, 6]],
"y": np.ones([2, 3, 8])
},
"expected_shape": [2, 3], # inferred from field values.
},
{
"testcase_name": "Rank2_WithRaggedFields",
"shape": [2, None], # ragged shape = [[*, *], [*]]
"fields": {
# Note: fields must have identical row_splits.
"a": ragged_factory_ops.constant_value([[1, 2], [3]]),
"b": ragged_factory_ops.constant_value([[4, 5], [6]]),
"c": ragged_factory_ops.constant_value([[[1, 2], [3]], [[4, 5]]]),
"d": ragged_factory_ops.constant_value(
[[[[1, 2], [3]], [[4], [], [5]]], [[[6, 7, 8], []]]]),
},
},
{
"testcase_name": "Rank2_WithStructuredFields",
"shape": [2, None], # ragged shape = [[*], [*, *]]
"fields": lambda: {
# Note: fields must have identical row_splits.
"a": StructuredTensor.from_pyval(
[[{"x": 1}], [{"x": 2}, {"x": 3}]]),
"b": StructuredTensor.from_pyval(
[[[{"y": 1}]], [[], [{"y": 2}, {"y": 3}]]]),
},
},
{
"testcase_name": "Rank2_WithMixedFields",
"shape": [2, None],
"fields": lambda: {
"a": [[1, 2], [3, 4]],
"b": ragged_factory_ops.constant_value([[1, 2], [3, 4]]),
"c": StructuredTensor.from_pyval(
[[[{"y": 1}], []], [[], [{"y": 2}, {"y": 3}]]]),
"d": ragged_factory_ops.constant_value(
[[[1, 2], []], [[3], [4]]]),
},
"expected_shape": [2, 2],
},
# Rank=4 StructuredTensors.
{
"testcase_name": "Rank4_WithNoFields",
"shape": [1, None, None, 3],
"fields": {},
"row_partitions": lambda: [
row_partition.RowPartition.from_row_lengths([3]),
row_partition.RowPartition.from_row_lengths([2, 0, 1]),
row_partition.RowPartition.from_uniform_row_length(3, nvals=9)
]
},
{
"testcase_name": "Rank4_WithMixedFields",
"shape": [1, None, None, 1],
"fields": lambda: {
"a": np.ones([1, 2, 3, 1]),
"b": np.ones([1, 2, 3, 1, 5]),
"c": ragged_factory_ops.constant(np.zeros([1, 2, 3, 1])),
"d": ragged_factory_ops.constant(
np.zeros([1, 2, 3, 1, 3]).tolist(), ragged_rank=1),
"e": ragged_factory_ops.constant(
np.zeros([1, 2, 3, 1, 2, 2]).tolist(), ragged_rank=2),
"f": ragged_factory_ops.constant(np.zeros([1, 2, 3, 1, 3])),
"g": StructuredTensor.from_pyval(
[[[[{"x": j, "y": k}] for k in range(3)]
for j in range(2)]]),
"h": StructuredTensor.from_pyval(
[[[[[{"x": j, "y": k, "z": z} for z in range(j)]]
for k in range(3)]
for j in range(2)]]),
},
"expected_shape": [1, 2, 3, 1], # inferred from field values.
},
{
"testcase_name": "mixed_shape_dtype",
"fields": {},
"shape": [None, None],
"nrows": (lambda: constant_op.constant(2, dtypes.int32)),
"row_partitions": (
lambda: [row_partition.RowPartition.from_row_lengths([3, 4])]),
"expected_shape": [2, None],
},
{
"testcase_name": "mixed_shape_dtype_fields",
"fields": (lambda: {
"a": ragged_factory_ops.constant(
[[1]], row_splits_dtype=dtypes.int32),
"b": ragged_factory_ops.constant(
[[1]], row_splits_dtype=dtypes.int64)}),
"shape": [None, None],
"expected_shape": [1, None],
}
]) # pyformat: disable
  def testFromFields(self,
                     shape,
                     fields,
                     expected_shape=None,
                     nrows=None,
                     row_partitions=None):
    """Checks `from_fields` shape inference and field round-tripping.

    Runs once with validate=True and once with validate=False; the result
    must have the expected shape and return each field value unchanged.
    """
    if callable(fields):
      fields = fields()  # deferred construction: fields may include tensors.
    if callable(nrows):
      nrows = nrows()  # deferred construction.
    if callable(row_partitions):
      row_partitions = row_partitions()  # deferred construction.
    for validate in (True, False):
      struct = StructuredTensor.from_fields(
          fields,
          shape,
          nrows=nrows,
          row_partitions=row_partitions,
          validate=validate)
      # When the test case gives no separate expectation, the input shape
      # itself must be preserved.
      if expected_shape is None:
        expected_shape = shape
      self.assertEqual(struct.shape.as_list(), expected_shape)
      self.assertLen(expected_shape, struct.rank)
      self.assertCountEqual(struct.field_names(), tuple(fields.keys()))
      for field, value in fields.items():
        # Every field must come back as one of the supported tensor types.
        self.assertIsInstance(
            struct.field_value(field),
            (tensor.Tensor, structured_tensor.StructuredTensor,
             ragged_tensor.RaggedTensor))
        self.assertAllEqual(struct.field_value(field), value)
@parameterized.parameters([
dict(fields={}, shape=object(), err=TypeError),
dict(
fields=object(),
shape=[],
err=TypeError,
msg="fields must be a dictionary"),
dict(
fields={1: 2}, shape=[], err=TypeError,
msg="Unexpected type for key"),
dict(
fields={"x": object()},
shape=[],
err=(TypeError, ValueError),
msg="Error with shape of x|Unexpected type for value"),
dict(
fields={},
shape=None,
err=ValueError,
msg="StructuredTensor's shape must have known rank"),
dict(
fields={"f": 5},
shape=[5],
err=ValueError,
msg=r"Field f has shape \(\), which is incompatible with the shape "
r"that was specified or inferred from other fields: \(5,\)|Shapes"),
dict(
fields=dict(x=[1], y=[]),
shape=[None],
err=ValueError,
msg=r"Error in shape of y"),
dict(
fields={"": 5},
shape=[],
err=ValueError,
msg="Field name '' is not currently allowed."),
dict(
fields={"_": 5},
shape=[],
err=ValueError,
msg="Field name '_' is not currently allowed."),
dict(
fields={
"r1": ragged_factory_ops.constant_value([[1, 2], [3]]),
"r2": ragged_factory_ops.constant_value([[1, 2, 3], [4]])
},
shape=[2, None],
validate=True,
err=ValueError,
msg=r"Error in shape of r2",
),
dict(
fields={},
shape=(),
nrows=5,
err=ValueError,
msg="nrows must be None if shape.rank==0"),
dict(
fields={},
shape=(),
row_partitions=[0],
err=ValueError,
msg=r"row_partitions must be None or \[\] if shape.rank<2"),
dict(
fields={},
shape=(None, None, None),
row_partitions=[],
err=ValueError,
msg=r"len\(row_partitions\) must be shape.rank-1"),
dict(
fields={},
shape=[None],
err=ValueError,
msg="Must specify `nrows`, a fully specified `shape`, "
"or have `fields` if `rank=1`"),
dict(
fields={},
shape=[None, None],
err=ValueError,
msg="Must specify row_partitions, a fully specified shape, "
"or have fields if rank > 1"),
])
  def testFromFieldsErrors(self,
                           fields,
                           shape,
                           nrows=None,
                           row_partitions=None,
                           validate=False,
                           err=ValueError,
                           msg=None,
                           test_in_eager=True):
    """Checks that invalid `from_fields` arguments raise `err` matching `msg`.

    The constructed struct is also evaluated inside the assertRaises context
    so that errors surfacing only at evaluation time are still caught —
    presumably relevant in graph mode; confirm against validate semantics.
    """
    if not test_in_eager and context.executing_eagerly():
      return
    if callable(fields):
      fields = fields()  # deferred construction.
    if callable(nrows):
      nrows = nrows()  # deferred construction.
    if callable(row_partitions):
      row_partitions = row_partitions()  # deferred construction.
    with self.assertRaisesRegex(err, msg):
      struct = StructuredTensor.from_fields(
          fields=fields,
          shape=shape,
          nrows=nrows,
          row_partitions=row_partitions,
          validate=validate)
      # Force evaluation of all field values and nrows; the expected error
      # may be raised here rather than at construction.
      for field_name in struct.field_names():
        self.evaluate(struct.field_value(field_name))
      self.evaluate(struct.nrows())
def testMergeNrowsErrors(self):
nrows = constant_op.constant(5)
static_nrows = tensor_shape.Dimension(5)
value = constant_op.constant([1, 2, 3])
with self.assertRaisesRegex(ValueError, "fields have incompatible nrows"):
structured_tensor._merge_nrows(
nrows, static_nrows, value, dtypes.int32, validate=False)
def testNestedStructConstruction(self):
rt = ragged_factory_ops.constant([[1, 2], [3]])
struct1 = StructuredTensor.from_fields(shape=[], fields={"x": [1, 2]})
struct2 = StructuredTensor.from_fields(shape=[2], fields={"x": [1, 2]})
struct3 = StructuredTensor.from_fields(
shape=[], fields={
"r": rt,
"s": struct1
})
struct4 = StructuredTensor.from_fields(
shape=[2], fields={
"r": rt,
"s": struct2
})
self.assertEqual(struct3.shape.as_list(), [])
self.assertEqual(struct3.rank, 0)
self.assertEqual(set(struct3.field_names()), set(["r", "s"]))
self.assertAllEqual(struct3.field_value("r"), rt)
self.assertAllEqual(struct3.field_value("s"), struct1)
self.assertEqual(struct4.shape.as_list(), [2])
self.assertEqual(struct4.rank, 1)
self.assertEqual(set(struct4.field_names()), set(["r", "s"]))
self.assertAllEqual(struct4.field_value("r"), rt)
self.assertAllEqual(struct4.field_value("s"), struct2)
  def testPartitionOuterDims(self):
    """partition_outer_dimension wraps rows in a new (possibly ragged) dim."""
    a = dict(x=1, y=[1, 2])
    b = dict(x=2, y=[3, 4])
    c = dict(x=3, y=[5, 6])
    d = dict(x=4, y=[7, 8])
    st1 = StructuredTensor.from_pyval([a, b, c, d])
    # Ragged partition: rows grouped as [2, 0, 1, 1].
    st2 = st1.partition_outer_dimension(
        row_partition.RowPartition.from_row_splits([0, 2, 2, 3, 4]))
    self.assertAllEqual(st2, [[a, b], [], [c], [d]])
    # Partitioning again nests one more level.
    st3 = st2.partition_outer_dimension(
        row_partition.RowPartition.from_row_lengths([1, 0, 3, 0]))
    self.assertAllEqual(st3, [[[a, b]], [], [[], [c], [d]], []])
    # If we partition with uniform_row_lengths, then `x` is partitioned into
    # a Tensor (not a RaggedTensor).
    st4 = st1.partition_outer_dimension(
        row_partition.RowPartition.from_uniform_row_length(
            uniform_row_length=2, nvals=4, nrows=2))
    self.assertAllEqual(
        st4,
        structured_tensor.StructuredTensor.from_pyval(
            [[a, b], [c, d]],
            structured_tensor.StructuredTensor.Spec(
                _ragged_shape=DynamicRaggedShape.Spec(
                    row_partitions=[],
                    static_inner_shape=[2, 2],
                    dtype=dtypes.int64),
                _fields={
                    "x":
                        tensor.TensorSpec([2, 2], dtypes.int32),
                    "y":
                        ragged_tensor.RaggedTensorSpec([2, 2, None],
                                                       dtypes.int32)
                })))
def testPartitionOuterDimension3(self):
rt = ragged_tensor.RaggedTensor.from_value_rowids(
array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1])
struct = structured_tensor.StructuredTensor.from_fields({"r": rt}, [2])
struct_2 = struct.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
struct_3 = struct_2.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
self.assertEqual(3, struct_3.rank)
  def testWithPrivateSpecialType(self):
    """from_fields_and_rank accepts fields wrapped in _PrivateSpecialType.

    _PrivateSpecialType is a wrapper defined elsewhere in this test file;
    its outer dimension must be reflected in the resulting struct's shape.
    """
    rt = ragged_tensor.RaggedTensor.from_value_rowids(
        array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1])
    pst = _PrivateSpecialType(rt)
    pst_shape = array_ops.shape_v2(pst)
    st = structured_tensor.StructuredTensor.from_fields_and_rank({"r": pst}, 1)
    # NOTE: reaches into the private _ragged_shape attribute to compare dims.
    st_shape = st._ragged_shape
    self.assertEqual(1, st.rank)
    self.assertAllEqual(pst_shape[0], st_shape[0])
  def testWithPrivateBrokenType(self):
    """from_fields_and_rank reports a shape error for _PrivateBrokenType.

    _PrivateBrokenType is a deliberately misbehaving wrapper defined
    elsewhere in this test file.
    """
    rt = ragged_tensor.RaggedTensor.from_value_rowids(
        array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1])
    pbt = _PrivateBrokenType(rt)
    with self.assertRaisesRegex(ValueError, "Error in shape of r"):
      structured_tensor.StructuredTensor.from_fields_and_rank({"r": pbt}, 1)
def testPartitionOuterDimsErrors(self):
st = StructuredTensor.from_fields({})
partition = row_partition.RowPartition.from_row_splits([0])
with self.assertRaisesRegex(ValueError,
r"Shape \(\) must have rank at least 1"):
st.partition_outer_dimension(partition)
with self.assertRaisesRegex(TypeError,
"row_partition must be a RowPartition"):
st.partition_outer_dimension(10)
@parameterized.named_parameters([
{
"testcase_name": "ScalarEmpty",
"pyval": {},
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={})
},
{
"testcase_name": "ScalarSimple",
"pyval": {"a": 12, "b": [1, 2, 3], "c": [[1, 2], [3]]},
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={
"a": 12,
"b": [1, 2, 3],
"c": ragged_factory_ops.constant([[1, 2], [3]])})
},
{
"testcase_name": "ScalarSimpleWithTypeSpec",
"pyval": {"a": 12, "b": [1, 2, 3], "c": [[1, 2], [3]]},
"type_spec": StructuredTensor.Spec._from_fields_and_rank(
fields={
"a": tensor.TensorSpec([], dtypes.int32),
"b": tensor.TensorSpec([None], dtypes.int32),
"c": ragged_tensor.RaggedTensorSpec([None, None],
dtypes.int32)},
rank=0),
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={
"a": 12,
"b": [1, 2, 3],
"c": ragged_factory_ops.constant([[1, 2], [3]])})
},
{
"testcase_name": "ScalarWithNestedStruct",
"pyval": {"a": 12, "b": [1, 2, 3], "c": {"x": b"Z", "y": [10, 20]}},
"expected": lambda: StructuredTensor.from_fields(shape=[], fields={
"a": 12,
"b": [1, 2, 3],
"c": StructuredTensor.from_fields(shape=[], fields={
"x": "Z",
"y": [10, 20]})})
},
{
"testcase_name": "EmptyList",
"pyval": [],
"expected": lambda: [],
},
{
"testcase_name": "ListOfEmptyList",
"pyval": [[], []],
"expected": lambda: [[], []],
},
{
"testcase_name": "EmptyListWithTypeSpecAndFields",
"pyval": [],
"type_spec": structured_tensor.StructuredTensor.Spec._from_fields_and_rank(
fields={"a": tensor.TensorSpec([0], dtypes.int32)},
rank=1),
"expected": lambda: StructuredTensor.from_fields(shape=[0], fields={
"a": []})
},
{
"testcase_name": "EmptyListWithTypeSpecNoFieldsShape0_5",
"pyval": [],
"type_spec": StructuredTensor.Spec._from_shape(DynamicRaggedShape.Spec(
row_partitions=[],
static_inner_shape=[0, 5],
dtype=dtypes.int64)),
"expected": lambda: StructuredTensor.from_fields(shape=[0, 5],
fields={})
},
{
"testcase_name": "EmptyListWithTypeSpecNoFieldsShape1_0",
"pyval": [[]],
"type_spec": StructuredTensor.Spec._from_shape(
DynamicRaggedShape.Spec(
row_partitions=[],
static_inner_shape=[1, 0],
dtype=dtypes.int64)),
"expected": lambda: StructuredTensor.from_shape(
DynamicRaggedShape.from_lengths([1, 0]))
},
{
"testcase_name": "VectorOfDict",
"pyval": [{"a": 1}, {"a": 2}],
"expected": lambda: StructuredTensor.from_fields(shape=[2], fields={
"a": [1, 2]})
},
{
"testcase_name": "VectorOfDictWithNestedStructScalar",
"pyval": [{"a": 1, "b": {"x": [1, 2]}},
{"a": 2, "b": {"x": [3]}}],
"expected": lambda: StructuredTensor.from_fields(shape=[2], fields={
"a": [1, 2],
"b": StructuredTensor.from_fields(shape=[2], fields={
"x": ragged_factory_ops.constant([[1, 2], [3]])})}),
},
{
"testcase_name": "VectorOfDictWithNestedStructVector",
"pyval": [{"a": 1, "b": [{"x": [1, 2]}, {"x": [5]}]},
{"a": 2, "b": [{"x": [3]}]}],
"expected": lambda: StructuredTensor.from_fields(shape=[2], fields={
"a": [1, 2],
"b": StructuredTensor.from_fields(shape=[2, None], fields={
"x": ragged_factory_ops.constant([[[1, 2], [5]], [[3]]])})}),
},
{
"testcase_name": "Ragged2DOfDict",
"pyval": [[{"a": 1}, {"a": 2}, {"a": 3},],
[{"a": 4}, {"a": 5}]],
"expected": lambda: StructuredTensor.from_fields(
shape=[2, None],
fields={
"a": ragged_factory_ops.constant([[1, 2, 3], [4, 5]])})
},
{
# With no type-spec, all tensors>1D are encoded as ragged:
"testcase_name": "MatrixOfDictWithoutTypeSpec",
"pyval": [[{"a": 1}, {"a": 2}, {"a": 3},],
[{"a": 4}, {"a": 5}, {"a": 6}]],
"expected": lambda: StructuredTensor.from_fields(
shape=[2, None], fields={
"a": ragged_factory_ops.constant([[1, 2, 3], [4, 5, 6]])})
},
{
# TypeSpec can be used to specify StructuredTensor shape.
"testcase_name": "MatrixOfDictWithTypeSpec",
"pyval": [[{"a": 1}, {"a": 2}, {"a": 3},],
[{"a": 4}, {"a": 5}, {"a": 6}]],
"type_spec": structured_tensor.StructuredTensorSpec([2, 3], {
"a": tensor.TensorSpec(None, dtypes.int32)}),
"expected": lambda: StructuredTensor.from_fields(
shape=[2, 3], fields={"a": [[1, 2, 3], [4, 5, 6]]})
},
]) # pyformat: disable
  def testPyvalConversion(self, pyval, expected, type_spec=None):
    """Round-trips `pyval` through from_pyval (and to_pyval in eager mode)."""
    expected = expected()  # Deferred init because it creates tensors.
    actual = structured_tensor.StructuredTensor.from_pyval(pyval, type_spec)
    self.assertAllEqual(actual, expected)
    if isinstance(actual, structured_tensor.StructuredTensor):
      if context.executing_eagerly():  # to_pyval only available in eager.
        self.assertEqual(actual.to_pyval(), pyval)
  def testStructuredTensorSpecFactory(self):
    """Spec._from_fields_and_rank builds a spec with the requested rank."""
    spec = StructuredTensor.Spec._from_fields_and_rank(
        fields={
            "a": tensor.TensorSpec([], dtypes.int32),
            "b": tensor.TensorSpec([None], dtypes.int32),
            "c": ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)
        },
        rank=0)
    self.assertEqual(spec.rank, 0)
@parameterized.named_parameters([
dict(
testcase_name="NoFieldsRaggedRank0",
st=lambda: StructuredTensor.from_fields({}, (3,)),
expected=[{}, {}, {}]),
dict(
testcase_name="NoFieldsRaggedRank1",
st=lambda: StructuredTensor.from_fields(
{}, (2, None),
row_partitions=[
row_partition.RowPartition.from_row_lengths([3, 2])]),
expected=[[{}, {}, {}], [{}, {}]]),
dict(
testcase_name="NoFieldsRaggedRank2",
st=lambda: StructuredTensor.from_fields(
{}, (2, None, None),
row_partitions=[
row_partition.RowPartition.from_row_lengths([2, 1]),
row_partition.RowPartition.from_row_lengths([2, 3, 1])]),
expected=[[[{}, {}], [{}, {}, {}]], [[{}]]]),
dict(
testcase_name="NoFieldsRaggedRank2NoDicts",
st=lambda: StructuredTensor.from_fields(
{}, (1, None, None),
row_partitions=[
row_partition.RowPartition.from_row_lengths([2]),
row_partition.RowPartition.from_row_lengths([0, 0])]),
expected=[[[], []]]),
dict(
testcase_name="NestedStructTensorWithNoFields",
st=lambda: StructuredTensor.from_fields(
{
"foo": ragged_factory_ops.constant([[[], []]]),
"bar": StructuredTensor.from_fields(
{}, (1, None, None, None), row_partitions=[
row_partition.RowPartition.from_row_lengths([2]),
row_partition.RowPartition.from_row_lengths([0, 0]),
row_partition.RowPartition.from_row_lengths([]),
])
}, (1, None, None),),
expected=[[[], []]]),
]) # pyformat: disable
  def testToPyval(self, st, expected):
    """Checks `to_pyval` output against `expected` (eager mode only)."""
    if context.executing_eagerly():  # to_pyval only available in eager.
      st = st()  # Deferred init because it creates tensors.
      self.assertEqual(st.to_pyval(), expected)
@parameterized.named_parameters([
dict(testcase_name="MissingKeys",
pyval=[{"a": [1, 2]}, {"b": [3, 4]}],
err=KeyError,
msg="'b'"),
dict(testcase_name="TypeSpecMismatch_DictKey",
pyval={"a": 1},
type_spec=StructuredTensor.Spec._from_fields_and_rank(
fields={"b": tensor.TensorSpec([1], dtypes.int32)},
rank=1),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListDictKey",
pyval=[{"a": 1}],
type_spec=StructuredTensor.Spec._from_fields_and_rank(
fields={"b": tensor.TensorSpec([1], dtypes.int32)},
rank=1),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_RankMismatch",
pyval=[{"a": 1}],
type_spec=StructuredTensor.Spec._from_fields_and_rank(
fields={"a": tensor.TensorSpec([], dtypes.int32)},
rank=0),
msg=r"Value at \(\) does not match typespec \(rank mismatch\)"),
dict(testcase_name="TypeSpecMismatch_Scalar",
pyval=0,
type_spec=StructuredTensor.Spec._from_shape(
DynamicRaggedShape.Spec(
row_partitions=[],
static_inner_shape=[],
dtype=dtypes.int64)),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListTensor",
pyval={"a": [[1]]},
type_spec=StructuredTensor.Spec._from_fields_and_rank(
fields={"a": tensor.TensorSpec([], dtypes.int32)},
rank=0),
msg=r"Value at \('a',\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListTensorDeep",
pyval={"a": {"b": [[1]]}},
type_spec=StructuredTensor.Spec._from_fields_and_rank(
fields={"a": StructuredTensor.Spec._from_fields_and_rank(
fields={"b": tensor.TensorSpec([], dtypes.int32)},
rank=0
)},
rank=0),
msg=r"Value at \('a', 'b'\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListTensorDeep_infer",
pyval={"a": [{"b": [[1]]}, {"b": [["c"]]}]},
type_spec=None,
msg=r"Error parsing path \('a', 'b'\)"),
dict(testcase_name="TypeSpecMismatch_ListTensorDeep_infer2",
pyval=[{"a": 1}, {"a": "c"}],
type_spec=None,
msg=r"Error parsing path \('a',\)"),
dict(testcase_name="TypeSpecMismatch_ListSparse",
pyval=[1, 2],
type_spec=sparse_tensor.SparseTensorSpec([None], dtypes.int32),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="TypeSpecMismatch_ListStruct",
pyval=[[1]],
type_spec=StructuredTensor.Spec._from_fields_and_rank(
fields={"a": tensor.TensorSpec([1, 1], dtypes.int32)},
rank=2),
msg=r"Value at \(\) does not match typespec"),
dict(testcase_name="InconsistentDictionaryDepth",
pyval=[{}, [{}]],
msg="Inconsistent depth of dictionaries"),
dict(testcase_name="FOO",
pyval=[[{}], 5],
msg="Expected dict or nested list/tuple of dict"),
]) # pyformat: disable
  def testFromPyvalError(self, pyval, err=ValueError, type_spec=None, msg=None):
    """Checks invalid pyval/type_spec combinations raise `err` matching msg."""
    with self.assertRaisesRegex(err, msg):
      structured_tensor.StructuredTensor.from_pyval(pyval, type_spec)
def testToPyvalRequiresEagerMode(self):
st = structured_tensor.StructuredTensor.from_pyval({"a": 5})
if not context.executing_eagerly():
with self.assertRaisesRegex(ValueError, "only supported in eager mode."):
st.to_pyval()
@parameterized.named_parameters([
(
"Rank0",
[],
),
(
"Rank1",
[5, 3],
),
(
"Rank2",
[5, 8, 3],
),
(
"Rank5",
[1, 2, 3, 4, 5],
),
])
  def testRowPartitionsFromUniformShape(self, shape):
    """_row_partitions_for_uniform_shape yields uniform row partitions."""
    for rank in range(len(shape)):
      partitions = structured_tensor._row_partitions_for_uniform_shape(
          ops.convert_to_tensor(shape), rank)
      # A rank-r prefix needs r-1 partitions (none for rank 0 or 1).
      self.assertLen(partitions, max(0, rank - 1))
      if partitions:
        self.assertAllEqual(shape[0], partitions[0].nrows())
      # Each partition's uniform row length equals the next shape dimension.
      for (dim, partition) in enumerate(partitions):
        self.assertAllEqual(shape[dim + 1], partition.uniform_row_length())
@parameterized.named_parameters([
# For shapes: U = uniform dimension; R = ragged dimension.
dict(
testcase_name="Shape_UR_Rank2",
rt=[[1, 2], [], [3]],
rt_ragged_rank=1,
rank=2,
expected_row_lengths=[[2, 0, 1]]),
dict(
testcase_name="Shape_URR_Rank2",
rt=[[[1, 2], []], [[3]]],
rt_ragged_rank=2,
rank=2,
expected_row_lengths=[[2, 1]]),
dict(
testcase_name="Shape_URU_Rank2",
rt=[[[1], [2]], [[3]]],
rt_ragged_rank=1,
rank=2,
expected_row_lengths=[[2, 1]]),
dict(
testcase_name="Shape_URR_Rank3",
rt=[[[1, 2], []], [[3]]],
rt_ragged_rank=2,
rank=3,
expected_row_lengths=[[2, 1], [2, 0, 1]]),
dict(
testcase_name="Shape_URU_Rank3",
rt=[[[1], [2]], [[3]]],
rt_ragged_rank=1,
rank=3,
expected_row_lengths=[[2, 1], [1, 1, 1]]),
dict(
testcase_name="Shape_URRUU_Rank2",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=2,
expected_row_lengths=[[1]]),
dict(
testcase_name="Shape_URRUU_Rank3",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=3,
expected_row_lengths=[[1], [1]]),
dict(
testcase_name="Shape_URRUU_Rank4",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=4,
expected_row_lengths=[[1], [1], [1]]),
dict(
testcase_name="Shape_URRUU_Rank5",
rt=[[[[[1, 2]]]]],
rt_ragged_rank=2,
rank=5,
expected_row_lengths=[[1], [1], [1], [2]]),
])
  def testRowPartitionsForRaggedTensor(self, rt, rt_ragged_rank, rank,
                                       expected_row_lengths):
    """_row_partitions_for_ragged_tensor matches the expected row lengths."""
    rt = ragged_factory_ops.constant(rt, rt_ragged_rank)
    partitions = structured_tensor._row_partitions_for_ragged_tensor(
        rt, rank, dtypes.int64)
    # Exactly rank-1 partitions, one per expected row-length list.
    self.assertLen(partitions, rank - 1)
    self.assertLen(partitions, len(expected_row_lengths))
    for partition, expected in zip(partitions, expected_row_lengths):
      self.assertAllEqual(partition.row_lengths(), expected)
  @parameterized.named_parameters([
      dict(
          testcase_name="2D_0_1",
          st=[[{"x": 1}, {"x": 2}], [{"x": 3}]],
          outer_axis=0, inner_axis=1,
          expected=[{"x": 1}, {"x": 2}, {"x": 3}]),
      dict(
          testcase_name="3D_0_1",
          st=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
          outer_axis=0, inner_axis=1,
          expected=[[{"x": 1}, {"x": 2}], [{"x": 3}], [{"x": 4}]]),
      dict(
          testcase_name="3D_1_2",
          st=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
          outer_axis=1, inner_axis=2,
          expected=[[{"x": 1}, {"x": 2}, {"x": 3}], [{"x": 4}]]),
      dict(
          testcase_name="3D_0_2",
          st=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
          outer_axis=0, inner_axis=2,
          expected=[{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4}]),
      dict(
          testcase_name="4D_0_1",
          st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
              [[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
          outer_axis=0, inner_axis=1,
          expected=[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]],
                    [[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]),
      dict(
          testcase_name="4D_0_2",
          st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
              [[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
          outer_axis=0, inner_axis=2,
          expected=[[{"x": 1}, {"x": 2}], [{"x": 3}], [{"x": 4}],
                    [{"x": 5}], [{"x": 6}], [{"x": 7}]]),
      dict(
          testcase_name="4D_0_3",
          st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
              [[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
          outer_axis=0, inner_axis=3,
          expected=[{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4},
                    {"x": 5}, {"x": 6}, {"x": 7}]),
      dict(
          testcase_name="4D_1_2",
          st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
              [[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
          outer_axis=1, inner_axis=2,
          expected=[[[{"x": 1}, {"x": 2}], [{"x": 3}], [{"x": 4}]],
                    [[{"x": 5}], [{"x": 6}], [{"x": 7}]]]),
      dict(
          testcase_name="4D_1_3",
          st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
              [[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
          outer_axis=1, inner_axis=3,
          expected=[[{"x": 1}, {"x": 2}, {"x": 3}, {"x": 4}],
                    [{"x": 5}, {"x": 6}, {"x": 7}]]),
      dict(
          testcase_name="4D_2_3",
          st=[[[[{"x": 1}, {"x": 2}], [{"x": 3}]], [[{"x": 4}]]],
              [[[{"x": 5}]], [[{"x": 6}], [{"x": 7}]]]],
          outer_axis=2, inner_axis=3,
          expected=[[[{"x": 1}, {"x": 2}, {"x": 3}], [{"x": 4}]],
                    [[{"x": 5}], [{"x": 6}, {"x": 7}]]]),
  ])  # pyformat: disable
  def testMergeDims(self, st, outer_axis, inner_axis, expected):
    """merge_dims collapses axes [outer_axis, inner_axis] into one dimension.

    Each case gives the input structured tensor as a pyval, the axis pair to
    merge, and the expected merged pyval.
    """
    st = StructuredTensor.from_pyval(st)
    result = st.merge_dims(outer_axis, inner_axis)
    self.assertAllEqual(result, expected)
def testMergeDimsDetail_3D_0_1(self):
st = StructuredTensor.from_pyval([[[{
"x": 1
}, {
"x": 2
}], [{
"x": 3
}]], [[{
"x": 4
}]]])
result = st.merge_dims(0, 1)
expected_shape = tensor_shape.TensorShape([3, None])
self.assertTrue(expected_shape.is_compatible_with(result.shape))
def testMergeDims_0_1(self):
rt = ragged_tensor.RaggedTensor.from_value_rowids(
array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1])
struct = StructuredTensor.from_fields({"r": rt}, [2])
struct_2 = struct.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
struct_3 = struct_2.partition_outer_dimension(
row_partition.RowPartition.from_row_splits([0, 1, 2]))
self.assertLen(struct_3.row_partitions, 2)
merged = struct_3.merge_dims(0, 1)
self.assertLen(merged.row_partitions, 1)
def testMergeDimsError(self):
st = StructuredTensor.from_pyval([[[{"a": 5}]]])
with self.assertRaisesRegex(
ValueError, r"Expected outer_axis \(2\) to be less than "
r"or equal to inner_axis \(1\)"):
st.merge_dims(2, 1)
def testTupleFieldValue(self):
st = StructuredTensor.from_pyval({"a": 5, "b": {"c": [1, 2, 3]}})
self.assertAllEqual(st.field_value(("a",)), 5)
self.assertAllEqual(st.field_value(("b", "c")), [1, 2, 3])
expected = r"Field path \(.*a.*,.*b.*\) not found in .*"
with self.assertRaisesRegex(KeyError, expected):
st.field_value(("a", "b"))
  @parameterized.named_parameters([
      dict(
          testcase_name="scalar_scalar_scalar",
          st={"b": {"a": 5}},
          source_path=("b", "a"),
          new_field_name="new_field",
          expected={"b": {"a": 5}, "new_field": 5},),
      dict(
          testcase_name="scalar_scalar_repeated",
          st={"b": {"a": [5, 3]}},
          source_path=("b", "a"),
          new_field_name="new_field",
          expected={"b": {"a": [5, 3]}, "new_field": [5, 3]}),
      dict(
          testcase_name="scalar_scalar_repeated2",
          st={"b": {"a": [[7], [5, 3]]}},
          source_path=("b", "a"),
          new_field_name="new_field",
          expected={"b": {"a": [[7], [5, 3]]}, "new_field": [[7], [5, 3]]}),
      dict(
          testcase_name="repeated_scalar_repeated",
          st=[{"b": {"a": [7]}},
              {"b": {"a": [5, 3]}}],
          source_path=("b", "a"),
          new_field_name="new_field",
          expected=[{"b": {"a": [7]}, "new_field": [7]},
                    {"b": {"a": [5, 3]}, "new_field": [5, 3]}]),
      dict(
          testcase_name="repeated_scalar_repeated2",
          st=[{"b": {"a": [[5, 7], []]}},
              {"b": {"a": [[5, 1], [3]]}}],
          source_path=("b", "a"),
          new_field_name="new_field",
          expected=[{"b": {"a": [[5, 7], []]},
                     "new_field": [[5, 7], []]},
                    {"b": {"a": [[5, 1], [3]]},
                     "new_field": [[5, 1], [3]]}]),
      dict(
          testcase_name="scalar_scalar_scalar_scalar",
          st={"a": {"b": {"c": 7}}},
          source_path=("a", "b", "c"),
          new_field_name="new_field",
          expected={"a": {"b": {"c": 7}, "new_field": 7}}),
      dict(
          testcase_name="repeated_scalar_scalar_scalar",
          st=[{"a": {"b": {"c": 7}}},
              {"a": {"b": {"c": 5}}}],
          source_path=("a", "b", "c"),
          new_field_name="new_field",
          expected=[{"a": {"b": {"c": 7}, "new_field": 7}},
                    {"a": {"b": {"c": 5}, "new_field": 5}}],),
      dict(
          testcase_name="repeated_repeated_scalar_scalar",
          st=[{"a": [{"b": {"c": 7}}, {"b": {"c": 3}}]},
              {"a": [{"b": {"c": 5}}]}],
          source_path=("a", "b", "c"),
          new_field_name="new_field",
          expected=[{"a": [{"b": {"c": 7}, "new_field": 7},
                           {"b": {"c": 3}, "new_field": 3}]},
                    {"a": [{"b": {"c": 5}, "new_field": 5}]}]),
      dict(
          testcase_name="docs_tokens",
          st=[{"docs": [{"tokens": [7, 17]}, {"tokens": [3, 13]}]},
              {"docs": [{"tokens": [5, 15]}]}],
          source_path=("docs", "tokens"),
          new_field_name="docs_tokens",
          expected=[{"docs": [{"tokens": [7, 17]}, {"tokens": [3, 13]}],
                     "docs_tokens": [7, 17, 3, 13]},
                    {"docs": [{"tokens": [5, 15]}],
                     "docs_tokens": [5, 15]}],
      ),
      dict(
          testcase_name="repeated_repeated_scalar_repeated",
          st=[{"a": [{"b": {"c": [7, 17]}}, {"b": {"c": [3, 13]}}]},
              {"a": [{"b": {"c": [5, 15]}}]}],
          source_path=("a", "b", "c"),
          new_field_name="new_field",
          expected=[{"a": [{"b": {"c": [7, 17]}, "new_field": [7, 17]},
                           {"b": {"c": [3, 13]}, "new_field": [3, 13]}]},
                    {"a": [{"b": {"c": [5, 15]}, "new_field": [5, 15]}]}]),
      dict(
          testcase_name="scalar_scalar_scalar_repeated",
          st={"a": {"b": {"c": [7, 3, 5]}}},
          source_path=("a", "b", "c"),
          new_field_name="new_field",
          expected={"a": {"b": {"c": [7, 3, 5]}, "new_field": [7, 3, 5]}}),
      dict(
          testcase_name="repeated_repeated_scalar_repeated2",
          st=[{"a": [{"b": {"c": [[7, 3], [17]]}}, {"b": {"c": [[3, 13]]}}]},
              {"a": [{"b": {"c": [[5, 15]]}}]}],
          source_path=("a", "b", "c"),
          new_field_name="new_field",
          expected=[{"a": [{"b": {"c": [[7, 3], [17]]},
                            "new_field": [[7, 3], [17]]},
                           {"b": {"c": [[3, 13]]},
                            "new_field": [[3, 13]]}]},
                    {"a": [{"b": {"c": [[5, 15]]},
                            "new_field": [[5, 15]]}]}]),
      dict(testcase_name="example_4_promote_of_labeled_vector",
           st=[{"user_info": [{"gaia_id": {"vec": [0, 1, 2]}}]},
               {"user_info": [{"gaia_id": {"vec": [3, 4, 5]}}]}],
           source_path=("user_info", "gaia_id"),
           new_field_name="user_info_gaia_id",
           expected=[{"user_info": [{"gaia_id": {"vec": [0, 1, 2]}}],
                      "user_info_gaia_id": [{"vec": [0, 1, 2]}]},
                     {"user_info": [{"gaia_id": {"vec": [3, 4, 5]}}],
                      "user_info_gaia_id": [{"vec": [3, 4, 5]}]}]),
      dict(
          testcase_name="promote_structure",
          st=[{"a": [{"aa": [{"b": {"c": 1}}, {"b": {"c": 8}}]}],},
              {"a": [{"aa": [{"b": {"c": 12}}]}],}],
          source_path=("a", "aa", "b"),
          new_field_name="new_field",
          expected=[{"a": [{"aa": [{"b": {"c": 1}}, {"b": {"c": 8}}],
                           "new_field": [{"c": 1}, {"c": 8}]}]},
                    {"a": [{"aa": [{"b": {"c": 12}}],
                           "new_field": [{"c": 12}]}]}])])  # pyformat: disable
  def testPromote(self, st, source_path, new_field_name, expected):
    """promote copies the field at `source_path` up as `new_field_name`.

    Test-case names encode the repetition pattern along the path (e.g.
    "repeated_scalar_repeated").  The original field is kept; the promoted
    copy is added next to the path's first component.
    """
    st2 = StructuredTensor.from_pyval(st)
    expected2 = StructuredTensor.from_pyval(expected)
    result = st2.promote(source_path, new_field_name)
    self.assertAllEqual(result, expected2)
def testPromoteDense(self):
st = StructuredTensor.from_fields(
{
"a":
StructuredTensor.from_fields(
{"b": [[[1, 11], [2, 12]], [[3, 13], [4, 14]]]},
shape=[2, 2, 2])
},
shape=[2])
result = st.promote(("a", "b"), "new_field")
self.assertEqual(st.rank, 1)
self.assertEqual(st.field_value("a").rank, 3)
self.assertAllEqual(
result.field_value("new_field"), [[1, 11, 2, 12], [3, 13, 4, 14]])
def testMergeDimsGeneric(self):
"""This is an example of a dense tensor being merged, when outer=rank.
Note that outer=rank is equivalent to outer=rank - 1. And yet, from the
perspective of promote, it is nice to be able to have this functionality
directly available, because sometimes the rank of the parent equals the
rank of the child.
Finally, note that merge_dims for Ragged and StructuredTensor would not
accept this as a valid argument.
Note: _merge_dims_generic is private, but these unit tests help to
discuss the proper API definition.
"""
t = array_ops.constant([[[1, 11], [2, 12]], [[3, 13], [4, 14]]])
t2 = structured_tensor._merge_dims_generic(t, 1, 3)
self.assertAllEqual(t2, [[1, 11, 2, 12], [3, 13, 4, 14]])
def testMergeDimsGenericNoop(self):
"""This is an example of a dense tensor being merged, when outer=inner.
Sometimes, when promoting, the parent and grandparent ranks are equal.
Finally, note that merge_dims for Ragged and StructuredTensor would not
accept this as a valid argument. This should be aligned.
"""
t = array_ops.constant([[[1, 11], [2, 12]], [[3, 13], [4, 14]]])
t2 = structured_tensor._merge_dims_generic(t, 2, 2)
self.assertAllEqual(t2, [[[1, 11], [2, 12]], [[3, 13], [4, 14]]])
  def testRepr(self):
    """repr() of a nested StructuredTensor matches the documented layout.

    The expected text differs between eager mode (tf.Tensor with values) and
    graph mode (symbolic Tensor names), so both branches are pinned exactly.
    """
    st = StructuredTensor.from_pyval({"a": 5, "b": {"c": [1, 2, 3]}})
    if context.executing_eagerly():
      # Eager tensors print their values.
      expected = textwrap.dedent("""
          <StructuredTensor(
              fields={
                  "a": tf.Tensor(5, shape=(), dtype=int32),
                  "b": <StructuredTensor(
                      fields={
                          "c": tf.Tensor([1 2 3], shape=(3,), dtype=int32)},
                      shape=())>},
              shape=())>""")[1:]
    else:
      # Graph tensors print their op names instead of values.
      expected = textwrap.dedent("""
          <StructuredTensor(
              fields={
                  "a": Tensor("Const:0", shape=(), dtype=int32),
                  "b": <StructuredTensor(
                      fields={
                          "c": Tensor("RaggedConstant/Const:0", shape=(3,), dtype=int32)},
                      shape=())>},
              shape=())>""")[1:]
    self.assertEqual(repr(st), expected)
def testPartitionOuterDimension2DDenseField(self):
struct = structured_tensor.StructuredTensor.from_fields(
fields={"r": array_ops.constant([[1, 2], [3, 4]])}, shape=[2])
result = struct.partition_outer_dimension(
row_partition.RowPartition.from_uniform_row_length(2, 2))
r = result.field_value("r")
self.assertAllEqual(r, [[[1, 2], [3, 4]]])
  @parameterized.parameters([
      # Simple example.
      (
          {"a": 12, "b": 23},
          {"a": 7},
      ),
      # New field.
      (
          {"a": 12},
          {("b",): 13},
      ),
      # Nested example.
      (
          {"a": 12, "b": {"c": 23}},
          {("b", "c"): 7},
      ),
      # Multiple updates.
      (
          {"a": 12, "b": {"c": 23}},
          {"a": 3, ("b", "c"): 7},
      ),
      # Deep updates.
      (
          {"a": 12, "b": {"c": 23, "d": {"e": 11}}},
          {("b", "c"): 7, ("b", "d", "e"): 13},
      ),
      # Multiple updates to the same substructure.
      (
          {"a": 12, "b": {"c": 23, "d": {"e": 11}}},
          {("b", "c"): 7, ("b", "f"): 13},
      ),
      # Scalar to non-scalar elements. Shape remains unchanged.
      (
          {"a": 5},
          {"a": ragged_factory_ops.constant_value([[51, 52], [61, 62, 63]])},
      ),
      # Non-scalar element to scalar.
      (
          {"c": {"a": [5, 3], "b": 2}},
          {("c", "a"): 5},
      ),
      # Rank-1 StructuredTensor: shape is preserved and an item is added.
      (
          [{"a": 5}, {"a": 6}],
          {"a": [15, 16], "b": np.array([0.9, 1.1])},
      ),
      # Non-scalar ragged elements, within a rank-2 StructuredTensor: elements
      # rows (inner dimensions) are changed, but StructuredTensor shape
      # (outer dimensions) are preserved.
      (
          [[{"a": [5]}], [{"a": [3, 4]}, {"a": [8]}]],
          {"a": ragged_factory_ops.constant_value([[[50, 60]], [[30], []]])},
      ),
  ])  # pyformat: disable
  def testWithUpdatesValues(self, pyval, updates):
    """with_updates writes each update (key path -> value) into the tensor.

    Each case is (input pyval, updates dict); the test confirms that every
    updated path reads back the new value.
    """
    st = StructuredTensor.from_pyval(pyval)
    updated_st = st.with_updates(updates, validate=False)
    for key, value in updates.items():
      got = updated_st.field_value(key)
      self.assertAllEqual(
          value, got,
          "Update failed: key={}, value={}, got={}".format(key, value, got))
def testWithUpdatesFunctions(self):
pyval = {"a": 12, "b": {"c": 23, "d": {"e": 11}}}
st = StructuredTensor.from_pyval(pyval)
st_updated = st.with_updates(
{
"a": lambda x: x + 1,
("b", "d", "e"): lambda x: x + 7
}, validate=True)
# Updated values.
self.assertAllEqual(st_updated.field_value("a"), 13)
self.assertAllEqual(st_updated.field_value(("b", "d", "e")), 18)
# Unchanged value.
self.assertAllEqual(st_updated.field_value(("b", "c")), 23)
def test_from_pyval_list_of_empty(self):
"""See b/183245576."""
st = structured_tensor.StructuredTensor.from_pyval([{}])
self.assertAllEqual([1], st.shape.as_list())
def test_from_pyval_list_of_empty_three(self):
"""See b/183245576."""
st = structured_tensor.StructuredTensor.from_pyval([{}, {}, {}])
self.assertAllEqual([3], st.shape.as_list())
self.assertEmpty(st.field_names())
def test_from_pyval_deep_list_of_empty(self):
"""See b/183245576."""
st = structured_tensor.StructuredTensor.from_pyval([[{
"a": {},
"b": [3, 4]
}, {
"a": {},
"b": [5]
}], [{
"a": {},
"b": [7, 8, 9]
}]])
self.assertAllEqual(2, st.rank)
self.assertEqual(2, st.shape[0])
self.assertEmpty(st.field_value("a").field_names())
  def testWithUpdatesChecks(self):
    """with_updates rejects malformed update specifications.

    Covers: paths through missing or non-StructuredTensor fields, functions
    applied to absent values, empty path components, simultaneous
    parent/child updates, and shape-incompatible replacement values.
    """
    pyval = {"a": 12, "b": {"c": 23, "d": {"e": 11}}}
    st = StructuredTensor.from_pyval(pyval)
    # Try to set non-existent sub-structure.
    with self.assertRaisesRegex(
        ValueError, r"cannot create new sub-field.*\('b', 'x'\).*is not set"):
      st.with_updates({("b", "x", "e"): 5})
    # Try to set with path to a non-sub-structure.
    with self.assertRaisesRegex(
        ValueError, r"cannot create new sub-field.*\('b', 'c'\).*is not a "
        r"`StructuredTensor`"):
      st.with_updates({("b", "c", "e"): 5})
    # Try to apply function to non-existing value.
    with self.assertRaisesRegex(
        ValueError, r"cannot update.*\('b', 'd', 'x'\).*does not already "
        r"exist"):
      st.with_updates({("b", "d", "x"): lambda x: x + 1})
    # Empty names not allowed.
    with self.assertRaisesRegex(ValueError, r"does not allow empty names"):
      st.with_updates({(): lambda x: x + 1})
    with self.assertRaisesRegex(ValueError, r"does not allow empty names"):
      st.with_updates({("b", ""): lambda x: x + 1})
    # Parent and child nodes cannot be updated simultaneously.
    with self.assertRaisesRegex(
        ValueError, r"does not allow both parent and child nodes.*"
        r"parent=\('b'.*child=\('b', 'd'"):
      st.with_updates({("b", "d"): lambda x: x + 1, "a": 3, "b": 10})
    # Invalid shape change.
    with self.assertRaisesRegex(
        ValueError,
        r"`StructuredTensor.with_updates` failed for field \('c',\)"):
      st_with_shape = StructuredTensor.from_pyval([[{
          "c": {
              "a": 5,
              "b": 2
          }
      }], [{
          "c": {
              "a": 3,
              "b": 1
          }
      }, {
          "c": {
              "a": 8,
              "b": 18
          }
      }]])
      # Replacing the ragged "a" field with a scalar changes the shape.
      st_with_shape.with_updates({("c", "a"): 3})
  def testWithUpdatesDelete(self):
    """Mapping a path to None in with_updates deletes that field.

    Also verifies that deleting a field preserves nrows()/row_partitions,
    including when the result ends up with no fields at all (rank 1 and
    rank 0 cases).
    """
    pyval = {"a": 12, "b": {"c": 23, "d": {"e": 11}}}
    st = StructuredTensor.from_pyval(pyval)
    updated_st = st.with_updates({("b", "c"): None}, validate=True)
    self.assertNotIn("c", updated_st.field_value("b").field_names())
    # Deleting a missing field is an error regardless of `validate`.
    with self.assertRaisesRegex(ValueError,
                                r"cannot delete.*\('b', 'x'\).*not present"):
      st.with_updates({("b", "x"): None}, validate=True)
    with self.assertRaisesRegex(ValueError,
                                r"cannot delete.*\'x'.*not present"):
      st.with_updates({"x": None}, validate=False)
    # Test that nrows() and rowpartitions() is preserved after removal.
    pyval = [[{"a": 1}, {"a": 2}], [{"a": 3}]]
    st = StructuredTensor.from_pyval(pyval)
    self.assertLen(st.row_partitions, 1)
    self.assertAllEqual(st.nrows(), 2)
    self.assertAllEqual(st.row_partitions[0].row_lengths(), [2, 1])
    updated_st = st.with_updates({("a",): None}, validate=True)
    self.assertLen(updated_st.row_partitions, 1)
    self.assertAllEqual(updated_st.nrows(), 2)
    self.assertAllEqual(updated_st.row_partitions[0].row_lengths(), [2, 1])
    # Test that it works also for rank-1 and rank-0 empty results.
    pyval = [{"a": 1}, {"a": 2}]
    st = StructuredTensor.from_pyval(pyval)
    self.assertEqual(st.rank, 1)
    updated_st = st.with_updates({("a",): None}, validate=True)
    self.assertEqual(updated_st.rank, 1)
    # assertEqual won't work because nrows() returns a tensor, and
    # assertEqual doesn't do the magic to convert them to numbers in a
    # way that works in eager/non-eager mode.
    self.assertAllEqual(updated_st.nrows(), 2)
    pyval = {"a": [0, 1]}
    st = StructuredTensor.from_pyval(pyval)
    self.assertEqual(st.rank, 0)
    updated_st = st.with_updates({("a",): None}, validate=True)
    self.assertEqual(updated_st.rank, 0)
    # A rank-0 tensor has no row partitions and no nrows().
    self.assertFalse(updated_st.row_partitions)
    self.assertIsNone(updated_st.nrows())
def test_from_pyval_deep_row_partitions(self):
"""See b/179195750."""
st = structured_tensor.StructuredTensor.from_pyval([{
"foo": [{
"bar": [{
"baz": [b"FW"]
}]
}]
}])
st2 = st.field_value(("foo", "bar"))
self.assertLen(st2.row_partitions, st2.rank - 1)
def test_from_fields_deep_row_partitions(self):
"""Test a field with its own row_partition. See b/179195750."""
st = structured_tensor.StructuredTensor.from_pyval([[[{"baz": [b"FW"]}]]])
self.assertLen(st.row_partitions, st.rank - 1)
st2 = structured_tensor.StructuredTensor.from_fields(
fields={"bar": st}, shape=(None, None), validate=False)
st3 = st2.field_value("bar")
self.assertLen(st3.row_partitions, st3.rank - 1)
def test_structured_tensor_spec_shape_property(self):
spec = StructuredTensor.Spec._from_shape(
DynamicRaggedShape.Spec(
row_partitions=[], static_inner_shape=[1, 2], dtype=dtypes.int64))
self.assertEqual(spec.shape.as_list(), [1, 2])
spec = StructuredTensor.Spec._from_shape(
DynamicRaggedShape.Spec(
row_partitions=[], static_inner_shape=[None], dtype=dtypes.int64))
self.assertEqual(spec.shape.as_list(), [None])
def test_dynamic_ragged_shape_init_vector(self):
x = constant_op.constant([1, 2, 3, 4])
y = constant_op.constant([[1, 2], [3, 4], [5, 6], [7, 8]])
fields = {"x": x, "y": y}
nrows = constant_op.constant(4)
shape = tensor_shape.TensorShape((4,))
row_partitions = ()
rs = structured_tensor_dynamic._dynamic_ragged_shape_init(
fields, shape, nrows, row_partitions)
self.assertEqual(
repr(rs._to_tensor_shape()), repr(tensor_shape.TensorShape((4,))))
def test_dynamic_ragged_shape_init_scalar(self):
x = constant_op.constant([1, 2, 3, 4])
y = constant_op.constant([[1, 2], [3, 4], [5, 6], [7, 8]])
fields = {"x": x, "y": y}
nrows = None
shape = tensor_shape.TensorShape(())
row_partitions = ()
rs = structured_tensor_dynamic._dynamic_ragged_shape_init(
fields, shape, nrows, row_partitions)
self.assertEqual(
repr(rs._to_tensor_shape()), repr(tensor_shape.TensorShape(())))
def test_dynamic_ragged_shape_init_ragged(self):
x = ragged_factory_ops.constant_value([[1, 2, 3], [4]])
fields = {"x": x}
nrows = constant_op.constant(2, dtype=dtypes.int64)
shape = tensor_shape.TensorShape([2, None])
row_partitions = tuple(x._nested_row_partitions)
rs = structured_tensor_dynamic._dynamic_ragged_shape_init(
fields, shape, nrows, row_partitions)
self.assertEqual(
repr(rs._to_tensor_shape()), repr(tensor_shape.TensorShape((2, None))))
# Standard test entry point: run all tests in this module.
if __name__ == "__main__":
  googletest.main()
|
StructuredTensorTest
|
python
|
ansible__ansible
|
lib/ansible/_internal/_ssh/_ssh_agent.py
|
{
"start": 5618,
"end": 6891
}
|
class ____(str, VariableSized, enum.Enum):
RSA = "ssh-rsa"
DSA = "ssh-dss"
ECDSA256 = "ecdsa-sha2-nistp256"
SKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com"
ECDSA384 = "ecdsa-sha2-nistp384"
ECDSA521 = "ecdsa-sha2-nistp521"
ED25519 = "ssh-ed25519"
SKED25519 = "sk-ssh-ed25519@openssh.com"
RSASHA256 = "rsa-sha2-256"
RSASHA512 = "rsa-sha2-512"
@property
def main_type(self) -> str:
match self:
case self.RSA:
return 'RSA'
case self.DSA:
return 'DSA'
case self.ECDSA256 | self.ECDSA384 | self.ECDSA521:
return 'ECDSA'
case self.ED25519:
return 'ED25519'
case _:
raise NotImplementedError(self.name)
def to_blob(self) -> bytes:
b_self = self.encode('utf-8')
return uint32(len(b_self)).to_blob() + b_self
@classmethod
def from_blob(cls, blob: memoryview | bytes) -> t.Self:
return cls(bytes(blob).decode('utf-8'))
if HAS_CRYPTOGRAPHY:
_ECDSA_KEY_TYPE: dict[KeyAlgo, type[EllipticCurve]] = {
KeyAlgo.ECDSA256: SECP256R1,
KeyAlgo.ECDSA384: SECP384R1,
KeyAlgo.ECDSA521: SECP521R1,
}
@dataclasses.dataclass
|
KeyAlgo
|
python
|
jina-ai__jina
|
tests/integration/instrumentation/__init__.py
|
{
"start": 1557,
"end": 2569
}
|
class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.meter:
self.request_counter = self.meter.create_counter('request_counter')
else:
self.request_counter = None
@requests(on='/search')
def empty(self, docs: DocumentArray, tracing_context: Optional[Context], **kwargs):
if self.request_counter:
self.request_counter.add(1)
if self.tracer:
with self.tracer.start_span('dummy', context=tracing_context) as span:
span.set_attribute('len_docs', len(docs))
return docs
else:
return docs
def get_services(jaeger_port):
response = http_requests.get(f'http://localhost:{jaeger_port}/api/services')
response.raise_for_status()
response_json = response.json()
services = response_json.get('data', []) or []
return [service for service in services if service != 'jaeger-query']
|
ExecutorTestWithTracing
|
python
|
scrapy__scrapy
|
tests/test_dependencies.py
|
{
"start": 147,
"end": 962
}
|
class ____:
def test_pinned_twisted_version(self):
"""When running tests within a Tox environment with pinned
dependencies, make sure that the version of Twisted is the pinned
version.
See https://github.com/scrapy/scrapy/pull/4814#issuecomment-706230011
"""
if not os.environ.get("_SCRAPY_PINNED", None):
pytest.skip("Not in a pinned environment")
tox_config_file_path = Path(__file__).parent / ".." / "tox.ini"
config_parser = ConfigParser()
config_parser.read(tox_config_file_path)
pattern = r"Twisted==([\d.]+)"
match = re.search(pattern, config_parser["pinned"]["deps"])
pinned_twisted_version_string = match[1]
assert twisted_version.short() == pinned_twisted_version_string
|
TestScrapyUtils
|
python
|
ansible__ansible
|
lib/ansible/_internal/_ssh/_ssh_agent.py
|
{
"start": 3429,
"end": 3832
}
|
class ____:
@classmethod
def from_blob(cls, blob: memoryview | bytes) -> t.Self:
raise NotImplementedError
@classmethod
def consume_from_blob(cls, blob: memoryview | bytes) -> tuple[t.Self, memoryview | bytes]:
length = uint32.from_blob(blob[:4])
blob = blob[4:]
data, rest = _split_blob(blob, length)
return cls.from_blob(data), rest
|
VariableSized
|
python
|
getsentry__sentry
|
src/sentry/types/condition_activity.py
|
{
"start": 491,
"end": 820
}
|
class ____:
group_id: int
type: ConditionActivityType
timestamp: datetime
data: dict[str, Any] = field(default_factory=dict)
def round_to_five_minute(time: datetime) -> datetime:
return time - timedelta(
minutes=time.minute % 5, seconds=time.second, microseconds=time.microsecond
)
|
ConditionActivity
|
python
|
django__django
|
tests/check_framework/test_urls.py
|
{
"start": 376,
"end": 8088
}
|
class ____(SimpleTestCase):
@override_settings(ROOT_URLCONF="check_framework.urls.no_warnings")
def test_no_warnings(self):
result = check_url_config(None)
self.assertEqual(result, [])
@override_settings(ROOT_URLCONF="check_framework.urls.no_warnings_i18n")
def test_no_warnings_i18n(self):
self.assertEqual(check_url_config(None), [])
@override_settings(ROOT_URLCONF="check_framework.urls.warning_in_include")
def test_check_resolver_recursive(self):
# The resolver is checked recursively (examining URL patterns in
# include()).
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, "urls.W001")
@override_settings(ROOT_URLCONF="check_framework.urls.include_with_dollar")
def test_include_with_dollar(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, "urls.W001")
self.assertEqual(
warning.msg,
(
"Your URL pattern '^include-with-dollar$' uses include with a "
"route ending with a '$'. Remove the dollar from the route to "
"avoid problems including URLs."
),
)
@override_settings(ROOT_URLCONF="check_framework.urls.contains_tuple")
def test_contains_tuple_not_url_instance(self):
result = check_url_config(None)
warning = result[0]
self.assertEqual(warning.id, "urls.E004")
self.assertRegex(
warning.msg,
(
r"^Your URL pattern \('\^tuple/\$', <function <lambda> at 0x(\w+)>\) "
r"is invalid. Ensure that urlpatterns is a list of path\(\) and/or "
r"re_path\(\) instances\.$"
),
)
@override_settings(ROOT_URLCONF="check_framework.urls.include_contains_tuple")
def test_contains_included_tuple(self):
result = check_url_config(None)
warning = result[0]
self.assertEqual(warning.id, "urls.E004")
self.assertRegex(
warning.msg,
(
r"^Your URL pattern \('\^tuple/\$', <function <lambda> at 0x(\w+)>\) "
r"is invalid. Ensure that urlpatterns is a list of path\(\) and/or "
r"re_path\(\) instances\.$"
),
)
@override_settings(ROOT_URLCONF="check_framework.urls.beginning_with_slash")
def test_beginning_with_slash(self):
msg = (
"Your URL pattern '%s' has a route beginning with a '/'. Remove "
"this slash as it is unnecessary. If this pattern is targeted in "
"an include(), ensure the include() pattern has a trailing '/'."
)
warning1, warning2 = check_url_config(None)
self.assertEqual(warning1.id, "urls.W002")
self.assertEqual(warning1.msg, msg % "/path-starting-with-slash/")
self.assertEqual(warning2.id, "urls.W002")
self.assertEqual(warning2.msg, msg % "/url-starting-with-slash/$")
@override_settings(
ROOT_URLCONF="check_framework.urls.beginning_with_slash",
APPEND_SLASH=False,
)
def test_beginning_with_slash_append_slash(self):
# It can be useful to start a URL pattern with a slash when
# APPEND_SLASH=False (#27238).
result = check_url_config(None)
self.assertEqual(result, [])
@override_settings(ROOT_URLCONF="check_framework.urls.name_with_colon")
def test_name_with_colon(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, "urls.W003")
expected_msg = (
"Your URL pattern '^$' [name='name_with:colon'] has a name including a ':'."
)
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF=None)
def test_no_root_urlconf_in_settings(self):
delattr(settings, "ROOT_URLCONF")
result = check_url_config(None)
self.assertEqual(result, [])
def test_get_warning_for_invalid_pattern_string(self):
warning = get_warning_for_invalid_pattern("")[0]
self.assertEqual(
warning.hint,
"Try removing the string ''. The list of urlpatterns should "
"not have a prefix string as the first element.",
)
def test_get_warning_for_invalid_pattern_tuple(self):
warning = get_warning_for_invalid_pattern((r"^$", lambda x: x))[0]
self.assertEqual(warning.hint, "Try using path() instead of a tuple.")
def test_get_warning_for_invalid_pattern_other(self):
warning = get_warning_for_invalid_pattern(object())[0]
self.assertIsNone(warning.hint)
@override_settings(ROOT_URLCONF="check_framework.urls.non_unique_namespaces")
def test_check_non_unique_namespaces(self):
result = check_url_namespaces_unique(None)
self.assertEqual(len(result), 2)
non_unique_namespaces = ["app-ns1", "app-1"]
warning_messages = [
"URL namespace '{}' isn't unique. You may not be able to reverse "
"all URLs in this namespace".format(namespace)
for namespace in non_unique_namespaces
]
for warning in result:
self.assertIsInstance(warning, Warning)
self.assertEqual("urls.W005", warning.id)
self.assertIn(warning.msg, warning_messages)
@override_settings(ROOT_URLCONF="check_framework.urls.unique_namespaces")
def test_check_unique_namespaces(self):
result = check_url_namespaces_unique(None)
self.assertEqual(result, [])
@override_settings(ROOT_URLCONF="check_framework.urls.cbv_as_view")
def test_check_view_not_class(self):
self.assertEqual(
check_url_config(None),
[
Error(
"Your URL pattern 'missing_as_view' has an invalid view, pass "
"EmptyCBV.as_view() instead of EmptyCBV.",
id="urls.E009",
),
],
)
@override_settings(
ROOT_URLCONF="check_framework.urls.path_compatibility.matched_angle_brackets"
)
def test_no_warnings_matched_angle_brackets(self):
self.assertEqual(check_url_config(None), [])
@override_settings(
ROOT_URLCONF="check_framework.urls.path_compatibility.unmatched_angle_brackets"
)
def test_warning_unmatched_angle_brackets(self):
self.assertEqual(
check_url_config(None),
[
Warning(
"Your URL pattern 'beginning-with/<angle_bracket' has an unmatched "
"'<' bracket.",
id="urls.W010",
),
Warning(
"Your URL pattern 'ending-with/angle_bracket>' has an unmatched "
"'>' bracket.",
id="urls.W010",
),
Warning(
"Your URL pattern 'closed_angle>/x/<opened_angle' has an unmatched "
"'>' bracket.",
id="urls.W010",
),
Warning(
"Your URL pattern 'closed_angle>/x/<opened_angle' has an unmatched "
"'<' bracket.",
id="urls.W010",
),
Warning(
"Your URL pattern '<mixed>angle_bracket>' has an unmatched '>' "
"bracket.",
id="urls.W010",
),
],
)
|
CheckUrlConfigTests
|
python
|
apache__avro
|
lang/py/avro/protocol.py
|
{
"start": 1358,
"end": 1510
}
|
class ____(TypedDict, total=False):
protocol: str
namespace: str
types: Sequence[str]
messages: Mapping[str, MessageObject]
|
ProtocolObject
|
python
|
pandas-dev__pandas
|
pandas/errors/__init__.py
|
{
"start": 31592,
"end": 32046
}
|
class ____(Warning):
"""
Warning raised by to_stata the column contains a non-valid stata name.
Because the column name is an invalid Stata variable, the name needs to be
converted.
See Also
--------
DataFrame.to_stata : Export DataFrame object to Stata dta format.
Examples
--------
>>> df = pd.DataFrame({"0categories": pd.Series([2, 2])})
>>> df.to_stata("test") # doctest: +SKIP
"""
|
InvalidColumnName
|
python
|
apache__airflow
|
airflow-core/tests/unit/ti_deps/deps/fake_models.py
|
{
"start": 1183,
"end": 1342
}
|
class ____:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def get_running_dagruns(self, _):
return self.running_dagruns
|
FakeDag
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/chains/conversational_retrieval/base.py
|
{
"start": 9672,
"end": 18609
}
|
class ____(BaseConversationalRetrievalChain):
r"""Chain for having a conversation based on retrieved documents.
This class is deprecated. See below for an example implementation using
`create_retrieval_chain`. Additional walkthroughs can be found at
https://python.langchain.com/docs/use_cases/question_answering/chat_history
```python
from langchain_classic.chains import (
create_history_aware_retriever,
create_retrieval_chain,
)
from langchain_classic.chains.combine_documents import (
create_stuff_documents_chain,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
retriever = ... # Your retriever
model = ChatOpenAI()
# Contextualize question
contextualize_q_system_prompt = (
"Given a chat history and the latest user question "
"which might reference context in the chat history, "
"formulate a standalone question which can be understood "
"without the chat history. Do NOT answer the question, just "
"reformulate it if needed and otherwise return it as is."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
[
("system", contextualize_q_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
history_aware_retriever = create_history_aware_retriever(
model, retriever, contextualize_q_prompt
)
# Answer question
qa_system_prompt = (
"You are an assistant for question-answering tasks. Use "
"the following pieces of retrieved context to answer the "
"question. If you don't know the answer, just say that you "
"don't know. Use three sentences maximum and keep the answer "
"concise."
"\n\n"
"{context}"
)
qa_prompt = ChatPromptTemplate.from_messages(
[
("system", qa_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}"),
]
)
# Below we use create_stuff_documents_chain to feed all retrieved context
# into the LLM. Note that we can also use StuffDocumentsChain and other
# instances of BaseCombineDocumentsChain.
question_answer_chain = create_stuff_documents_chain(model, qa_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
# Usage:
chat_history = [] # Collect chat history here (a sequence of messages)
rag_chain.invoke({"input": query, "chat_history": chat_history})
```
This chain takes in chat history (a list of messages) and new questions,
and then returns an answer to that question.
The algorithm for this chain consists of three parts:
1. Use the chat history and the new question to create a "standalone question".
This is done so that this question can be passed into the retrieval step to
fetch relevant documents. If only the new question was passed in, then relevant
context may be lacking. If the whole conversation was passed into retrieval,
there may be unnecessary information there that would distract from retrieval.
2. This new question is passed to the retriever and relevant documents are
returned.
3. The retrieved documents are passed to an LLM along with either the new question
(default behavior) or the original question and chat history to generate a final
response.
Example:
```python
from langchain_classic.chains import (
StuffDocumentsChain,
LLMChain,
ConversationalRetrievalChain,
)
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI
combine_docs_chain = StuffDocumentsChain(...)
vectorstore = ...
retriever = vectorstore.as_retriever()
# This controls how the standalone question is generated.
# Should take `chat_history` and `question` as input variables.
template = (
"Combine the chat history and follow up question into "
"a standalone question. Chat History: {chat_history}"
"Follow up question: {question}"
)
prompt = PromptTemplate.from_template(template)
model = OpenAI()
question_generator_chain = LLMChain(llm=model, prompt=prompt)
chain = ConversationalRetrievalChain(
combine_docs_chain=combine_docs_chain,
retriever=retriever,
question_generator=question_generator_chain,
)
```
"""
retriever: BaseRetriever
"""Retriever to use to fetch documents."""
max_tokens_limit: int | None = None
"""If set, enforces that the documents returned are less than this limit.
This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain.
"""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.max_tokens_limit and isinstance(
self.combine_docs_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content) # noqa: SLF001
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
@override
def _get_docs(
self,
question: str,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
"""Get docs."""
docs = self.retriever.invoke(
question,
config={"callbacks": run_manager.get_child()},
)
return self._reduce_tokens_below_limit(docs)
@override
async def _aget_docs(
self,
question: str,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
"""Get docs."""
docs = await self.retriever.ainvoke(
question,
config={"callbacks": run_manager.get_child()},
)
return self._reduce_tokens_below_limit(docs)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
retriever: BaseRetriever,
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
chain_type: str = "stuff",
verbose: bool = False, # noqa: FBT001,FBT002
condense_question_llm: BaseLanguageModel | None = None,
combine_docs_chain_kwargs: dict | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseConversationalRetrievalChain:
"""Convenience method to load chain from LLM and retriever.
This provides some logic to create the `question_generator` chain
as well as the combine_docs_chain.
Args:
llm: The default language model to use at every part of this chain
(eg in both the question generation and the answering)
retriever: The retriever to use to fetch relevant documents from.
condense_question_prompt: The prompt to use to condense the chat history
and new question into a standalone question.
chain_type: The chain type to use to create the combine_docs_chain, will
be sent to `load_qa_chain`.
verbose: Verbosity flag for logging to stdout.
condense_question_llm: The language model to use for condensing the chat
history and new question into a standalone question. If none is
provided, will default to `llm`.
combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
when constructing the combine_docs_chain.
callbacks: Callbacks to pass to all subchains.
kwargs: Additional parameters to pass when initializing
ConversationalRetrievalChain
"""
combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
doc_chain = load_qa_chain(
llm,
chain_type=chain_type,
verbose=verbose,
callbacks=callbacks,
**combine_docs_chain_kwargs,
)
_llm = condense_question_llm or llm
condense_question_chain = LLMChain(
llm=_llm,
prompt=condense_question_prompt,
verbose=verbose,
callbacks=callbacks,
)
return cls(
retriever=retriever,
combine_docs_chain=doc_chain,
question_generator=condense_question_chain,
callbacks=callbacks,
**kwargs,
)
|
ConversationalRetrievalChain
|
python
|
PyCQA__pylint
|
tests/pyreverse/functional/class_diagrams/relationships_filtering/all.py
|
{
"start": 148,
"end": 228
}
|
class ____:
def __init__(self):
self._x = P("protected")
|
ProtectedAttr
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/metaclass7.py
|
{
"start": 885,
"end": 1080
}
|
class ____(metaclass=MetaClass3):
def __new__(cls, *args, **kwargs):
raise RuntimeError("You cannot instantiate BaseFactory")
v3 = Class3()
reveal_type(v3, expected_text="Any")
|
Class3
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/service/test_base.py
|
{
"start": 3114,
"end": 5059
}
|
class ____:
"""A tf.data service worker."""
def __init__(
self,
dispatcher_address,
shutdown_quiet_period_ms,
protocol=PROTOCOL,
data_transfer_protocol=None,
port=0,
worker_tags=None,
cross_trainer_cache_size_bytes=None,
snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES,
):
self._dispatcher_address = dispatcher_address
self._shutdown_quiet_period_ms = shutdown_quiet_period_ms
self._server = _make_worker(
dispatcher_address,
protocol,
data_transfer_protocol,
shutdown_quiet_period_ms,
port=port,
worker_tags=worker_tags,
cross_trainer_cache_size_bytes=cross_trainer_cache_size_bytes,
snapshot_max_chunk_size_bytes=snapshot_max_chunk_size_bytes,
)
self._running = False
self._protocol = protocol
self._data_transfer_protocol = data_transfer_protocol
def stop(self):
self._server._stop()
self._running = False
def start(self):
self._server.start()
self._port = int(self._server._address.split(":")[1])
self._running = True
def restart(self, use_same_port=True):
"""Restarts the worker, stopping it first if it is already running."""
if self._running:
self.stop()
port = 0
if use_same_port:
port = self._port
self._server = _make_worker(self._dispatcher_address,
self._protocol,
self._data_transfer_protocol,
self._shutdown_quiet_period_ms, port)
self._server.start()
self._port = int(self._server._address.split(":")[1])
self._running = True
def join(self):
self._server.join()
def num_tasks(self):
return self._server._num_tasks()
def snapshot_task_progresses(self):
return self._server._snapshot_task_progresses()
def worker_address(self):
return self._server._address
|
TestWorker
|
python
|
pytorch__pytorch
|
torch/jit/_state.py
|
{
"start": 295,
"end": 3803
}
|
class ____:
"""Stores whether the JIT is enabled or not.
This is just a wrapper for a bool, so that we get reference semantics
"""
def __init__(self) -> None:
self.enabled = self.parse_env(
"PYTORCH_JIT", True, "> Using PyTorch JIT", "> PyTorch JIT DISABLED"
)
def parse_env(self, name, default, true_message, false_message):
value = os.environ.get(name)
if value is None:
return default
if value.lower() in {"1", "true", "yes"}:
return True
elif value.lower() in {"0", "false", "no"}:
return False
if value == "1v":
print(true_message)
return True
elif value == "0v":
print(false_message)
return False
raise ValueError(f"Unknown setting of {name}. Try using 0 or 1.")
def __bool__(self) -> bool:
return self.enabled
_enabled = EnabledProxy()
def disable() -> None:
_enabled.enabled = False
def enable() -> None:
_enabled.enabled = True
# The Python CompilationUnit. All functions and modules defined in Python will
# live in here. It's defined in Python because doing in cpp creates static
# destruction order issues.
_python_cu = torch._C.CompilationUnit()
# python class => ScriptClass mapping
_script_classes: dict[type[Any], type[Any]] = {}
_name_to_pyclass: dict[str, type[Any]] = {}
def _add_script_class(python_class, script_class) -> None:
_script_classes[python_class] = script_class
_name_to_pyclass[script_class.qualified_name()] = python_class
def _get_script_class(python_class):
override = getattr(python_class, "_jit_override_qualname", None)
if override is not None:
python_class = _get_python_class(override)
return _script_classes.get(python_class)
def _get_python_class(qualified_name):
return _name_to_pyclass.get(qualified_name)
def _clear_class_state() -> None:
_script_classes.clear()
_name_to_pyclass.clear()
# Caching: we currently cache compilation of free functions and overloaded functions.
# To cache free functions we hold a weak ref to the function object and
# map to the compiled fn's qualified name.
# To cache overloaded functions we hold a weak ref to the function obj and
# map to all of its overloaded compiled fns.
# In the future we could consider caching more types of objects so that
# aliasing is preserved across separate compilations of the same object.
_jit_caching_layer: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
_jit_function_overload_caching: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
def _try_get_jit_cached_overloads(key):
qual_names = _jit_function_overload_caching.get(key, None)
if qual_names:
return [_python_cu.find_function(qual_name) for qual_name in qual_names]
else:
return None
def _set_jit_overload_cache(key, compiled_fns) -> None:
_jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns]
def _try_get_jit_cached_function(key):
if getattr(key, "__disable_jit_function_caching__", False) is True:
return None
qual_name = _jit_caching_layer.get(key, None)
if qual_name:
return _python_cu.find_function(qual_name)
else:
return None
def _set_jit_function_cache(key, value) -> None:
# only free functions currently supported
assert isinstance(value, torch.jit.ScriptFunction)
_jit_caching_layer[key] = value.qualified_name
|
EnabledProxy
|
python
|
python-openxml__python-docx
|
src/docx/oxml/simpletypes.py
|
{
"start": 5496,
"end": 5887
}
|
class ____(BaseIntType):
@classmethod
def convert_from_xml(cls, str_value: str) -> Length:
if "i" in str_value or "m" in str_value or "p" in str_value:
return ST_UniversalMeasure.convert_from_xml(str_value)
return Emu(int(str_value))
@classmethod
def validate(cls, value: Any) -> None:
ST_CoordinateUnqualified.validate(value)
|
ST_Coordinate
|
python
|
keon__algorithms
|
tests/test_graph.py
|
{
"start": 11259,
"end": 11689
}
|
class ____(unittest.TestCase):
def test_kosaraju_algorithm(self):
V = 6
adj = [
[2],
[0],
[3],
[1, 4],
[5],
[4]
]
result = strongly_connected_components_kosaraju.Kosaraju().kosaraju(V, adj)
# Expected result: 2 strongly connected components
self.assertEqual(result, 2)
|
TestStronglyConnectedComponentsKosaraju
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/models.py
|
{
"start": 1226,
"end": 1466
}
|
class ____:
"""Record of AI interaction with Claude."""
correlation_id: str
timestamp: str
prompt: str
response: str
token_count: Optional[int]
allowed_tools: list[str]
duration_ms: float
@record
|
AIInteraction
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/endpoints/organization_data_condition_index.py
|
{
"start": 1040,
"end": 2775
}
|
class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ISSUES
@extend_schema(
operation_id="Fetch Data Conditions",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
],
responses={
201: inline_sentry_response_serializer(
"ListDataConditionHandlerResponse", list[DataConditionHandlerResponse]
),
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request, organization):
"""
Returns a list of data conditions for a given org
"""
group = request.GET.get("group")
try:
DataConditionHandler.Group(group)
except ValueError:
raise serializers.ValidationError(
f"Please provide a valid group. Accepted values are: {', '.join([group.value for group in DataConditionHandler.Group])}"
)
data_conditions = []
for condition_type, handler in condition_handler_registry.registrations.items():
if condition_type not in LEGACY_CONDITIONS and handler.group == group:
serialized = serialize(
handler,
request.user,
DataConditionHandlerSerializer(),
condition_type=condition_type,
)
data_conditions.append(serialized)
return self.paginate(
request=request,
queryset=data_conditions,
paginator_cls=OffsetPaginator,
)
|
OrganizationDataConditionIndexEndpoint
|
python
|
apache__airflow
|
airflow-core/tests/unit/cli/commands/test_rotate_fernet_key_command.py
|
{
"start": 1315,
"end": 5715
}
|
class ____:
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
def setup_method(self) -> None:
clear_db_connections(add_default_connections_back=False)
clear_db_variables()
def teardown_method(self) -> None:
clear_db_connections(add_default_connections_back=False)
clear_db_variables()
@provide_session
def test_should_rotate_variable(self, session):
fernet_key1 = Fernet.generate_key()
fernet_key2 = Fernet.generate_key()
var1_key = f"{__file__}_var1"
var2_key = f"{__file__}_var2"
# Create unencrypted variable
with conf_vars({("core", "fernet_key"): ""}):
get_fernet.cache_clear() # Clear cached fernet
Variable.set(key=var1_key, value="value")
# Create encrypted variable
with conf_vars({("core", "fernet_key"): fernet_key1.decode()}):
get_fernet.cache_clear() # Clear cached fernet
Variable.set(key=var2_key, value="value")
# Rotate fernet key
with conf_vars({("core", "fernet_key"): f"{fernet_key2.decode()},{fernet_key1.decode()}"}):
get_fernet.cache_clear() # Clear cached fernet
args = self.parser.parse_args(["rotate-fernet-key"])
rotate_fernet_key_command.rotate_fernet_key(args)
# Assert correctness using a new fernet key
with conf_vars({("core", "fernet_key"): fernet_key2.decode()}):
get_fernet.cache_clear() # Clear cached fernet
var1 = session.query(Variable).filter(Variable.key == var1_key).first()
# Unencrypted variable should be unchanged
assert Variable.get(key=var1_key) == "value"
assert var1._val == "value"
assert Variable.get(key=var2_key) == "value"
@provide_session
def test_should_rotate_connection(self, session, mock_supervisor_comms):
fernet_key1 = Fernet.generate_key()
fernet_key2 = Fernet.generate_key()
var1_key = f"{__file__}_var1"
var2_key = f"{__file__}_var2"
# Create unencrypted variable
with conf_vars({("core", "fernet_key"): ""}):
get_fernet.cache_clear() # Clear cached fernet
session.add(Connection(conn_id=var1_key, uri="mysql://user:pass@localhost"))
session.commit()
# Create encrypted variable
with conf_vars({("core", "fernet_key"): fernet_key1.decode()}):
get_fernet.cache_clear() # Clear cached fernet
session.add(Connection(conn_id=var2_key, uri="mysql://user:pass@localhost"))
session.commit()
# Rotate fernet key
with conf_vars({("core", "fernet_key"): f"{fernet_key2.decode()},{fernet_key1.decode()}"}):
get_fernet.cache_clear() # Clear cached fernet
args = self.parser.parse_args(["rotate-fernet-key"])
rotate_fernet_key_command.rotate_fernet_key(args)
def mock_get_connection(conn_id):
conn = session.query(Connection).filter(Connection.conn_id == conn_id).first()
if conn:
from airflow.sdk.execution_time.comms import ConnectionResult
return ConnectionResult(
conn_id=conn.conn_id,
conn_type=conn.conn_type or "mysql", # Provide a default conn_type
host=conn.host,
login=conn.login,
password=conn.password,
schema_=conn.schema,
port=conn.port,
extra=conn.extra,
)
raise Exception(f"Connection {conn_id} not found")
# Mock the send method to return our connection data
mock_supervisor_comms.send.return_value = mock_get_connection(var1_key)
# Assert correctness using a new fernet key
with conf_vars({("core", "fernet_key"): fernet_key2.decode()}):
get_fernet.cache_clear() # Clear cached fernet
# Unencrypted variable should be unchanged
conn1: Connection = BaseHook.get_connection(var1_key)
assert conn1.password == "pass"
# Mock for the second connection
mock_supervisor_comms.send.return_value = mock_get_connection(var2_key)
assert BaseHook.get_connection(var2_key).password == "pass"
|
TestRotateFernetKeyCommand
|
python
|
scipy__scipy
|
scipy/fft/tests/test_helper.py
|
{
"start": 8863,
"end": 14718
}
|
class ____:
def test_py_0d_defaults(self, xp):
x = xp.asarray(4)
shape = None
axes = None
shape_expected = ()
axes_expected = []
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_xp_0d_defaults(self, xp):
x = xp.asarray(7.)
shape = None
axes = None
shape_expected = ()
axes_expected = []
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_py_1d_defaults(self, xp):
x = xp.asarray([1, 2, 3])
shape = None
axes = None
shape_expected = (3,)
axes_expected = [0]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_xp_1d_defaults(self, xp):
x = xp.arange(0, 1, .1)
shape = None
axes = None
shape_expected = (10,)
axes_expected = [0]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_py_2d_defaults(self, xp):
x = xp.asarray([[1, 2, 3, 4],
[5, 6, 7, 8]])
shape = None
axes = None
shape_expected = (2, 4)
axes_expected = [0, 1]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_xp_2d_defaults(self, xp):
x = xp.arange(0, 1, .1)
x = xp.reshape(x, (5, 2))
shape = None
axes = None
shape_expected = (5, 2)
axes_expected = [0, 1]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_xp_5d_defaults(self, xp):
x = xp.zeros([6, 2, 5, 3, 4])
shape = None
axes = None
shape_expected = (6, 2, 5, 3, 4)
axes_expected = [0, 1, 2, 3, 4]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_xp_5d_set_shape(self, xp):
x = xp.zeros([6, 2, 5, 3, 4])
shape = [10, -1, -1, 1, 4]
axes = None
shape_expected = (10, 2, 5, 1, 4)
axes_expected = [0, 1, 2, 3, 4]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_xp_5d_set_axes(self, xp):
x = xp.zeros([6, 2, 5, 3, 4])
shape = None
axes = [4, 1, 2]
shape_expected = (4, 2, 5)
axes_expected = [4, 1, 2]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_xp_5d_set_shape_axes(self, xp):
x = xp.zeros([6, 2, 5, 3, 4])
shape = [10, -1, 2]
axes = [1, 0, 3]
shape_expected = (10, 6, 2)
axes_expected = [1, 0, 3]
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert shape_res == shape_expected
assert axes_res == axes_expected
def test_shape_axes_subset(self, xp):
x = xp.zeros((2, 3, 4, 5))
shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None)
assert shape == (5, 5, 5)
assert axes == [1, 2, 3]
def test_errors(self, xp):
x = xp.zeros(1)
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
_init_nd_shape_and_axes(x, shape=None, axes=[1])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
_init_nd_shape_and_axes(x, shape=None, axes=[-2])
with assert_raises(ValueError,
match="all axes must be unique"):
_init_nd_shape_and_axes(x, shape=None, axes=[0, 0])
with assert_raises(ValueError, match="shape must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None)
with assert_raises(ValueError, match="shape must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None)
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
_init_nd_shape_and_axes(xp.zeros([1, 1, 1, 1]),
shape=[1, 2, 3], axes=[1])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[0\]\) specified"):
_init_nd_shape_and_axes(x, shape=[0], axes=None)
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[-2\]\) specified"):
_init_nd_shape_and_axes(x, shape=-2, axes=None)
|
Test_init_nd_shape_and_axes
|
python
|
huggingface__transformers
|
src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py
|
{
"start": 88375,
"end": 107907
}
|
class ____(MMGroundingDinoPreTrainedModel):
def __init__(self, config: MMGroundingDinoConfig):
super().__init__(config)
# Create backbone + positional encoding
backbone = MMGroundingDinoConvEncoder(config)
position_embeddings = build_position_encoding(config)
self.backbone = MMGroundingDinoConvModel(backbone, position_embeddings)
# Create input projection layers
num_backbone_outs = len(backbone.intermediate_channel_sizes)
input_proj_list = []
for i in range(num_backbone_outs):
in_channels = backbone.intermediate_channel_sizes[i]
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.d_model, kernel_size=1),
nn.GroupNorm(32, config.d_model),
)
)
for _ in range(config.num_feature_levels - num_backbone_outs):
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, config.d_model),
)
)
in_channels = config.d_model
self.input_proj_vision = nn.ModuleList(input_proj_list)
# Create text backbone
self.text_backbone = AutoModel.from_config(config.text_config, add_pooling_layer=False)
self.text_projection = nn.Linear(config.text_config.hidden_size, config.d_model)
if config.embedding_init_target or not config.two_stage:
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
self.encoder = MMGroundingDinoEncoder(config)
self.decoder = MMGroundingDinoDecoder(config)
self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
self.enc_output = nn.Linear(config.d_model, config.d_model)
self.enc_output_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)
self.encoder_output_bbox_embed = MMGroundingDinoMLPPredictionHead(
input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
)
self.encoder_output_class_embed = MMGroundingDinoContrastiveEmbedding(config)
self.post_init()
def freeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(True)
def get_valid_ratio(self, mask):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
valid_height = torch.sum(mask[:, :, 0], 1)
valid_width = torch.sum(mask[:, 0, :], 1)
valid_ratio_height = valid_height.float() / height
valid_ratio_width = valid_width.float() / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
return valid_ratio
def generate_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
"""Generate the encoder output proposals from encoded enc_output.
Args:
enc_output (`torch.Tensor[batch_size, sequence_length, hidden_size]`): Output of the encoder.
padding_mask (`torch.Tensor[batch_size, sequence_length]`): Padding mask for `enc_output`.
spatial_shapes (`torch.Tensor[num_feature_levels, 2]`): Spatial shapes of the feature maps.
Returns:
`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
directly predict a bounding box. (without the need of a decoder)
- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
sigmoid.
"""
batch_size = enc_output.shape[0]
proposals = []
current_position = 0
for level, (height, width) in enumerate(spatial_shapes):
mask_flatten_ = padding_mask[:, current_position : (current_position + height * width)]
mask_flatten_ = mask_flatten_.view(batch_size, height, width, 1)
valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = meshgrid(
torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device),
torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device),
indexing="ij",
)
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
width_height = torch.ones_like(grid) * 0.05 * (2.0**level)
proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
proposals.append(proposal)
current_position += height * width
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid
output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
# assign each pixel as an object query
object_query = enc_output
object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
object_query = object_query.masked_fill(~output_proposals_valid, float(0))
object_query = self.enc_output_norm(self.enc_output(object_query))
return object_query, output_proposals
@auto_docstring
def forward(
self,
pixel_values: Tensor,
input_ids: Tensor,
token_type_ids: Optional[Tensor] = None,
attention_mask: Optional[Tensor] = None,
pixel_mask: Optional[Tensor] = None,
encoder_outputs=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`BertTokenizer.__call__`] for details.
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`: 0 corresponds to a `sentence A` token, 1 corresponds to a `sentence B` token
[What are token type IDs?](../glossary#token-type-ids)
Examples:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "a cat."
>>> processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny")
>>> model = AutoModel.from_pretrained("IDEA-Research/grounding-dino-tiny")
>>> inputs = processor(images=image, text=text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 900, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_self_attention_masks, position_ids = generate_masks_with_special_tokens_and_transfer_map(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
text_token_mask = attention_mask.bool() # just to avoid renaming everywhere
max_text_len = self.config.max_text_len
if text_self_attention_masks.shape[1] > max_text_len:
text_self_attention_masks = text_self_attention_masks[:, :max_text_len, :max_text_len]
position_ids = position_ids[:, :max_text_len]
input_ids = input_ids[:, :max_text_len]
token_type_ids = token_type_ids[:, :max_text_len]
text_token_mask = text_token_mask[:, :max_text_len]
# 3D -> 4D correction (add head dim)
# NOTE: we squeeze this later again as there is custom 3D logic in this model
if text_self_attention_masks.ndim == 3:
text_self_attention_masks = text_self_attention_masks[:, None, :, :]
# Extract text features from text backbone
text_outputs = self.text_backbone(
input_ids, text_self_attention_masks, token_type_ids, position_ids, return_dict=return_dict
)
text_features = text_outputs.last_hidden_state if return_dict else text_outputs[0]
text_features = self.text_projection(text_features)
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device)
# Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper)
# First, sent pixel_values + pixel_mask through Backbone to obtain the features
# which is a list of tuples
vision_features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
# Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
feature_maps = []
masks = []
for level, (source, mask) in enumerate(vision_features):
feature_maps.append(self.input_proj_vision[level](source))
masks.append(mask)
# Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage
if self.config.num_feature_levels > len(feature_maps):
_len_sources = len(feature_maps)
for level in range(_len_sources, self.config.num_feature_levels):
if level == _len_sources:
source = self.input_proj_vision[level](vision_features[-1][0])
else:
source = self.input_proj_vision[level](feature_maps[-1])
mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]
pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
feature_maps.append(source)
masks.append(mask)
position_embeddings_list.append(pos_l)
# Create queries
query_embeds = None
if self.config.embedding_init_target or self.config.two_stage:
query_embeds = self.query_position_embeddings.weight
# Prepare encoder inputs (by flattening)
source_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes_list = []
for level, (source, mask, pos_embed) in enumerate(zip(feature_maps, masks, position_embeddings_list)):
batch_size, num_channels, height, width = source.shape
spatial_shape = (height, width)
spatial_shapes_list.append(spatial_shape)
source = source.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
source_flatten.append(source)
mask_flatten.append(mask)
source_flatten = torch.cat(source_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes_list, dtype=torch.long, device=source_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
valid_ratios = valid_ratios.float()
# Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder
# Also provide spatial_shapes, level_start_index and valid_ratios
if encoder_outputs is None:
encoder_outputs = self.encoder(
vision_features=source_flatten,
vision_attention_mask=~mask_flatten,
vision_position_embedding=lvl_pos_embed_flatten,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
text_features=text_features,
text_attention_mask=~text_token_mask,
text_position_embedding=None,
text_self_attention_masks=~text_self_attention_masks.squeeze(1),
text_position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a MMGroundingDinoEncoderOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, MMGroundingDinoEncoderOutput):
encoder_outputs = MMGroundingDinoEncoderOutput(
last_hidden_state_vision=encoder_outputs[0],
last_hidden_state_text=encoder_outputs[1],
vision_hidden_states=encoder_outputs[2] if output_hidden_states else None,
text_hidden_states=encoder_outputs[3] if output_hidden_states else None,
attentions=encoder_outputs[-1] if output_attentions else None,
)
# Fifth, prepare decoder inputs
topk_proposals = None
enc_outputs_class = None
enc_outputs_coord_logits = None
encoder_logits = None
encoder_pred_boxes = None
if self.config.two_stage:
object_query_embedding, output_proposals = self.generate_encoder_output_proposals(
encoder_outputs[0], ~mask_flatten, spatial_shapes
)
# hack implementation as in two-stage Deformable DETR
# apply a detection head to each pixel (A.4 in paper)
# linear projection for bounding box binary classification (i.e. foreground and background)
enc_outputs_class = self.encoder_output_class_embed(
object_query_embedding, encoder_outputs[1], text_token_mask
)
# 3-layer FFN to predict bounding boxes coordinates (bbox regression branch)
delta_bbox = self.encoder_output_bbox_embed(object_query_embedding)
enc_outputs_coord_logits = delta_bbox + output_proposals
# only keep top scoring `config.num_queries` proposals
topk = self.config.num_queries
topk_logits = enc_outputs_class.max(-1)[0]
topk_proposals = torch.topk(topk_logits, topk, dim=1)[1]
topk_coords_logits = torch.gather(
enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
)
topk_coords_logits = topk_coords_logits.detach()
reference_points = topk_coords_logits.sigmoid()
init_reference_points = reference_points
if query_embeds is not None:
target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
else:
target = torch.gather(
object_query_embedding, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)
).detach()
# Set intermediate topk proposals (coords and class) for loss computation
encoder_pred_boxes = reference_points
encoder_logits = self.encoder_output_class_embed(target, text_features, text_token_mask)
else:
target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
reference_points = self.reference_points.weight.unsqueeze(0).repeat(batch_size, 1, 1).sigmoid()
init_reference_points = reference_points
decoder_outputs = self.decoder(
inputs_embeds=target,
vision_encoder_hidden_states=encoder_outputs[0],
vision_encoder_attention_mask=mask_flatten,
text_encoder_hidden_states=encoder_outputs[1],
text_encoder_attention_mask=~text_token_mask,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
self_attn_mask=None,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
enc_outputs = tuple(
value
for value in [
enc_outputs_class,
enc_outputs_coord_logits,
encoder_logits,
encoder_pred_boxes,
]
if value is not None
)
tuple_outputs = (
(decoder_outputs[0], init_reference_points) + decoder_outputs[1:] + encoder_outputs + enc_outputs
)
return tuple_outputs
return MMGroundingDinoModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
init_reference_points=init_reference_points,
intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
intermediate_reference_points=decoder_outputs.intermediate_reference_points,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state_vision=encoder_outputs.last_hidden_state_vision,
encoder_last_hidden_state_text=encoder_outputs.last_hidden_state_text,
encoder_vision_hidden_states=encoder_outputs.vision_hidden_states,
encoder_text_hidden_states=encoder_outputs.text_hidden_states,
encoder_attentions=encoder_outputs.attentions,
enc_outputs_class=enc_outputs_class,
enc_outputs_coord_logits=enc_outputs_coord_logits,
encoder_logits=encoder_logits,
encoder_pred_boxes=encoder_pred_boxes,
)
|
MMGroundingDinoModel
|
python
|
ray-project__ray
|
python/ray/util/collective/types.py
|
{
"start": 4098,
"end": 4222
}
|
class ____:
    """Options for a point-to-point send collective operation."""

    # Rank of the destination process within the collective group.
    dst_rank = 0
    # Index of the destination GPU on the receiving process.
    dst_gpu_index = 0
    # Number of elements to transfer.
    n_elements = 0
    # Timeout in milliseconds; the module-level sentinel presumably means
    # "no timeout configured" — TODO confirm against unset_timeout_ms's definition.
    timeout_ms = unset_timeout_ms
@dataclass
|
SendOptions
|
python
|
streamlit__streamlit
|
lib/streamlit/errors.py
|
{
"start": 1110,
"end": 1221
}
|
class ____(Error):
    """Raised when something goes wrong in the custom components code path."""
|
CustomComponentError
|
python
|
ansible__ansible
|
lib/ansible/cli/galaxy.py
|
{
"start": 5940,
"end": 94348
}
|
class ____(CLI):
"""Command to manage Ansible roles and collections.
None of the CLI tools are designed to run concurrently with themselves.
Use an external scheduler and/or locking to ensure there are no clashing operations.
"""
name = 'ansible-galaxy'
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
    """Normalize the raw argv before standard CLI initialization.

    Injects the implicit ``role`` subcommand for legacy invocations
    (``ansible-galaxy install`` -> ``ansible-galaxy role install``) and
    rejects the long-removed ``login`` action with an error and exit.

    :param args: the raw argv list (mutated in place when the implicit
        ``role`` subcommand is injected).
    """
    self._raw_args = args
    self._implicit_role = False

    if len(args) > 1:
        # Inject role into sys.argv[1] as a backwards compatibility step
        if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
            # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
            args.insert(1, 'role')
            self._implicit_role = True
        # since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
        if args[1:3] == ['role', 'login']:
            display.error(
                "The login command was removed in late 2020. An API key is now required to publish roles or collections "
                "to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
                "ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
                "command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
            sys.exit(1)

    self.api_servers = []       # populated in run() from config + CLI args
    self.galaxy = None          # Galaxy context object, created in run()
    self.lazy_role_api = None   # RoleDistributionServer, created in run()

    super(GalaxyCLI, self).__init__(args)
@property
def collection_paths(self):
    """Configured collection search paths, minus the directory that holds
    Ansible's bundled internal collections (e.g. ansible._protomatter).
    """
    internal_dir = AnsibleCollectionConfig._internal_collections
    usable = []
    for candidate in AnsibleCollectionConfig.collection_paths:
        if candidate != internal_dir:
            usable.append(candidate)
    return usable
def init_parser(self):
    """Create the options parser for ansible-galaxy.

    Builds the shared parent-parser option groups first, then wires up the
    ``collection`` and ``role`` subcommand trees.  Note the order of the
    ``add_*_options`` calls determines the order actions appear in help.
    """
    super(GalaxyCLI, self).init_parser(
        desc="Perform various Role and Collection related operations.",
    )

    # Common arguments that apply to more than 1 action
    common = opt_help.ArgumentParser(add_help=False)
    common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
    common.add_argument('--api-version', type=int, choices=[2, 3], help=argparse.SUPPRESS)  # Hidden argument that should only be used in our tests
    common.add_argument('--token', '--api-key', dest='api_key',
                        help='The Ansible Galaxy API key which can be found at '
                             'https://galaxy.ansible.com/me/preferences.')
    common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', help='Ignore SSL certificate validation errors.', default=None)

    # --timeout uses the default None to handle two different scenarios.
    # * --timeout > C.GALAXY_SERVER_TIMEOUT for non-configured servers
    # * --timeout > server-specific timeout > C.GALAXY_SERVER_TIMEOUT for configured servers.
    common.add_argument('--timeout', dest='timeout', type=int,
                        help="The time to wait for operations against the galaxy server, defaults to 60s.")

    opt_help.add_verbosity_options(common)

    force = opt_help.ArgumentParser(add_help=False)
    force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
                       help='Force overwriting an existing role or collection')

    github = opt_help.ArgumentParser(add_help=False)
    github.add_argument('github_user', help='GitHub username')
    github.add_argument('github_repo', help='GitHub repository')

    offline = opt_help.ArgumentParser(add_help=False)
    offline.add_argument('--offline', dest='offline', default=False, action='store_true',
                         help="Don't query the galaxy API when creating roles")

    default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
    roles_path = opt_help.ArgumentParser(add_help=False)
    roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
                            default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
                            help='The path to the directory containing your roles. The default is the first '
                                 'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)

    collections_path = opt_help.ArgumentParser(add_help=False)
    collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
                                  action=opt_help.PrependListAction,
                                  help="One or more directories to search for collections in addition "
                                       "to the default COLLECTIONS_PATHS. Separate multiple paths "
                                       "with '{0}'.".format(os.path.pathsep))

    cache_options = opt_help.ArgumentParser(add_help=False)
    cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
                               default=False, help='Clear the existing server response cache.')
    cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
                               help='Do not use the server response cache.')

    # Add sub parser for the Galaxy role type (role or collection)
    type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
    type_parser.required = True

    # Add sub parser for the Galaxy collection actions
    collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
    collection.set_defaults(func=self.execute_collection)  # to satisfy doc build
    collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
    collection_parser.required = True
    self.add_download_options(collection_parser, parents=[common, cache_options])
    self.add_init_options(collection_parser, parents=[common, force])
    self.add_build_options(collection_parser, parents=[common, force])
    self.add_publish_options(collection_parser, parents=[common])
    self.add_install_options(collection_parser, parents=[common, force, cache_options])
    self.add_list_options(collection_parser, parents=[common, collections_path])
    self.add_verify_options(collection_parser, parents=[common, collections_path])

    # Add sub parser for the Galaxy role actions
    role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
    role.set_defaults(func=self.execute_role)  # to satisfy doc build
    role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
    role_parser.required = True
    self.add_init_options(role_parser, parents=[common, force, offline])
    self.add_remove_options(role_parser, parents=[common, roles_path])
    self.add_delete_options(role_parser, parents=[common, github])
    self.add_list_options(role_parser, parents=[common, roles_path])
    self.add_search_options(role_parser, parents=[common])
    self.add_import_options(role_parser, parents=[common, github])
    self.add_setup_options(role_parser, parents=[common, roles_path])
    self.add_info_options(role_parser, parents=[common, roles_path, offline])
    self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_download_options(self, parser, parents=None):
    """Register the ``download`` subcommand (offline-install tarballs)."""
    dl = parser.add_parser('download', parents=parents,
                           help='Download collections and their dependencies as a tarball for an '
                                'offline install.')
    dl.set_defaults(func=self.execute_download)

    dl.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
    dl.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                    help="Don't download collection(s) listed as dependencies.")
    dl.add_argument('-p', '--download-path', dest='download_path',
                    default='./collections',
                    help='The directory to download the collections to.')
    dl.add_argument('-r', '--requirements-file', dest='requirements',
                    help='A file containing a list of collections to be downloaded.')
    dl.add_argument('--pre', dest='allow_pre_release', action='store_true',
                    help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
def add_init_options(self, parser, parents=None):
    """Register the ``init`` subcommand (skeleton creation).

    The option set adapts to whether the parent subtree is the role or
    the collection one, detected via the parent parser's metavar.
    """
    galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'

    init_parser = parser.add_parser('init', parents=parents,
                                    help='Initialize new {0} with the base structure of a '
                                         '{0}.'.format(galaxy_type))
    init_parser.set_defaults(func=self.execute_init)
    init_parser.add_argument('--init-path', dest='init_path', default='./',
                             help='The path in which the skeleton {0} will be created. The default is the '
                                  'current working directory.'.format(galaxy_type))
    init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
                             default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
                             help='The path to a {0} skeleton that the new {0} should be based '
                                  'upon.'.format(galaxy_type))

    obj_name_kwargs = {}
    if galaxy_type == 'collection':
        # Collection names get extra validation (namespace.name form).
        obj_name_kwargs['type'] = validate_collection_name
    init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
                             **obj_name_kwargs)

    if galaxy_type == 'role':
        init_parser.add_argument('--type', dest='role_type', action='store', default='default',
                                 help="Initialize using an alternate role type. Valid types include: 'container', "
                                      "'apb' and 'network'.")
    opt_help.add_runtask_options(init_parser)
def add_remove_options(self, parser, parents=None):
    """Register the ``remove`` subcommand (delete locally installed roles)."""
    rm = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
    rm.set_defaults(func=self.execute_remove)
    rm.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
    """Register the ``delete`` subcommand (server-side role removal)."""
    del_parser = parser.add_parser('delete', parents=parents,
                                   help='Removes the role from Galaxy. It does not remove or alter the actual '
                                        'GitHub repository.')
    del_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
    """Register the ``list`` subcommand for the role or collection subtree."""
    # Same metavar-based subtree detection used by the other add_*_options.
    galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'

    lst = parser.add_parser('list', parents=parents,
                            help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
    lst.set_defaults(func=self.execute_list)

    lst.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
    if galaxy_type == 'collection':
        lst.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
                         help="Format to display the list of collections in.")
def add_search_options(self, parser, parents=None):
    """Register the ``search`` subcommand (query the Galaxy role database)."""
    srch = parser.add_parser('search', parents=parents,
                             help='Search the Galaxy database by tags, platforms, author and multiple '
                                  'keywords.')
    srch.set_defaults(func=self.execute_search)

    srch.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
    srch.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
    srch.add_argument('--author', dest='author', help='GitHub username')
    srch.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
    """Register the ``import`` subcommand (import a GitHub repo as a role)."""
    import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
    import_parser.set_defaults(func=self.execute_import)
    # store_false: the flag's presence disables waiting for results.
    import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                               help="Don't wait for import results.")
    import_parser.add_argument('--branch', dest='reference',
                               help='The name of a branch to import. Defaults to the repository\'s default branch '
                                    '(usually master)')
    import_parser.add_argument('--role-name', dest='role_name',
                               help='The name the role should have, if different than the repo name')
    import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
                               help='Check the status of the most recent import request for given github_'
                                    'user/github_repo.')
def add_setup_options(self, parser, parents=None):
    """Register the ``setup`` subcommand (Galaxy <-> source integrations)."""
    setup_parser = parser.add_parser('setup', parents=parents,
                                     help='Manage the integration between Galaxy and the given source.')
    setup_parser.set_defaults(func=self.execute_setup)
    setup_parser.add_argument('--remove', dest='remove_id', default=None,
                              help='Remove the integration matching the provided ID value. Use --list to see '
                                   'ID values.')
    setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
                              help='List all of your integrations.')
    # Positional arguments for creating a new integration.
    setup_parser.add_argument('source', help='Source')
    setup_parser.add_argument('github_user', help='GitHub username')
    setup_parser.add_argument('github_repo', help='GitHub repository')
    setup_parser.add_argument('secret', help='Secret')
def add_info_options(self, parser, parents=None):
    """Register the ``info`` subcommand (show details for specific roles)."""
    info = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
    info.set_defaults(func=self.execute_info)
    info.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_verify_options(self, parser, parents=None):
    """Register the ``verify`` subcommand (checksum/signature verification
    of installed collections against the server copies)."""
    galaxy_type = 'collection'
    verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums with the collection(s) '
                                      'found on the server and the installed copy. This does not verify dependencies.')
    verify_parser.set_defaults(func=self.execute_verify)

    verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The installed collection(s) name. '
                               'This is mutually exclusive with --requirements-file.')
    verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                               help='Ignore errors during verification and continue with the next specified collection.')
    verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
                               help='Validate collection integrity locally without contacting server for '
                                    'canonical manifest hash.')
    verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
                               help='A file containing a list of collections to be verified.')
    verify_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
                               help='The keyring used during signature verification')  # Eventually default to ~/.ansible/pubring.kbx?
    verify_parser.add_argument('--signature', dest='signatures', action='append',
                               help='An additional signature source to verify the authenticity of the MANIFEST.json before using '
                                    'it to verify the rest of the contents of a collection from a Galaxy server. Use in '
                                    'conjunction with a positional collection name (mutually exclusive with --requirements-file).')
    valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
                                 'or all to signify that all signatures must be used to verify the collection. ' \
                                 'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
    ignore_gpg_status_help = 'A space separated list of status codes to ignore during signature verification (for example, NO_PUBKEY FAILURE). ' \
                             'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).' \
                             'Note: specify these after positional arguments or use -- to separate them.'
    verify_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
                               help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
    # Singular spelling is kept with suppressed help; it shares a dest with
    # the plural form below, so either flag accumulates status codes.
    verify_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
                               help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
                               choices=list(GPG_ERROR_MAP.keys()))
    verify_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+',
                               help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
                               choices=list(GPG_ERROR_MAP.keys()))
def add_install_options(self, parser, parents=None):
    """Register the ``install`` subcommand for roles or collections.

    The option set differs per subtree; the implicit-role invocation
    (bare ``ansible-galaxy install``) may install both roles and
    collections, so it conditionally gains the collection signing options.
    """
    galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'

    args_kwargs = {}
    if galaxy_type == 'collection':
        args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
                              'mutually exclusive with --requirements-file.'
        ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
                             'collection. This will not ignore dependency conflict errors.'
    else:
        args_kwargs['help'] = 'Role name, URL or tar file. This is mutually exclusive with -r.'
        ignore_errors_help = 'Ignore errors and continue with the next specified role.'

    if self._implicit_role:
        # might install both roles and collections
        description_text = (
            'Install roles and collections from file(s), URL(s) or Ansible '
            'Galaxy to the first entry in the config COLLECTIONS_PATH for collections '
            'and first entry in the config ROLES_PATH for roles. '
            'The first entry in the config ROLES_PATH can be overridden by --roles-path '
            'or -p, but this will result in only roles being installed.'
        )
        prog = 'ansible-galaxy install'
    else:
        prog = f"ansible-galaxy {galaxy_type} install"
        description_text = (
            'Install {0}(s) from file(s), URL(s) or Ansible '
            'Galaxy to the first entry in the config {1}S_PATH '
            'unless overridden by --{0}s-path.'.format(galaxy_type, galaxy_type.upper())
        )
    install_parser = parser.add_parser('install', parents=parents,
                                       help='Install {0}(s) from file(s), URL(s) or Ansible '
                                            'Galaxy'.format(galaxy_type),
                                       description=description_text,
                                       prog=prog,)
    install_parser.set_defaults(func=self.execute_install)

    install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
    install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
                                help=ignore_errors_help)

    # --no-deps and --force-with-deps contradict each other.
    install_exclusive = install_parser.add_mutually_exclusive_group()
    install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
                                   help="Don't download {0}s listed as dependencies.".format(galaxy_type))
    install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
                                   help="Force overwriting an existing {0} and its "
                                        "dependencies.".format(galaxy_type))

    valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
                                 'or -1 to signify that all signatures must be used to verify the collection. ' \
                                 'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
    ignore_gpg_status_help = 'A space separated list of status codes to ignore during signature verification (for example, NO_PUBKEY FAILURE). ' \
                             'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).' \
                             'Note: specify these after positional arguments or use -- to separate them.'

    if galaxy_type == 'collection':
        install_parser.add_argument('-p', '--collections-path', dest='collections_path',
                                    default=self._get_default_collection_path(),
                                    help='The path to the directory containing your collections.')
        install_parser.add_argument('-r', '--requirements-file', dest='requirements',
                                    help='A file containing a list of collections to be installed.')
        install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
                                    help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
        install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
                                    help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
        install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
                                    help='The keyring used during signature verification')  # Eventually default to ~/.ansible/pubring.kbx?
        install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
                                    default=C.GALAXY_DISABLE_GPG_VERIFY,
                                    help='Disable GPG signature verification when installing collections from a Galaxy server')
        install_parser.add_argument('--signature', dest='signatures', action='append',
                                    help='An additional signature source to verify the authenticity of the MANIFEST.json before '
                                         'installing the collection from a Galaxy server. Use in conjunction with a positional '
                                         'collection name (mutually exclusive with --requirements-file).')
        install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
                                    help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
        # Singular spelling kept with suppressed help; shares a dest with the
        # plural form below, so either flag accumulates status codes.
        install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
                                    help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
                                    choices=list(GPG_ERROR_MAP.keys()))
        install_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+',
                                    help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
                                    choices=list(GPG_ERROR_MAP.keys()))
        install_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
                                    help='Install collection artifacts (tarballs) without contacting any distribution servers. '
                                         'This does not apply to collections in remote Git repositories or URLs to remote tarballs.'
                                    )
    else:
        if self._implicit_role:
            install_parser.add_argument('-r', '--role-file', dest='requirements',
                                        help='A file containing a list of collections and roles to be installed.')
        else:
            install_parser.add_argument('-r', '--role-file', dest='requirements',
                                        help='A file containing a list of roles to be installed.')

        # Detect whether a requirements file was passed on the raw argv so
        # the collection-signing options are only added when they can apply.
        r_re = re.compile(r'^(?<!-)-[a-zA-Z]*r[a-zA-Z]*')  # -r, -fr
        contains_r = bool([a for a in self._raw_args if r_re.match(a)])
        role_file_re = re.compile(r'--role-file($|=)')  # --role-file foo, --role-file=foo
        contains_role_file = bool([a for a in self._raw_args if role_file_re.match(a)])
        if self._implicit_role and (contains_r or contains_role_file):
            # Any collections in the requirements files will also be installed
            install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
                                        help='The keyring used during collection signature verification')
            install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
                                        default=C.GALAXY_DISABLE_GPG_VERIFY,
                                        help='Disable GPG signature verification when installing collections from a Galaxy server')
            install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
                                        help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
            install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
                                        help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
                                        choices=list(GPG_ERROR_MAP.keys()))
            install_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+',
                                        help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
                                        choices=list(GPG_ERROR_MAP.keys()))

        install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
                                    default=False,
                                    help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
    """Register the ``build`` subcommand (create a collection artifact)."""
    bld = parser.add_parser('build', parents=parents,
                            help='Build an Ansible collection artifact that can be published to Ansible '
                                 'Galaxy.')
    bld.set_defaults(func=self.execute_build)

    bld.add_argument('args', metavar='collection', nargs='*', default=('.',),
                     help='Path to the collection(s) directory to build. This should be the directory '
                          'that contains the galaxy.yml file. The default is the current working '
                          'directory.')
    bld.add_argument('--output-path', dest='output_path', default='./',
                     help='The path in which the collection is built to. The default is the current '
                          'working directory.')
def add_publish_options(self, parser, parents=None):
    """Register the ``publish`` subcommand (upload a collection artifact)."""
    pub = parser.add_parser('publish', parents=parents,
                            help='Publish a collection artifact to Ansible Galaxy.')
    pub.set_defaults(func=self.execute_publish)

    pub.add_argument('args', metavar='collection_path',
                     help='The path to the collection tarball to publish.')
    pub.add_argument('--no-wait', dest='wait', action='store_false', default=True,
                     help="Don't wait for import validation results.")
    pub.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
                     help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
    """Derive certificate-validation options and apply verbosity."""
    options = super(GalaxyCLI, self).post_process_args(options)

    # ensure we have 'usable' cli option
    if options.ignore_certs is None:
        options.validate_certs = None
    else:
        options.validate_certs = not options.ignore_certs
    # the default if validate_certs is None
    if options.validate_certs is not None:
        options.resolved_validate_certs = options.validate_certs
    else:
        options.resolved_validate_certs = not C.GALAXY_IGNORE_CERTS

    display.verbosity = options.verbosity
    return options
def run(self):
    """Resolve the list of Galaxy API servers, then dispatch the action.

    Builds ``self.api_servers`` from configured servers plus any
    ``--server``/``--token`` command-line overrides, creates the lazy
    role-distribution server, and invokes the selected ``execute_*``
    function, returning its result.
    """
    super(GalaxyCLI, self).run()

    self.galaxy = Galaxy()

    # dynamically add per server config depending on declared servers
    C.config.load_galaxy_server_defs(C.GALAXY_SERVER_LIST)

    # Cache-control flags only exist on some actions; forward when present.
    galaxy_options = {}
    for optional_key in ['clear_response_cache', 'no_cache']:
        if optional_key in context.CLIARGS:
            galaxy_options[optional_key] = context.CLIARGS[optional_key]

    config_servers = []

    # Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
    server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
    for server_priority, server_key in enumerate(server_list, start=1):
        # resolve the config created options above with existing config and user options
        server_options = C.config.get_plugin_options(plugin_type='galaxy_server', name=server_key)

        # auth_url is used to create the token, but not directly by GalaxyAPI, so
        # it doesn't need to be passed as kwarg to GalaxyApi, same for others we pop here
        auth_url = server_options.pop('auth_url')
        client_id = server_options.pop('client_id')
        client_secret = server_options.pop('client_secret')
        token_val = server_options['token'] or NoTokenSentinel
        username = server_options['username']

        if server_options['validate_certs'] is None:
            server_options['validate_certs'] = context.CLIARGS['resolved_validate_certs']
        validate_certs = server_options['validate_certs']

        # default case if no auth info is provided.
        server_options['token'] = None

        if username:
            server_options['token'] = BasicAuthToken(username, server_options['password'])
        else:
            if auth_url:
                server_options['token'] = KeycloakToken(
                    access_token=token_val,
                    auth_url=auth_url,
                    validate_certs=validate_certs,
                    client_id=client_id,
                    client_secret=client_secret,
                )
            elif token_val:
                # The galaxy v1 / github / django / 'Token'
                server_options['token'] = GalaxyToken(token=token_val)

        server_options.update(galaxy_options)
        config_servers.append(GalaxyAPI(
            self.galaxy, server_key,
            priority=server_priority,
            **server_options
        ))

    cmd_server = context.CLIARGS['api_server']
    cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])

    # resolved_validate_certs is computed in post_process_args().
    validate_certs = context.CLIARGS['resolved_validate_certs']
    default_server_timeout = context.CLIARGS['timeout'] if context.CLIARGS['timeout'] is not None else C.GALAXY_SERVER_TIMEOUT
    if cmd_server:
        # Cmd args take precedence over the config entry but first check if the arg was a name and use that config
        # entry, otherwise create a new API entry for the server specified.
        config_server = next((s for s in config_servers if s.name == cmd_server), None)
        if config_server:
            self.api_servers.append(config_server)
        else:
            self.api_servers.append(GalaxyAPI(
                self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
                priority=len(config_servers) + 1,
                validate_certs=validate_certs,
                timeout=default_server_timeout,
                **galaxy_options
            ))
    else:
        self.api_servers = config_servers

    # Default to C.GALAXY_SERVER if no servers were defined
    if len(self.api_servers) == 0:
        self.api_servers.append(GalaxyAPI(
            self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
            priority=0,
            validate_certs=validate_certs,
            timeout=default_server_timeout,
            **galaxy_options
        ))

    # checks api versions once a GalaxyRole makes an api call
    # self.api can be used to evaluate the best server immediately
    self.lazy_role_api = RoleDistributionServer(None, self.api_servers)

    return context.CLIARGS['func']()
@property
def api(self):
    # Best-matching Galaxy API server, resolved through the lazy role
    # distribution wrapper (see the comment where lazy_role_api is created:
    # accessing this evaluates the best server immediately).
    return self.lazy_role_api.api
def _get_default_collection_path(self):
    """Return the default collections install location (first configured path)."""
    configured_paths = C.COLLECTIONS_PATHS
    return configured_paths[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None, validate_signature_options=True):
    """
    Parses an Ansible requirement.yml file and returns all the roles and/or collections defined in it. There are 2
    requirements file formats:

        # v1 (roles only)
        - src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
          name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
          scm: If src is a URL, specify the SCM. Only git or hg are supported and defaults to git.
          version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
          include: Path to additional requirements.yml files.

        # v2 (roles and collections)
        ---
        roles:
        # Same as v1 format just under the roles key

        collections:
        - namespace.collection
        - name: namespace.collection
          version: version identifier, multiple identifiers are separated by ','
          source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
          type: git|file|url|galaxy

    :param requirements_file: The path to the requirements file.
    :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
    :param artifacts_manager: Artifacts manager.
    :param validate_signature_options: Passed through to collection requirement parsing.
    :return: a dict containing roles and collections found in the requirements file.
    """
    requirements = {
        'roles': [],
        'collections': [],
    }
    b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
    if not os.path.exists(b_requirements_file):
        raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
    display.vvv("Reading requirement file at '%s'" % requirements_file)
    with open(b_requirements_file, 'rb') as req_obj:
        try:
            file_requirements = yaml_load(req_obj)
        except YAMLError as err:
            raise AnsibleError(
                "Failed to parse the requirements yml at '%s' with the following error:\n%s"
                % (to_native(requirements_file), to_native(err)))
    if file_requirements is None:
        raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))

    def parse_role_req(requirement):
        # A role entry either describes the role inline or points at a
        # further requirements file via 'include'.
        if "include" not in requirement:
            role = RoleRequirement.role_yaml_parse(requirement)
            display.vvv("found role %s in yaml file" % to_text(role))
            if "name" not in role and "src" not in role:
                raise AnsibleError("Must specify name or src for role")
            return [GalaxyRole(self.galaxy, self.lazy_role_api, **role)]
        else:
            b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
            if not os.path.isfile(b_include_path):
                raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
                                   % (to_native(b_include_path), to_native(requirements_file)))
            with open(b_include_path, 'rb') as f_include:
                try:
                    return [GalaxyRole(self.galaxy, self.lazy_role_api, **r) for r in
                            (RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
                except Exception as e:
                    raise AnsibleError("Unable to load data from include requirements file: %s %s"
                                       % (to_native(requirements_file), to_native(e)))

    if isinstance(file_requirements, list):
        # Older format that contains only roles
        if not allow_old_format:
            raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
                               "a list of collections to install")
        for role_req in file_requirements:
            requirements['roles'] += parse_role_req(role_req)
    elif isinstance(file_requirements, dict):
        # Newer format with a collections and/or roles key
        extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
        if extra_keys:
            raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
                               "file. Found: %s" % (to_native(", ".join(extra_keys))))
        for role_req in file_requirements.get('roles') or []:
            requirements['roles'] += parse_role_req(role_req)
        requirements['collections'] = [
            Requirement.from_requirement_dict(
                self._init_coll_req_dict(collection_req),
                artifacts_manager,
                validate_signature_options,
            )
            for collection_req in file_requirements.get('collections') or []
        ]
    else:
        raise AnsibleError(f"Expecting requirements yaml to be a list or dictionary but got {type(file_requirements).__name__}")
    return requirements
def _init_coll_req_dict(self, coll_req):
if not isinstance(coll_req, dict):
# Assume it's a string:
return {'name': coll_req}
if (
'name' not in coll_req or
not coll_req.get('source') or
coll_req.get('type', 'galaxy') != 'galaxy'
):
return coll_req
# Try and match up the requirement source with our list of Galaxy API
# servers defined in the config, otherwise create a server with that
# URL without any auth.
coll_req['source'] = next(
iter(
srvr for srvr in self.api_servers
if coll_req['source'] in {srvr.name, srvr.api_server}
),
GalaxyAPI(
self.galaxy,
'explicit_requirement_{name!s}'.format(
name=coll_req['name'],
),
coll_req['source'],
validate_certs=context.CLIARGS['resolved_validate_certs'],
),
)
return coll_req
@staticmethod
def exit_without_ignore(rc=1):
    """
    Exits with the specified return code unless the
    option --ignore-errors was specified
    """
    if context.CLIARGS['ignore_errors']:
        # Caller asked us to press on despite failures.
        return
    raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
    """Render *role_info* as a tab-indented, human-readable text report."""
    lines = [u"", u"Role: %s" % to_text(role_info['name'])]

    # Prefer the top-level 'description'; fall back to the one nested under
    # 'galaxy_info'.
    galaxy_info = role_info.get('galaxy_info', {})
    description = role_info.get('description', galaxy_info.get('description', ''))
    lines.append(u"\tdescription: %s" % description)

    for key in sorted(role_info):
        if key in GalaxyCLI.SKIP_INFO_KEYS:
            continue
        value = role_info[key]
        if isinstance(value, dict):
            lines.append(u"\t%s:" % key)
            for sub_key in sorted(value):
                if sub_key in GalaxyCLI.SKIP_INFO_KEYS:
                    continue
                lines.append(u"\t\t%s: %s" % (sub_key, value[sub_key]))
        else:
            lines.append(u"\t%s: %s" % (key, value))

    # make sure we have a trailing newline returned
    lines.append(u"")
    return u'\n'.join(lines)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
    # Render the default collection skeleton's galaxy.yml template.
    # Builds required/optional option lists from the galaxy metadata
    # definition and exposes a 'comment_ify' template filter that wraps
    # documentation text into '# '-prefixed YAML comment blocks.
    with open(to_bytes(template_path, errors='surrogate_or_strict'), 'r') as template_obj:
        meta_template = TrustedAsTemplate().tag(to_text(template_obj.read(), errors='surrogate_or_strict'))
    galaxy_meta = get_collections_galaxy_meta_info()
    required_config = []
    optional_config = []
    for meta_entry in galaxy_meta:
        config_list = required_config if meta_entry.get('required', False) else optional_config
        value = inject_data.get(meta_entry['key'], None)
        if not value:
            # Fill in an empty default matching the declared type so the
            # template always has something to render.
            meta_type = meta_entry.get('type', 'str')
            if meta_type == 'str':
                value = ''
            elif meta_type == 'list':
                value = []
            elif meta_type == 'dict':
                value = {}
        meta_entry['value'] = value
        config_list.append(meta_entry)
    # Doc markup: L(text, url) links and C(constant) references.
    link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
    const_pattern = re.compile(r"C\(([^)]+)\)")

    def comment_ify(v):
        # Flatten list values into one sentence stream, strip doc markup,
        # then wrap as a '# ' comment block.
        if isinstance(v, list):
            v = ". ".join([l.rstrip('.') for l in v])
        v = link_pattern.sub(r"\1 <\2>", v)
        v = const_pattern.sub(r"'\1'", v)
        return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)

    loader = DataLoader()
    templar = TemplateEngine(loader, variables={'required_config': required_config, 'optional_config': optional_config})
    templar.environment.filters['comment_ify'] = comment_ify
    meta_value = templar.template(meta_template)
    return meta_value
def _require_one_of_collections_requirements(
        self, collections, requirements_file,
        signatures=None,
        artifacts_manager=None,
):
    """Build the requirements dict from either positional collection names
    or a requirements file — exactly one of the two must be supplied.
    """
    if collections and requirements_file:
        raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
    if not collections and not requirements_file:
        raise AnsibleError("You must specify a collection name or a requirements file.")

    if requirements_file:
        # Signatures belong either on positional args or inside the file,
        # never alongside -r.
        if signatures is not None:
            raise AnsibleError(
                "The --signatures option and --requirements-file are mutually exclusive. "
                "Use the --signatures with positional collection_name args or provide a "
                "'signatures' key for requirements in the --requirements-file."
            )
        resolved_file = GalaxyCLI._resolve_path(requirements_file)
        return self._parse_requirements_file(
            resolved_file,
            allow_old_format=False,
            artifacts_manager=artifacts_manager,
        )

    return {
        'collections': [
            Requirement.from_string(coll_input, artifacts_manager, signatures)
            for coll_input in collections
        ],
        'roles': [],
    }
############################
# execute actions
############################
def execute_role(self):
    """
    Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
    as listed below.
    """
    # To satisfy doc build
    # NOTE: intentionally a no-op; the selected sub-action's execute_* method
    # is dispatched via context.CLIARGS['func'] in run().
    pass
def execute_collection(self):
    """
    Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
    listed below.
    """
    # To satisfy doc build
    # NOTE: intentionally a no-op; the selected sub-action's execute_* method
    # is dispatched via context.CLIARGS['func'] in run().
    pass
def execute_build(self):
    """
    Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
    By default, this command builds from the current working directory. You can optionally pass in the
    collection input path (where the ``galaxy.yml`` file is).
    """
    force_build = context.CLIARGS['force']
    output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
    b_output_path = to_bytes(output_path, errors='surrogate_or_strict')

    # Ensure the destination exists and is a directory.
    if os.path.isfile(b_output_path):
        raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
    if not os.path.exists(b_output_path):
        os.makedirs(b_output_path)

    for raw_input_path in context.CLIARGS['args']:
        resolved_input = GalaxyCLI._resolve_path(raw_input_path)
        build_collection(
            to_text(resolved_input, errors='surrogate_or_strict'),
            to_text(output_path, errors='surrogate_or_strict'),
            force_build,
        )
@with_collection_artifacts_manager
def execute_download(self, artifacts_manager=None):
    """Download collections and their dependencies as a tarball for an offline install.

    :param artifacts_manager: Artifacts manager.
    """
    requirements_file = context.CLIARGS['requirements']
    if requirements_file:
        requirements_file = GalaxyCLI._resolve_path(requirements_file)

    requirements = self._require_one_of_collections_requirements(
        context.CLIARGS['args'], requirements_file,
        artifacts_manager=artifacts_manager,
    )['collections']

    # Make sure the target directory exists before downloading into it.
    download_path = GalaxyCLI._resolve_path(context.CLIARGS['download_path'])
    b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
    if not os.path.exists(b_download_path):
        os.makedirs(b_download_path)

    download_collections(
        requirements, download_path, self.api_servers, context.CLIARGS['no_deps'],
        context.CLIARGS['allow_pre_release'],
        artifacts_manager=artifacts_manager,
    )

    return 0
def execute_init(self):
    """
    Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
    Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
    """
    galaxy_type = context.CLIARGS['type']
    init_path = context.CLIARGS['init_path']
    force = context.CLIARGS['force']
    obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
    obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
    # Variables available to the skeleton's .j2 templates.
    inject_data = dict(
        description='your {0} description'.format(galaxy_type),
        ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
    )
    if galaxy_type == 'role':
        inject_data.update(dict(
            author='your name',
            company='your company (optional)',
            license='license (GPL-2.0-or-later, MIT, etc)',
            role_name=obj_name,
            role_type=context.CLIARGS['role_type'],
            issue_tracker_url='http://example.com/issue/tracker',
            repository_url='http://example.com/repository',
            documentation_url='http://docs.example.com',
            homepage_url='http://example.com',
            min_ansible_version=ansible_version[:3],  # x.y
            dependencies=[],
        ))
        skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
        obj_path = os.path.join(init_path, obj_name)
    elif galaxy_type == 'collection':
        namespace, collection_name = obj_name.split('.', 1)
        inject_data.update(dict(
            namespace=namespace,
            collection_name=collection_name,
            version='1.0.0',
            readme='README.md',
            authors=['your name <example@domain.com>'],
            license=['GPL-2.0-or-later'],
            repository='http://example.com/repository',
            documentation='http://docs.example.com',
            homepage='http://example.com',
            issues='http://example.com/issue/tracker',
            build_ignore=[],
        ))
        skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
        obj_path = os.path.join(init_path, namespace, collection_name)
    # NOTE(review): assumes galaxy_type is 'role' or 'collection' (enforced
    # by the CLI parser elsewhere) -- obj_path is otherwise unbound.
    b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
    if os.path.exists(b_obj_path):
        if os.path.isfile(obj_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
        elif not force:
            raise AnsibleError("- the directory %s already exists. "
                               "You can use --force to re-initialize this directory,\n"
                               "however it will reset any main.yml files that may have\n"
                               "been modified there already." % to_native(obj_path))
        # delete the contents rather than the collection root in case init was run from the root (--init-path ../../)
        for root, dirs, files in os.walk(b_obj_path, topdown=True):
            for old_dir in dirs:
                path = os.path.join(root, old_dir)
                shutil.rmtree(path)
            for old_file in files:
                path = os.path.join(root, old_file)
                os.unlink(path)
    if obj_skeleton is not None:
        own_skeleton = False
    else:
        # No custom skeleton given: fall back to the bundled default one.
        own_skeleton = True
        obj_skeleton = self.galaxy.default_role_skeleton_path
        skeleton_ignore_expressions = ['^.*/.git_keep$']
    obj_skeleton = os.path.expanduser(obj_skeleton)
    skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
    if not os.path.exists(obj_skeleton):
        raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
            to_native(obj_skeleton), galaxy_type)
        )
    loader = DataLoader()
    inject_data.update(load_extra_vars(loader))
    templar = TemplateEngine(loader, variables=inject_data)
    # create role directory
    if not os.path.exists(b_obj_path):
        os.makedirs(b_obj_path)
    for root, dirs, files in os.walk(obj_skeleton, topdown=True):
        rel_root = os.path.relpath(root, obj_skeleton)
        rel_dirs = rel_root.split(os.sep)
        rel_root_dir = rel_dirs[0]
        if galaxy_type == 'collection':
            # A collection can contain templates in playbooks/*/templates and roles/*/templates
            in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
        else:
            in_templates_dir = rel_root_dir == 'templates'
        # Filter out ignored directory names
        # Use [:] to mutate the list os.walk uses
        dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
        for f in files:
            filename, ext = os.path.splitext(f)
            if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                continue
            if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
                # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
                # dynamically which requires special options to be set.
                # The templated data's keys must match the key name but the inject data contains collection_name
                # instead of name. We just make a copy and change the key back to name for this file.
                template_data = inject_data.copy()
                template_data['name'] = template_data.pop('collection_name')
                meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
                b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
                with open(b_dest_file, 'wb') as galaxy_obj:
                    galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
            elif ext == ".j2" and not in_templates_dir:
                # Render every other .j2 file (except runtime template dirs)
                # and write it out without the .j2 suffix.
                src_template = os.path.join(root, f)
                dest_file = os.path.join(obj_path, rel_root, filename)
                template_data = trust_as_template(loader.get_text_file_contents(src_template))
                try:
                    b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
                except AnsibleError as e:
                    # Clean up the partially-created object before failing.
                    shutil.rmtree(b_obj_path)
                    raise AnsibleError(f"Failed to create {galaxy_type.title()} {obj_name}. Templating {src_template} failed with the error: {e}") from e
                with open(dest_file, 'wb') as df:
                    df.write(b_rendered)
            else:
                f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
                shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path), follow_symlinks=False)
        for d in dirs:
            b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
            if os.path.exists(b_dir_path):
                continue
            b_src_dir = to_bytes(os.path.join(root, d), errors='surrogate_or_strict')
            if os.path.islink(b_src_dir):
                # Preserve symlinked directories as links rather than copying.
                shutil.copyfile(b_src_dir, b_dir_path, follow_symlinks=False)
            else:
                os.makedirs(b_dir_path)
    display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
    """
    prints out detailed information about an installed role as well as info available from the galaxy API.
    """
    roles_path = context.CLIARGS['roles_path']
    data = ''
    for role in context.CLIARGS['args']:
        role_info = {'path': roles_path}
        gr = GalaxyRole(self.galaxy, self.lazy_role_api, role)
        install_info = gr.install_info
        if install_info:
            if 'version' in install_info:
                # Rename so it doesn't collide with the galaxy metadata
                # 'version' merged in below.
                install_info['installed_version'] = install_info['version']
                del install_info['version']
            role_info.update(install_info)
        if not context.CLIARGS['offline']:
            remote_data = None
            try:
                remote_data = self.api.lookup_role_by_name(role, False)
            except GalaxyError as e:
                if e.http_code == 400 and 'Bad Request' in e.message:
                    # Role does not exist in Ansible Galaxy
                    # NOTE(review): this reset of `data` discards output already
                    # accumulated for earlier roles -- existing behavior.
                    data = u"- the role %s was not found" % role
                    break
                raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
            if remote_data:
                role_info.update(remote_data)
            else:
                data = u"- the role %s was not found" % role
                break
        elif context.CLIARGS['offline'] and not gr._exists:
            data = u"- the role %s was not found" % role
            break
        if gr.metadata:
            role_info.update(gr.metadata)
        req = RoleRequirement()
        role_spec = req.role_yaml_parse({'role': role})
        if role_spec:
            role_info.update(role_spec)
        data += self._display_role_info(role_info)
    self.pager(data)
@with_collection_artifacts_manager
def execute_verify(self, artifacts_manager=None):
    """Compare checksums with the collection(s) found on the server and the installed copy. This does not verify dependencies.

    :param artifacts_manager: Artifacts manager.
    """
    signatures = context.CLIARGS['signatures']
    if signatures is not None:
        signatures = list(signatures)

    requirements = self._require_one_of_collections_requirements(
        context.CLIARGS['args'], context.CLIARGS['requirements'],
        signatures=signatures,
        artifacts_manager=artifacts_manager,
    )['collections']

    # Verify against every configured collections path.
    resolved_paths = [
        validate_collection_path(GalaxyCLI._resolve_path(search_path))
        for search_path in self.collection_paths
    ]

    results = verify_collections(
        requirements, resolved_paths,
        self.api_servers, context.CLIARGS['ignore_errors'],
        local_verify_only=context.CLIARGS['offline'],
        artifacts_manager=artifacts_manager,
    )

    # Non-zero exit when any collection failed verification.
    if any(result for result in results if not result.success):
        return 1
    return 0
@with_collection_artifacts_manager
def execute_install(self, artifacts_manager=None):
    """
    Install one or more roles(``ansible-galaxy role install``), or one or more collections(``ansible-galaxy collection install``).
    You can pass in a list (roles or collections) or use the file
    option listed below (these are mutually exclusive). If you pass in a list, it
    can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.

    :param artifacts_manager: Artifacts manager.
    """
    install_items = context.CLIARGS['args']
    requirements_file = context.CLIARGS['requirements']
    collection_path = None
    signatures = context.CLIARGS.get('signatures')
    if signatures is not None:
        signatures = list(signatures)
    if requirements_file:
        requirements_file = GalaxyCLI._resolve_path(requirements_file)
    two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
                       "run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
                       "'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
    # TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
    collection_requirements = []
    role_requirements = []
    if context.CLIARGS['type'] == 'collection':
        # Explicit 'collection' mode: only collection requirements count.
        collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
        requirements = self._require_one_of_collections_requirements(
            install_items, requirements_file,
            signatures=signatures,
            artifacts_manager=artifacts_manager,
        )
        collection_requirements = requirements['collections']
        if requirements['roles']:
            display.vvv(two_type_warning.format('role'))
    else:
        if not install_items and requirements_file is None:
            raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
        if requirements_file:
            if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
                raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
            galaxy_args = self._raw_args
            will_install_collections = self._implicit_role and '-p' not in galaxy_args and '--roles-path' not in galaxy_args
            requirements = self._parse_requirements_file(
                requirements_file,
                artifacts_manager=artifacts_manager,
                validate_signature_options=will_install_collections,
            )
            role_requirements = requirements['roles']
            # We can only install collections and roles at the same time if the type wasn't specified and the -p
            # argument was not used. If collections are present in the requirements then at least display a msg.
            if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
                                                '--roles-path' in galaxy_args):
                # We only want to display a warning if 'ansible-galaxy install -r ... -p ...'. Other cases the user
                # was explicit about the type and shouldn't care that collections were skipped.
                display_func = display.warning if self._implicit_role else display.vvv
                display_func(two_type_warning.format('collection'))
            else:
                collection_path = self._get_default_collection_path()
                collection_requirements = requirements['collections']
        else:
            # roles were specified directly, so we'll just go out grab them
            # (and their dependencies, unless the user doesn't want us to).
            for rname in context.CLIARGS['args']:
                role = RoleRequirement.role_yaml_parse(rname.strip())
                role_requirements.append(GalaxyRole(self.galaxy, self.lazy_role_api, **role))
    if not role_requirements and not collection_requirements:
        display.display("Skipping install, no requirements found")
        return
    if role_requirements:
        display.display("Starting galaxy role install process")
        self._execute_install_role(role_requirements)
    if collection_requirements:
        display.display("Starting galaxy collection install process")
        # Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
        # the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
        self._execute_install_collection(
            collection_requirements, collection_path,
            artifacts_manager=artifacts_manager,
        )
def _execute_install_collection(
        self, requirements, path, artifacts_manager,
):
    """Install resolved collection requirements into *path*.

    :param requirements: Collection requirement candidates to install.
    :param path: Target collections path (computed by the caller).
    :param artifacts_manager: Artifacts manager.
    """
    force = context.CLIARGS['force']
    ignore_errors = context.CLIARGS['ignore_errors']
    no_deps = context.CLIARGS['no_deps']
    force_with_deps = context.CLIARGS['force_with_deps']
    try:
        disable_gpg_verify = context.CLIARGS['disable_gpg_verify']
    except KeyError:
        if self._implicit_role:
            raise AnsibleError(
                'Unable to properly parse command line arguments. Please use "ansible-galaxy collection install" '
                'instead of "ansible-galaxy install".'
            )
        raise
    # If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
    allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
    upgrade = context.CLIARGS.get('upgrade', False)
    collections_path = C.COLLECTIONS_PATHS
    managed_paths = set(validate_collection_path(p) for p in C.COLLECTIONS_PATHS)
    read_req_paths = set(validate_collection_path(p) for p in self.collection_paths)
    # Warn when installing outside the configured collections paths.
    unexpected_path = C.GALAXY_COLLECTIONS_PATH_WARNING and not any(p.startswith(path) for p in managed_paths)
    if unexpected_path and any(p.startswith(path) for p in read_req_paths):
        display.warning(
            f"The specified collections path '{path}' appears to be part of the pip Ansible package. "
            "Managing these directly with ansible-galaxy could break the Ansible package. "
            "Install collections to a configured collections path, which will take precedence over "
            "collections found in the PYTHONPATH."
        )
    elif unexpected_path:
        display.warning("The specified collections path '%s' is not part of the configured Ansible "
                        "collections paths '%s'. The installed collection will not be picked up in an Ansible "
                        "run, unless within a playbook-adjacent collections directory." % (to_text(path), to_text(":".join(collections_path))))
    output_path = validate_collection_path(path)
    b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
    if not os.path.exists(b_output_path):
        os.makedirs(b_output_path)
    install_collections(
        requirements, output_path, self.api_servers, ignore_errors,
        no_deps, force, force_with_deps, upgrade,
        allow_pre_release=allow_pre_release,
        artifacts_manager=artifacts_manager,
        disable_gpg_verify=disable_gpg_verify,
        offline=context.CLIARGS.get('offline', False),
        read_requirement_paths=read_req_paths,
    )
    return 0
def _execute_install_role(self, requirements):
    """Install the given GalaxyRole requirements, and (unless --no-deps)
    their dependencies, which are appended to *requirements* while looping
    so they are installed in the same pass.
    """
    role_file = context.CLIARGS['requirements']
    no_deps = context.CLIARGS['no_deps']
    force_deps = context.CLIARGS['force_with_deps']
    force = context.CLIARGS['force'] or force_deps
    for role in requirements:
        # only process roles in roles files when names matches if given
        if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
            display.vvv('Skipping role %s' % role.name)
            continue
        display.vvv('Processing role %s ' % role.name)
        # query the galaxy API for the role data
        if role.install_info is not None:
            if role.install_info['version'] != role.version or force:
                if force:
                    display.display('- changing role %s from %s to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    role.remove()
                else:
                    display.warning('- %s (%s) is already installed - use --force to change version to %s' %
                                    (role.name, role.install_info['version'], role.version or "unspecified"))
                    continue
            else:
                if not force:
                    display.display('- %s is already installed, skipping.' % str(role))
                    continue
        try:
            installed = role.install()
        except AnsibleError as e:
            display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
            self.exit_without_ignore()
            continue
        # install dependencies, if we want them
        if not no_deps and installed:
            if not role.metadata:
                # NOTE: the meta file is also required for installing the role, not just dependencies
                display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
            else:
                role_dependencies = role.metadata_dependencies + role.requirements
                for dep in role_dependencies:
                    display.debug('Installing dep %s' % dep)
                    dep_req = RoleRequirement()
                    dep_info = dep_req.role_yaml_parse(dep)
                    dep_role = GalaxyRole(self.galaxy, self.lazy_role_api, **dep_info)
                    if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                        # we know we can skip this, as it's not going to
                        # be found on galaxy.ansible.com
                        continue
                    if dep_role.install_info is None:
                        if dep_role not in requirements:
                            display.display('- adding dependency: %s' % to_text(dep_role))
                            # appending extends the list this loop iterates,
                            # so the dependency is installed in this pass
                            requirements.append(dep_role)
                        else:
                            display.display('- dependency %s already pending installation.' % dep_role.name)
                    else:
                        if dep_role.install_info['version'] != dep_role.version:
                            if force_deps:
                                display.display('- changing dependent role %s from %s to %s' %
                                                (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
                                dep_role.remove()
                                requirements.append(dep_role)
                            else:
                                display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
                                                (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
                        else:
                            if force_deps:
                                requirements.append(dep_role)
                            else:
                                display.display('- dependency %s is already installed, skipping.' % dep_role.name)
        if not installed:
            display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()
    return 0
def execute_remove(self):
    """
    removes the list of roles passed as arguments from the local system.
    """
    role_names = context.CLIARGS['args']
    if not role_names:
        raise AnsibleOptionsError('- you must specify at least one role to remove.')

    for role_name in role_names:
        role = GalaxyRole(self.galaxy, self.api, role_name)
        try:
            # remove() reports whether anything was actually deleted.
            outcome = '- successfully removed %s' if role.remove() else '- %s is not installed, skipping.'
            display.display(outcome % role_name)
        except Exception as e:
            raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))

    return 0
def execute_list(self):
    """
    List installed collections or roles
    """
    # Dispatch to the type-specific listing implementation.
    handlers = {
        'role': self.execute_list_role,
        'collection': self.execute_list_collection,
    }
    list_type = context.CLIARGS['type']
    if list_type in handlers:
        handlers[list_type]()
def execute_list_role(self):
    """
    List all roles installed on the local system or a specific role
    """
    path_found = False
    role_found = False
    warnings = []
    roles_search_paths = context.CLIARGS['roles_path']
    role_name = context.CLIARGS['role']
    for path in roles_search_paths:
        role_path = GalaxyCLI._resolve_path(path)
        # NOTE(review): checks the unresolved 'path' here but uses the
        # resolved 'role_path' below -- looks inconsistent; confirm intended.
        if os.path.isdir(path):
            path_found = True
        else:
            warnings.append("- the configured path {0} does not exist.".format(path))
            continue
        if role_name:
            # show the requested role, if it exists
            gr = GalaxyRole(self.galaxy, self.lazy_role_api, role_name, path=os.path.join(role_path, role_name))
            if os.path.isdir(gr.path):
                role_found = True
                display.display('# %s' % os.path.dirname(gr.path))
                _display_role(gr)
                break
            warnings.append("- the role %s was not found" % role_name)
        else:
            if not os.path.exists(role_path):
                warnings.append("- the configured path %s does not exist." % role_path)
                continue
            if not os.path.isdir(role_path):
                warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
                continue
            display.display('# %s' % role_path)
            path_files = os.listdir(role_path)
            for path_file in path_files:
                # NOTE(review): passes the unresolved 'path' (not 'role_path')
                # to GalaxyRole -- verify against upstream intent.
                gr = GalaxyRole(self.galaxy, self.lazy_role_api, path_file, path=path)
                if gr.metadata:
                    _display_role(gr)
    # Do not warn if the role was found in any of the search paths
    if role_found and role_name:
        warnings = []
    for w in warnings:
        display.warning(w)
    if not path_found:
        raise AnsibleOptionsError(
            "- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type'])
        )
    return 0
@with_collection_artifacts_manager
def execute_list_collection(self, artifacts_manager=None):
"""
List all collections installed on the local system
:param artifacts_manager: Artifacts manager.
"""
if artifacts_manager is not None:
artifacts_manager.require_build_metadata = False
output_format = context.CLIARGS['output_format']
collection_name = context.CLIARGS['collection']
default_collections_path = set(C.COLLECTIONS_PATHS)
collections_search_paths = (
set(context.CLIARGS['collections_path'] or []) | default_collections_path | set(self.collection_paths)
)
collections_in_paths = {}
warnings = []
path_found = False
collection_found = False
namespace_filter = None
collection_filter = None
if collection_name:
# list a specific collection
validate_collection_name(collection_name)
namespace_filter, collection_filter = collection_name.split('.')
collections = list(find_existing_collections(
list(collections_search_paths),
artifacts_manager,
namespace_filter=namespace_filter,
collection_filter=collection_filter,
dedupe=False
))
seen = set()
fqcn_width, version_width = _get_collection_widths(collections)
for collection in sorted(collections, key=lambda c: c.src):
collection_found = True
collection_path = pathlib.Path(to_text(collection.src)).parent.parent.as_posix()
if output_format in {'yaml', 'json'}:
collections_in_paths.setdefault(collection_path, {})
collections_in_paths[collection_path][collection.fqcn] = {'version': collection.ver}
else:
if collection_path not in seen:
_display_header(
collection_path,
'Collection',
'Version',
fqcn_width,
version_width
)
seen.add(collection_path)
_display_collection(collection, fqcn_width, version_width)
path_found = False
for path in collections_search_paths:
if not os.path.exists(path):
if path in default_collections_path:
# don't warn for missing default paths
continue
warnings.append("- the configured path {0} does not exist.".format(path))
elif os.path.exists(path) and not os.path.isdir(path):
warnings.append("- the configured path {0}, exists, but it is not a directory.".format(path))
else:
path_found = True
# Do not warn if the specific collection was found in any of the search paths
if collection_found and collection_name:
warnings = []
for w in warnings:
display.warning(w)
if not collections and not path_found:
raise AnsibleOptionsError(
"- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type'])
)
if output_format == 'json':
display.display(json.dumps(collections_in_paths))
elif output_format == 'yaml':
display.display(yaml_dump(collections_in_paths))
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
""" searches for roles on the Ansible Galaxy server"""
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.warning("No roles match your search.")
return 0
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return 0
_task_check_delay_sec = 10 # allows unit test override
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
rc = 0
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return rc
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if (state := task[0]['state']) in ['SUCCESS', 'FAILED']:
rc = ['SUCCESS', 'FAILED'].index(state)
finished = True
else:
time.sleep(self._task_check_delay_sec)
return rc
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return 0
def main(args=None):
GalaxyCLI.cli_executor(args)
if __name__ == '__main__':
main()
|
GalaxyCLI
|
python
|
getsentry__sentry
|
tests/sentry/integrations/vsts/test_issues.py
|
{
"start": 6529,
"end": 22470
}
|
class ____(VstsIssueBase):
def tearDown(self) -> None:
responses.reset()
@responses.activate
def test_create_issue(self) -> None:
responses.add(
responses.PATCH,
"https://fabrikam-fiber-inc.visualstudio.com/0987654321/_apis/wit/workitems/$Microsoft.VSTS.WorkItemTypes.Task",
body=WORK_ITEM_RESPONSE,
content_type="application/json",
)
form_data = {
"title": "Hello",
"description": "Fix this.",
"project": "0987654321",
"work_item_type": "Microsoft.VSTS.WorkItemTypes.Task",
}
assert self.integration.create_issue(form_data) == {
"key": self.issue_id,
"description": "Fix this.",
"title": "Hello",
"metadata": {"display_name": "Fabrikam-Fiber-Git#309"},
}
request = responses.calls[-1].request
assert request.headers["Content-Type"] == "application/json-patch+json"
payload = orjson.loads(request.body)
assert payload == [
{"op": "add", "path": "/fields/System.Title", "value": "Hello"},
# Adds both a comment and a description.
# See method for details.
{"op": "add", "path": "/fields/System.Description", "value": "<p>Fix this.</p>\n"},
{"op": "add", "path": "/fields/System.History", "value": "<p>Fix this.</p>\n"},
]
@patch(
"sentry.integrations.vsts.client.VstsApiClient.create_work_item",
side_effect=ApiError(
"Error Communicating with Azure DevOps (HTTP 400): TF401320: Rule Error for field xxx. Error code: Required, HasValues, LimitedToValues, AllowsOldValue, InvalidEmpty."
),
)
@responses.activate
def test_create_issue_integration_form_error(self, create_work_item: MagicMock) -> None:
form_data = {
"title": "Hello",
"description": "Fix this.",
"project": "0987654321",
"work_item_type": "Microsoft.VSTS.WorkItemTypes.Task",
}
with pytest.raises(IntegrationFormError):
self.integration.create_issue(form_data)
@responses.activate
def test_create_issue_title_too_long(self) -> None:
responses.add(
responses.PATCH,
"https://fabrikam-fiber-inc.visualstudio.com/0987654321/_apis/wit/workitems/$Microsoft.VSTS.WorkItemTypes.Task",
body=WORK_ITEM_RESPONSE,
content_type="application/json",
)
long_title = "A" * 200 # Title longer than VSTS's 128 character limit
expected_title = "A" * 125 + "..."
form_data = {
"title": long_title,
"description": "Fix this.",
"project": "0987654321",
"work_item_type": "Microsoft.VSTS.WorkItemTypes.Task",
}
assert self.integration.create_issue(form_data) == {
"key": self.issue_id,
"description": "Fix this.",
"title": expected_title,
"metadata": {"display_name": "Fabrikam-Fiber-Git#309"},
}
request = responses.calls[-1].request
assert request.headers["Content-Type"] == "application/json-patch+json"
payload = orjson.loads(request.body)
assert payload == [
{"op": "add", "path": "/fields/System.Title", "value": expected_title},
# Adds both a comment and a description.
# See method for details.
{"op": "add", "path": "/fields/System.Description", "value": "<p>Fix this.</p>\n"},
{"op": "add", "path": "/fields/System.History", "value": "<p>Fix this.</p>\n"},
]
@responses.activate
def test_create_issue_failure(self) -> None:
form_data = {
"title": "rip",
"description": "Goodnight, sweet prince",
}
with pytest.raises(IntegrationFormError):
self.integration.create_issue(form_data)
@responses.activate
def test_get_issue(self) -> None:
responses.add(
responses.GET,
f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{self.issue_id}",
body=WORK_ITEM_RESPONSE,
content_type="application/json",
)
assert self.integration.get_issue(self.issue_id) == {
"key": self.issue_id,
"description": "Fix this.",
"title": "Hello",
"metadata": {"display_name": "Fabrikam-Fiber-Git#309"},
}
request = responses.calls[-1].request
assert request.headers["Content-Type"] == "application/json"
@responses.activate
@patch("sentry.integrations.vsts.client.VstsApiClient._use_proxy_url_for_tests")
def test_sync_assignee_outbound(self, use_proxy_url_for_tests: MagicMock) -> None:
use_proxy_url_for_tests.return_value = True
vsts_work_item_id = 5
generate_mock_response(
method=responses.PATCH,
body=WORK_ITEM_RESPONSE,
content_type="application/json",
path=f"_apis/wit/workitems/{vsts_work_item_id}",
non_region_url=f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{vsts_work_item_id}",
)
generate_mock_response(
method=responses.GET,
body=GET_USERS_RESPONSE,
content_type="application/json",
path="_apis/graph/users",
non_region_url="https://fabrikam-fiber-inc.vssps.visualstudio.com/_apis/graph/users",
)
user = user_service.get_user(user_id=self.create_user("ftotten@vscsi.us").id)
external_issue = ExternalIssue.objects.create(
organization_id=self.organization.id,
integration_id=self.integration.model.id,
key=vsts_work_item_id,
title="I'm a title!",
description="I'm a description.",
)
self.integration.sync_assignee_outbound(external_issue, user, assign=True)
assert len(responses.calls) == 2
assert_response_calls(
expected_region_response=[
"_apis/graph/users",
f"_apis/wit/workitems/{vsts_work_item_id}",
],
expected_non_region_response=[
"https://fabrikam-fiber-inc.vssps.visualstudio.com/_apis/graph/users",
f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{vsts_work_item_id}",
],
)
request_body = orjson.loads(responses.calls[1].request.body)
assert len(request_body) == 1
assert request_body[0]["path"] == "/fields/System.AssignedTo"
assert request_body[0]["value"] == "ftotten@vscsi.us"
assert request_body[0]["op"] == "replace"
assert responses.calls[1].response.status_code == 200
@responses.activate
@patch("sentry.integrations.vsts.client.VstsApiClient._use_proxy_url_for_tests")
def test_sync_assignee_outbound_with_paging(self, use_proxy_url_for_tests: MagicMock) -> None:
use_proxy_url_for_tests.return_value = True
vsts_work_item_id = 5
generate_mock_response(
method=responses.PATCH,
body=WORK_ITEM_RESPONSE,
content_type="application/json",
path=f"_apis/wit/workitems/{vsts_work_item_id}",
non_region_url=f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{vsts_work_item_id}",
)
generate_mock_response(
method=responses.GET,
json={
"value": [
{"mailAddress": "example1@example.com"},
{"mailAddress": "example2@example.com"},
{"mailAddress": "example3@example.com"},
]
},
headers={"X-MS-ContinuationToken": "continuation-token"},
path="_apis/graph/users",
non_region_url="https://fabrikam-fiber-inc.vssps.visualstudio.com/_apis/graph/users",
)
generate_mock_response(
method=responses.GET,
body=GET_USERS_RESPONSE,
content_type="application/json",
path="_apis/graph/users?continuationToken=continuation-token",
non_region_url="https://fabrikam-fiber-inc.vssps.visualstudio.com/_apis/graph/users",
)
user = user_service.get_user(user_id=self.create_user("ftotten@vscsi.us").id)
external_issue = ExternalIssue.objects.create(
organization_id=self.organization.id,
integration_id=self.integration.model.id,
key=vsts_work_item_id,
title="I'm a title!",
description="I'm a description.",
)
self.integration.sync_assignee_outbound(external_issue, user, assign=True)
assert len(responses.calls) == 3
assert_response_calls(
expected_region_response=[
"_apis/graph/users",
"_apis/graph/users?continuationToken=continuation-token",
f"_apis/wit/workitems/{vsts_work_item_id}",
],
expected_non_region_response=[
"https://fabrikam-fiber-inc.vssps.visualstudio.com/_apis/graph/users",
"https://fabrikam-fiber-inc.vssps.visualstudio.com/_apis/graph/users?continuationToken=continuation-token",
f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{vsts_work_item_id}",
],
)
request_body = orjson.loads(responses.calls[2].request.body)
assert len(request_body) == 1
assert request_body[0]["path"] == "/fields/System.AssignedTo"
assert request_body[0]["value"] == "ftotten@vscsi.us"
assert request_body[0]["op"] == "replace"
assert responses.calls[2].response.status_code == 200
@responses.activate
def test_sync_status_outbound(self) -> None:
vsts_work_item_id = 5
responses.add(
responses.PATCH,
f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{vsts_work_item_id}",
body=WORK_ITEM_RESPONSE,
content_type="application/json",
)
responses.add(
responses.GET,
"https://fabrikam-fiber-inc.vssps.visualstudio.com/_apis/graph/users",
body=GET_USERS_RESPONSE,
content_type="application/json",
)
responses.add(
responses.GET,
f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{vsts_work_item_id}",
body=WORK_ITEM_RESPONSE,
content_type="application/json",
)
responses.add(
responses.GET,
"https://fabrikam-fiber-inc.visualstudio.com/_apis/projects",
body=GET_PROJECTS_RESPONSE,
content_type="application/json",
)
external_issue = ExternalIssue.objects.create(
organization_id=self.organization.id,
integration_id=self.integration.model.id,
key=vsts_work_item_id,
title="I'm a title!",
description="I'm a description.",
)
with assume_test_silo_mode(SiloMode.CONTROL):
IntegrationExternalProject.objects.create(
external_id="ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
organization_integration_id=self.integration.org_integration.id,
resolved_status="Resolved",
unresolved_status="New",
)
self.integration.sync_status_outbound(external_issue, True, self.project.id)
assert len(responses.calls) == 3
req = responses.calls[2].request
assert (
req.url
== f"https://fabrikam-fiber-inc.visualstudio.com/_apis/wit/workitems/{vsts_work_item_id}"
)
assert orjson.loads(req.body) == [
{"path": "/fields/System.State", "value": "Resolved", "op": "replace"}
]
assert responses.calls[2].response.status_code == 200
@responses.activate
@patch(
"sentry.integrations.vsts.client.VstsApiClient.get_work_item",
side_effect=ApiError(
"According to Microsoft Entra, your Identity xxx is currently Deleted within the following Microsoft Entra tenant: xxx Please contact your Microsoft Entra administrator to resolve this."
),
)
def test_sync_status_outbound_invalid_identity(self, get_work_item: MagicMock) -> None:
vsts_work_item_id = 5
external_issue = ExternalIssue.objects.create(
organization_id=self.organization.id,
integration_id=self.integration.model.id,
key=vsts_work_item_id,
title="I'm a title!",
description="I'm a description.",
)
with assume_test_silo_mode(SiloMode.CONTROL):
IntegrationExternalProject.objects.create(
external_id="ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
organization_integration_id=self.integration.org_integration.id,
resolved_status="Resolved",
unresolved_status="New",
)
with pytest.raises(ApiUnauthorized):
self.integration.sync_status_outbound(external_issue, True, self.project.id)
def test_get_issue_url(self) -> None:
work_id = 345
url = self.integration.get_issue_url(work_id)
assert url == "https://fabrikam-fiber-inc.visualstudio.com/_workitems/edit/345"
@responses.activate
def test_should_resolve_active_to_resolved(self) -> None:
assert (
self.integration.get_resolve_sync_action(
{
"project": self.project_id_with_states,
"old_state": "Active",
"new_state": "Resolved",
}
)
== ResolveSyncAction.RESOLVE
)
@responses.activate
def test_should_resolve_resolved_to_active(self) -> None:
assert (
self.integration.get_resolve_sync_action(
{
"project": self.project_id_with_states,
"old_state": "Resolved",
"new_state": "Active",
}
)
== ResolveSyncAction.UNRESOLVE
)
@responses.activate
def test_should_resolve_new(self) -> None:
assert (
self.integration.get_resolve_sync_action(
{"project": self.project_id_with_states, "old_state": None, "new_state": "New"}
)
== ResolveSyncAction.UNRESOLVE
)
@responses.activate
def test_should_resolve_done_status_failure(self) -> None:
"""TODO(mgaeta): Should this be NOOP instead of UNRESOLVE when we lose connection?"""
responses.reset()
responses.add(
responses.GET,
"https://fabrikam-fiber-inc.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workitemtypes/Bug/states",
status=403,
json={
"error": "The requested operation is not allowed. Your account is pending deletion."
},
)
assert (
self.integration.get_resolve_sync_action(
{
"project": self.project_id_with_states,
"old_state": "Active",
"new_state": "Resolved",
}
)
== ResolveSyncAction.UNRESOLVE
)
@responses.activate
def test_should_not_unresolve_resolved_to_closed(self) -> None:
assert (
self.integration.get_resolve_sync_action(
{
"project": self.project_id_with_states,
"old_state": "Resolved",
"new_state": "Closed",
}
)
== ResolveSyncAction.NOOP
)
@region_silo_test(include_monolith_run=True)
|
VstsIssueSyncTest
|
python
|
pydantic__pydantic
|
pydantic/_internal/_utils.py
|
{
"start": 13208,
"end": 14156
}
|
class ____:
"""Wrapper redirecting `__getitem__` to `get` with a sentinel value as default
This makes is safe to use in `operator.itemgetter` when some keys may be missing
"""
# Define __slots__manually for performances
# @dataclasses.dataclass() only support slots=True in python>=3.10
__slots__ = ('wrapped',)
wrapped: Mapping[str, Any]
def __getitem__(self, key: str, /) -> Any:
return self.wrapped.get(key, _SENTINEL)
# required to pass the object to operator.itemgetter() instances due to a quirk of typeshed
# https://github.com/python/mypy/issues/13713
# https://github.com/python/typeshed/pull/8785
# Since this is typing-only, hide it in a typing.TYPE_CHECKING block
if TYPE_CHECKING:
def __contains__(self, key: str, /) -> bool:
return self.wrapped.__contains__(key)
_ModelT = TypeVar('_ModelT', bound='BaseModel')
_RT = TypeVar('_RT')
|
SafeGetItemProxy
|
python
|
yaml__pyyaml
|
lib/yaml/cyaml.py
|
{
"start": 692,
"end": 891
}
|
class ____(CParser, FullConstructor, Resolver):
def __init__(self, stream):
CParser.__init__(self, stream)
FullConstructor.__init__(self)
Resolver.__init__(self)
|
CFullLoader
|
python
|
pytorch__pytorch
|
test/test_overrides.py
|
{
"start": 11854,
"end": 12440
}
|
class ____:
"""A class that overrides the full torch API
This class is used to explicitly test that the full torch.tensor API
can be overridden with a class that defines __torch_function__.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS_TENSOR_LIKE:
return NotImplemented
# In this case _torch_function_ should override TensorLike objects
return HANDLED_FUNCTIONS_TENSOR_LIKE[func](*args, **kwargs)
|
TensorLike
|
python
|
spack__spack
|
lib/spack/spack/graph.py
|
{
"start": 16472,
"end": 16863
}
|
class ____(DotGraphBuilder):
"""Simple DOT graph, with nodes colored uniformly and edges without properties"""
def node_entry(self, node):
format_option = "{name}{@version}{/hash:7}{%compiler}"
return node.dag_hash(), f'[label="{node.format(format_option)}"]'
def edge_entry(self, edge):
return edge.parent.dag_hash(), edge.spec.dag_hash(), None
|
SimpleDAG
|
python
|
joke2k__faker
|
tests/providers/test_ssn.py
|
{
"start": 33851,
"end": 34820
}
|
class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("nl_BE")
Faker.seed(0)
def test_ssn(self):
for _ in range(1000):
ssn = self.fake.ssn()
assert len(ssn) == 11
gen_seq = ssn[6:9]
gen_chksum = ssn[9:11]
gen_seq_as_int = int(gen_seq)
gen_chksum_as_int = int(gen_chksum)
# Check that the sequence nr is between 1 inclusive and 998 inclusive
assert gen_seq_as_int > 0
assert gen_seq_as_int <= 998
# validate checksum calculation
# Since the century is not part of ssn, try both below and above year 2000
ssn_below = int(ssn[0:9])
chksum_below = 97 - (ssn_below % 97)
ssn_above = ssn_below + 2000000000
chksum_above = 97 - (ssn_above % 97)
results = [chksum_above, chksum_below]
assert gen_chksum_as_int in results
|
TestNlBE
|
python
|
lepture__authlib
|
authlib/jose/errors.py
|
{
"start": 1367,
"end": 1654
}
|
class ____(JoseError):
error = "invalid_algorithm_for_multiple_recipients_mode"
def __init__(self, alg):
description = f"{alg} algorithm cannot be used in multiple recipients mode"
super().__init__(description=description)
|
InvalidAlgorithmForMultipleRecipientsMode
|
python
|
pytorch__pytorch
|
test/dynamo/test_global.py
|
{
"start": 700,
"end": 7291
}
|
class ____(torch._dynamo.test_case.TestCase):
def test_store_global_1(self):
def fn(x):
global g_counter
val = x + g_counter
g_counter += 1
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
res2 = fn(x)
self.assertTrue(same(res2 - res1, torch.ones(10)))
def test_store_global_2(self):
def fn(x):
global g_counter
val = x + g_counter
g_counter += 1
g_counter += 1
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
"""Wrap the second call with torch._dynamo as well"""
opt_fn = torch.compile(fn, backend=cnts)
res2 = opt_fn(x)
self.assertTrue(same(res2 - res1, 2 * torch.ones(10)))
def test_store_global_new(self):
def fn(x):
# Test create a new global
global g_counter_new
g_counter_new = x + 1
return x + g_counter_new
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
self.assertTrue(same(res1, x + x + 1))
def test_store_global_list(self):
def fn(x):
global g_list
val = x + g_list[1]
"""
Strictly speaking, we are not testing STORE_GLOBAL
here, since STORE_SUBSCR is actually used to store.
"""
g_list[1] += 1
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
res2 = fn(x)
self.assertTrue(same(res2 - res1, torch.ones(10)))
def test_store_global_list_2(self):
def fn(x):
global g_list
val = x + g_list[1]
g_list = [x + 1 for x in g_list]
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
res2 = fn(x)
self.assertTrue(same(res2 - res1, torch.ones(10)))
def test_store_global_dict(self):
def fn(x):
global g_dict
val = x + g_dict["b"]
"""
Strictly speaking, we are not testing STORE_GLOBAL
here, since STORE_SUBSCR is actually used to store.
"""
g_dict["b"] += 1
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
res2 = fn(x)
self.assertTrue(same(res2 - res1, torch.ones(10)))
def test_store_global_dict_2(self):
def fn(x):
global g_dict
g_dict = {key: value + 1 for key, value in g_dict.items()}
val = x + g_dict["b"]
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
res2 = fn(x)
self.assertTrue(same(res2 - res1, torch.ones(10)))
def test_store_global_object(self):
def fn(x):
global g_object
val = x + g_object.y
g_object.y += 1
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
res2 = fn(x)
self.assertTrue(same(res2 - res1, torch.ones(10)))
def test_store_global_cross_file(self):
def fn(x):
val = x + utils.g_tensor_export
utils.g_tensor_export = utils.g_tensor_export + 1
return val
x = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res1 = opt_fn(x)
res2 = fn(x)
self.assertTrue(same(res2 - res1, torch.ones(10)))
def test_store_global_inline_1(self):
# Borrowed from test_python_autograd.py
class Variable:
def __init__(self, value: torch.Tensor, name: Optional[str] = None):
self.value = value
self.name = name or fresh_name()
def fn(a, b):
a = Variable(a)
b = Variable(b)
return a.value + b.value, a.name + b.name
a = torch.randn(10)
b = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
v0, s0 = opt_fn(a, b)
self.assertEqual(s0, "v0v1")
reset_name()
def test_store_global_inline_2(self):
# Borrowed from test_python_autograd.py
class Variable:
def __init__(self, value: torch.Tensor, name: Optional[str] = None):
self.value = value
self.name = name or fresh_name()
@staticmethod
def constant(value: torch.Tensor, name: Optional[str] = None):
return Variable(value, name)
def fn(a, b):
a = Variable.constant(a)
b = Variable.constant(b)
return a.value + b.value, a.name + b.name
a = torch.randn(10)
b = torch.randn(10)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
v0, s0 = opt_fn(a, b)
self.assertEqual(s0, "v0v1")
reset_name()
def test_store_global_crossfile_inline(self):
try:
from . import mock_store_global_crossfile_inline
except ImportError:
import mock_store_global_crossfile_inline
@torch.compile()
def fn(x):
mock_store_global_crossfile_inline.set_flag_true()
mock_store_global_crossfile_inline.set_flag_false()
return x + 1
@torch.compile()
def fn_set_true(x):
mock_store_global_crossfile_inline.set_flag_true()
return x + 1
fn_set_true(torch.ones(2, 2))
self.assertTrue(mock_store_global_crossfile_inline.global_flag)
fn(torch.ones(2, 2))
self.assertFalse(mock_store_global_crossfile_inline.global_flag)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
TestGlobals
|
python
|
aio-libs__aiohttp
|
aiohttp/client_reqrep.py
|
{
"start": 30774,
"end": 31433
}
|
class ____(TypedDict, total=False):
params: Query
headers: CIMultiDict[str]
skip_auto_headers: Iterable[str] | None
data: Any
cookies: BaseCookie[str]
auth: BasicAuth | None
version: HttpVersion
compress: str | bool
chunked: bool | None
expect100: bool
loop: asyncio.AbstractEventLoop
response_class: type[ClientResponse]
proxy: URL | None
proxy_auth: BasicAuth | None
timer: BaseTimerContext
session: "ClientSession"
ssl: SSLContext | bool | Fingerprint
proxy_headers: CIMultiDict[str] | None
traces: list["Trace"]
trust_env: bool
server_hostname: str | None
|
ClientRequestArgs
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 6617,
"end": 6792
}
|
class ____(PollParentWithManyToMany):
books = models.ManyToManyField("Book", related_name="books_poll_child")
_history_m2m_fields = ["books"]
|
PollChildBookWithManyToMany
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_cond_format12.py
|
{
"start": 315,
"end": 1344
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format(
{"bg_color": "#FFFF00", "fg_color": "#FF0000", "pattern": 12}
)
worksheet.write("A1", "Hello", format1)
worksheet.write("B3", 10)
worksheet.write("B4", 20)
worksheet.write("B5", 30)
worksheet.write("B6", 40)
worksheet.conditional_format(
"B3:B6",
{
"type": "cell",
"format": format1,
"criteria": "greater than",
"value": 20,
},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/long_access_path_taint.py
|
{
"start": 280,
"end": 1031
}
|
class ____:
def __init__(
self, id: int, params: Dict[str, Any], kind: str, request: str
) -> None:
self.id = id
self.timestamp = params.get("timestamp") or 0
self.app_id = params.get("app_id")
self.kind = kind
self.request = request
@classmethod
async def async_create(
cls, id: int, params: Dict, request: Optional[str] = None
) -> "C":
kind = str(params)
if kind == "special_kind":
request = "get_current_request()"
else:
if not request:
request = str(params)
return cls(id, params, kind, request)
def test():
obj = C.async_create(1, {_test_source(): _test_source()}, "")
_test_sink(obj.id)
|
C
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments_differ.py
|
{
"start": 3282,
"end": 3487
}
|
class ____(Positional):
def test(self, *args):
"""
Acceptable use of vararg in subclass because it does not violate LSP.
"""
super().test(args[0], args[1])
|
PositionalChild
|
python
|
kamyu104__LeetCode-Solutions
|
Python/nth-digit.py
|
{
"start": 32,
"end": 489
}
|
class ____(object):
def findNthDigit(self, n):
"""
:type n: int
:rtype: int
"""
digit_len = 1
while n > digit_len * 9 * (10 ** (digit_len-1)):
n -= digit_len * 9 * (10 ** (digit_len-1))
digit_len += 1
num = 10 ** (digit_len-1) + (n-1)/digit_len
nth_digit = num / (10 ** ((digit_len-1) - ((n-1)%digit_len)))
nth_digit %= 10
return nth_digit
|
Solution
|
python
|
python-pillow__Pillow
|
src/PIL/IcnsImagePlugin.py
|
{
"start": 4621,
"end": 7902
}
|
class ____:
SIZES = {
(512, 512, 2): [(b"ic10", read_png_or_jpeg2000)],
(512, 512, 1): [(b"ic09", read_png_or_jpeg2000)],
(256, 256, 2): [(b"ic14", read_png_or_jpeg2000)],
(256, 256, 1): [(b"ic08", read_png_or_jpeg2000)],
(128, 128, 2): [(b"ic13", read_png_or_jpeg2000)],
(128, 128, 1): [
(b"ic07", read_png_or_jpeg2000),
(b"it32", read_32t),
(b"t8mk", read_mk),
],
(64, 64, 1): [(b"icp6", read_png_or_jpeg2000)],
(32, 32, 2): [(b"ic12", read_png_or_jpeg2000)],
(48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)],
(32, 32, 1): [
(b"icp5", read_png_or_jpeg2000),
(b"il32", read_32),
(b"l8mk", read_mk),
],
(16, 16, 2): [(b"ic11", read_png_or_jpeg2000)],
(16, 16, 1): [
(b"icp4", read_png_or_jpeg2000),
(b"is32", read_32),
(b"s8mk", read_mk),
],
}
def __init__(self, fobj: IO[bytes]) -> None:
"""
fobj is a file-like object as an icns resource
"""
# signature : (start, length)
self.dct = {}
self.fobj = fobj
sig, filesize = nextheader(fobj)
if not _accept(sig):
msg = "not an icns file"
raise SyntaxError(msg)
i = HEADERSIZE
while i < filesize:
sig, blocksize = nextheader(fobj)
if blocksize <= 0:
msg = "invalid block header"
raise SyntaxError(msg)
i += HEADERSIZE
blocksize -= HEADERSIZE
self.dct[sig] = (i, blocksize)
fobj.seek(blocksize, io.SEEK_CUR)
i += blocksize
def itersizes(self) -> list[tuple[int, int, int]]:
sizes = []
for size, fmts in self.SIZES.items():
for fmt, reader in fmts:
if fmt in self.dct:
sizes.append(size)
break
return sizes
def bestsize(self) -> tuple[int, int, int]:
sizes = self.itersizes()
if not sizes:
msg = "No 32bit icon resources found"
raise SyntaxError(msg)
return max(sizes)
def dataforsize(self, size: tuple[int, int, int]) -> dict[str, Image.Image]:
"""
Get an icon resource as {channel: array}. Note that
the arrays are bottom-up like windows bitmaps and will likely
need to be flipped or transposed in some way.
"""
dct = {}
for code, reader in self.SIZES[size]:
desc = self.dct.get(code)
if desc is not None:
dct.update(reader(self.fobj, desc, size))
return dct
def getimage(
self, size: tuple[int, int] | tuple[int, int, int] | None = None
) -> Image.Image:
if size is None:
size = self.bestsize()
elif len(size) == 2:
size = (size[0], size[1], 1)
channels = self.dataforsize(size)
im = channels.get("RGBA")
if im:
return im
im = channels["RGB"].copy()
try:
im.putalpha(channels["A"])
except KeyError:
pass
return im
##
# Image plugin for Mac OS icons.
|
IcnsFile
|
python
|
pytorch__pytorch
|
benchmarks/inductor_backends/cutlass.py
|
{
"start": 2374,
"end": 2921
}
|
class ____(ExperimentConfig):
cutlass_instantiation_level: str
def name(self) -> str:
level_name = (
self.cutlass_instantiation_level
if self.cutlass_instantiation_level != "0"
else "default"
)
return f"cutlass_lvl_{level_name}"
def to_options(self) -> dict[str, Any]:
return {
**super().to_options(),
"cuda.cutlass_instantiation_level": self.cutlass_instantiation_level,
}
@dataclass(frozen=True, kw_only=True)
|
CutlassExperimentConfig
|
python
|
python-poetry__poetry
|
tests/types.py
|
{
"start": 3215,
"end": 3327
}
|
class ____(Protocol):
def __call__(self, content: str, base_url: str | None = None) -> str: ...
|
HTMLPageGetter
|
python
|
sympy__sympy
|
sympy/physics/quantum/spin.py
|
{
"start": 41242,
"end": 48954
}
|
class ____(SpinState):
"""Base class for coupled angular momentum states."""
def __new__(cls, j, m, jn, *jcoupling):
# Check j and m values using SpinState
SpinState(j, m)
# Build and check coupling scheme from arguments
if len(jcoupling) == 0:
# Use default coupling scheme
jcoupling = []
for n in range(2, len(jn)):
jcoupling.append( (1, n, Add(*[jn[i] for i in range(n)])) )
jcoupling.append( (1, len(jn), j) )
elif len(jcoupling) == 1:
# Use specified coupling scheme
jcoupling = jcoupling[0]
else:
raise TypeError("CoupledSpinState only takes 3 or 4 arguments, got: %s" % (len(jcoupling) + 3) )
# Check arguments have correct form
if not isinstance(jn, (list, tuple, Tuple)):
raise TypeError('jn must be Tuple, list or tuple, got %s' %
jn.__class__.__name__)
if not isinstance(jcoupling, (list, tuple, Tuple)):
raise TypeError('jcoupling must be Tuple, list or tuple, got %s' %
jcoupling.__class__.__name__)
if not all(isinstance(term, (list, tuple, Tuple)) for term in jcoupling):
raise TypeError(
'All elements of jcoupling must be list, tuple or Tuple')
if not len(jn) - 1 == len(jcoupling):
raise ValueError('jcoupling must have length of %d, got %d' %
(len(jn) - 1, len(jcoupling)))
if not all(len(x) == 3 for x in jcoupling):
raise ValueError('All elements of jcoupling must have length 3')
# Build sympified args
j = sympify(j)
m = sympify(m)
jn = Tuple( *[sympify(ji) for ji in jn] )
jcoupling = Tuple( *[Tuple(sympify(
n1), sympify(n2), sympify(ji)) for (n1, n2, ji) in jcoupling] )
# Check values in coupling scheme give physical state
if any(2*ji != int(2*ji) for ji in jn if ji.is_number):
raise ValueError('All elements of jn must be integer or half-integer, got: %s' % jn)
if any(n1 != int(n1) or n2 != int(n2) for (n1, n2, _) in jcoupling):
raise ValueError('Indices in jcoupling must be integers')
if any(n1 < 1 or n2 < 1 or n1 > len(jn) or n2 > len(jn) for (n1, n2, _) in jcoupling):
raise ValueError('Indices must be between 1 and the number of coupled spin spaces')
if any(2*ji != int(2*ji) for (_, _, ji) in jcoupling if ji.is_number):
raise ValueError('All coupled j values in coupling scheme must be integer or half-integer')
coupled_n, coupled_jn = _build_coupled(jcoupling, len(jn))
jvals = list(jn)
for n, (n1, n2) in enumerate(coupled_n):
j1 = jvals[min(n1) - 1]
j2 = jvals[min(n2) - 1]
j3 = coupled_jn[n]
if sympify(j1).is_number and sympify(j2).is_number and sympify(j3).is_number:
if j1 + j2 < j3:
raise ValueError('All couplings must have j1+j2 >= j3, '
'in coupling number %d got j1,j2,j3: %d,%d,%d' % (n + 1, j1, j2, j3))
if abs(j1 - j2) > j3:
raise ValueError("All couplings must have |j1+j2| <= j3, "
"in coupling number %d got j1,j2,j3: %d,%d,%d" % (n + 1, j1, j2, j3))
if int_valued(j1 + j2):
pass
jvals[min(n1 + n2) - 1] = j3
if len(jcoupling) > 0 and jcoupling[-1][2] != j:
raise ValueError('Last j value coupled together must be the final j of the state')
# Return state
return State.__new__(cls, j, m, jn, jcoupling)
def _print_label(self, printer, *args):
label = [printer._print(self.j), printer._print(self.m)]
for i, ji in enumerate(self.jn, start=1):
label.append('j%d=%s' % (
i, printer._print(ji)
))
for jn, (n1, n2) in zip(self.coupled_jn[:-1], self.coupled_n[:-1]):
label.append('j(%s)=%s' % (
','.join(str(i) for i in sorted(n1 + n2)), printer._print(jn)
))
return ','.join(label)
def _print_label_pretty(self, printer, *args):
label = [self.j, self.m]
for i, ji in enumerate(self.jn, start=1):
symb = 'j%d' % i
symb = pretty_symbol(symb)
symb = prettyForm(symb + '=')
item = prettyForm(*symb.right(printer._print(ji)))
label.append(item)
for jn, (n1, n2) in zip(self.coupled_jn[:-1], self.coupled_n[:-1]):
n = ','.join(pretty_symbol("j%d" % i)[-1] for i in sorted(n1 + n2))
symb = prettyForm('j' + n + '=')
item = prettyForm(*symb.right(printer._print(jn)))
label.append(item)
return self._print_sequence_pretty(
label, self._label_separator, printer, *args
)
def _print_label_latex(self, printer, *args):
label = [
printer._print(self.j, *args),
printer._print(self.m, *args)
]
for i, ji in enumerate(self.jn, start=1):
label.append('j_{%d}=%s' % (i, printer._print(ji, *args)) )
for jn, (n1, n2) in zip(self.coupled_jn[:-1], self.coupled_n[:-1]):
n = ','.join(str(i) for i in sorted(n1 + n2))
label.append('j_{%s}=%s' % (n, printer._print(jn, *args)) )
return self._label_separator.join(label)
@property
def jn(self):
return self.label[2]
@property
def coupling(self):
return self.label[3]
@property
def coupled_jn(self):
return _build_coupled(self.label[3], len(self.label[2]))[1]
@property
def coupled_n(self):
return _build_coupled(self.label[3], len(self.label[2]))[0]
@classmethod
def _eval_hilbert_space(cls, label):
j = Add(*label[2])
if j.is_number:
return DirectSumHilbertSpace(*[ ComplexSpace(x) for x in range(int(2*j + 1), 0, -2) ])
else:
# TODO: Need hilbert space fix, see issue 5732
# Desired behavior:
#ji = symbols('ji')
#ret = Sum(ComplexSpace(2*ji + 1), (ji, 0, j))
# Temporary fix:
return ComplexSpace(2*j + 1)
def _represent_coupled_base(self, **options):
evect = self.uncoupled_class()
if not self.j.is_number:
raise ValueError(
'State must not have symbolic j value to represent')
if not self.hilbert_space.dimension.is_number:
raise ValueError(
'State must not have symbolic j values to represent')
result = zeros(self.hilbert_space.dimension, 1)
if self.j == int(self.j):
start = self.j**2
else:
start = (2*self.j - 1)*(1 + 2*self.j)/4
result[start:start + 2*self.j + 1, 0] = evect(
self.j, self.m)._represent_base(**options)
return result
def _eval_rewrite_as_Jx(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jx, JxBraCoupled, **options)
return self._rewrite_basis(Jx, JxKetCoupled, **options)
def _eval_rewrite_as_Jy(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jy, JyBraCoupled, **options)
return self._rewrite_basis(Jy, JyKetCoupled, **options)
def _eval_rewrite_as_Jz(self, *args, **options):
if isinstance(self, Bra):
return self._rewrite_basis(Jz, JzBraCoupled, **options)
return self._rewrite_basis(Jz, JzKetCoupled, **options)
|
CoupledSpinState
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.