language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 224346,
"end": 226194
} | class ____(Response):
"""
Response of tasks.edit endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "edit"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(EditResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| EditResponse |
python | walkccc__LeetCode | solutions/3405. Count the Number of Arrays with K Matching Adjacent Elements/3405.py | {
"start": 0,
"end": 170
} | class ____:
def countGoodArrays(self, n: int, m: int, k: int) -> int:
MOD = 1_000_000_007
return m * pow(m - 1, n - k - 1, MOD) * math.comb(n - 1, k) % MOD
| Solution |
python | huggingface__transformers | src/transformers/models/edgetam/configuration_edgetam.py | {
"start": 1277,
"end": 5641
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`EdgeTamVisionModel`]. It is used to instantiate a SAM
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
defaults will yield a similar configuration to that of SAM 2.1 Hiera-tiny
[facebook/EdgeTAM](https://huggingface.co/facebook/EdgeTAM) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*):
Configuration for the vision backbone. This is used to instantiate the backbone using
`AutoModel.from_config`.
backbone_channel_list (`List[int]`, *optional*, defaults to `[384, 192, 96, 48]`):
The list of channel dimensions for the backbone.
backbone_feature_sizes (`List[List[int]]`, *optional*, defaults to `[[256, 256], [128, 128], [64, 64]]`):
The spatial sizes of the feature maps from the backbone.
fpn_hidden_size (`int`, *optional*, defaults to 256):
The hidden dimension of the FPN.
fpn_kernel_size (`int`, *optional*, defaults to 1):
The kernel size for the convolutions in the neck.
fpn_stride (`int`, *optional*, defaults to 1):
The stride for the convolutions in the neck.
fpn_padding (`int`, *optional*, defaults to 0):
The padding for the convolutions in the neck.
fpn_top_down_levels (`List[int]`, *optional*, defaults to `[2, 3]`):
The levels for the top-down FPN connections.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of feature levels from the FPN to use.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the neck.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon for the layer normalization.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
base_config_key = "vision_config"
model_type = "edgetam_vision_model"
sub_configs = {
"backbone_config": AutoConfig,
}
def __init__(
self,
backbone_config=None,
backbone_channel_list=None,
backbone_feature_sizes=None,
fpn_hidden_size=256,
fpn_kernel_size=1,
fpn_stride=1,
fpn_padding=0,
fpn_top_down_levels=None,
num_feature_levels=3,
hidden_act="gelu",
layer_norm_eps=1e-6,
initializer_range=0.02,
**kwargs,
):
backbone_channel_list = [384, 192, 96, 48] if backbone_channel_list is None else backbone_channel_list
backbone_feature_sizes = (
[[256, 256], [128, 128], [64, 64]] if backbone_feature_sizes is None else backbone_feature_sizes
)
fpn_top_down_levels = [2, 3] if fpn_top_down_levels is None else fpn_top_down_levels
if isinstance(backbone_config, dict):
backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper")
backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
elif backbone_config is None:
backbone_config = AutoConfig.from_pretrained(
"timm/repvit_m1.dist_in1k",
model_args={"in_chans": 3, "features_only": True, "out_indices": [0, 1, 2, 3]},
)
self.backbone_config = backbone_config
# Neck
self.backbone_channel_list = backbone_channel_list
self.backbone_feature_sizes = backbone_feature_sizes
self.fpn_hidden_size = fpn_hidden_size
self.fpn_kernel_size = fpn_kernel_size
self.fpn_stride = fpn_stride
self.fpn_padding = fpn_padding
self.fpn_top_down_levels = fpn_top_down_levels
self.num_feature_levels = num_feature_levels
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
super().__init__(**kwargs)
| EdgeTamVisionConfig |
python | pandas-dev__pandas | pandas/tests/reductions/test_reductions.py | {
"start": 18990,
"end": 42969
} | class ____:
# Note: the name TestSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
def test_sum_inf(self):
s = Series(np.random.default_rng(2).standard_normal(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert np.isinf(s.sum())
arr = np.random.default_rng(2).standard_normal((100, 100)).astype("f4")
arr[:, 2] = np.inf
res = nanops.nansum(arr, axis=1)
assert np.isinf(res).all()
@pytest.mark.parametrize(
"dtype", ["float64", "Float32", "Int64", "boolean", "object"]
)
@pytest.mark.parametrize("use_bottleneck", [True, False])
@pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
def test_empty(self, method, unit, use_bottleneck, dtype):
item = pd.NA if dtype in ["Float32", "Int64"] else np.nan
with pd.option_context("use_bottleneck", use_bottleneck):
# GH#9422 / GH#18921
# Entirely empty
s = Series([], dtype=dtype)
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
result == unit
# Skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert isna(result)
result = getattr(s, method)(skipna=False, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=False, min_count=1)
assert isna(result)
# All-NA
s = Series([item], dtype=dtype)
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
result == unit
# skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert isna(result)
# Mix of valid, empty
s = Series([item, 1], dtype=dtype)
# Default
result = getattr(s, method)()
assert result == 1.0
# Explicit
result = getattr(s, method)(min_count=0)
assert result == 1.0
result = getattr(s, method)(min_count=1)
assert result == 1.0
# Skipna
result = getattr(s, method)(skipna=True)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=0)
assert result == 1.0
# GH#844 (changed in GH#9422)
df = DataFrame(np.empty((10, 0)), dtype=dtype)
assert (getattr(df, method)(axis=1) == unit).all()
s = Series([1], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert isna(result)
result = getattr(s, method)(skipna=False, min_count=2)
assert isna(result)
s = Series([item], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert isna(result)
s = Series([item, 1], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert isna(result)
@pytest.mark.parametrize("method", ["mean", "var"])
@pytest.mark.parametrize("dtype", ["Float64", "Int64", "boolean"])
def test_ops_consistency_on_empty_nullable(self, method, dtype):
# GH#34814
# consistency for nullable dtypes on empty or ALL-NA mean
# empty series
eser = Series([], dtype=dtype)
result = getattr(eser, method)()
assert result is pd.NA
# ALL-NA series
nser = Series([pd.NA], dtype=dtype)
result = getattr(nser, method)()
assert result is pd.NA
@pytest.mark.parametrize("method", ["mean", "median", "std", "var"])
def test_ops_consistency_on_empty(self, method):
# GH#7869
# consistency on empty
# float
result = getattr(Series(dtype=float), method)()
assert isna(result)
# timedelta64[ns]
tdser = Series([], dtype="m8[ns]")
if method == "var":
msg = "|".join(
[
"operation 'var' not allowed",
r"cannot perform var with type timedelta64\[ns\]",
"does not support operation 'var'",
]
)
with pytest.raises(TypeError, match=msg):
getattr(tdser, method)()
else:
result = getattr(tdser, method)()
assert result is NaT
def test_nansum_buglet(self):
ser = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(ser)
tm.assert_almost_equal(result, 1)
@pytest.mark.parametrize("use_bottleneck", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64"])
def test_sum_overflow_int(self, use_bottleneck, dtype):
with pd.option_context("use_bottleneck", use_bottleneck):
# GH#6915
# overflowing on the smaller int dtypes
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert int(result) == v.sum(dtype="int64")
result = s.min(skipna=False)
assert int(result) == 0
result = s.max(skipna=False)
assert int(result) == v[-1]
@pytest.mark.parametrize("use_bottleneck", [True, False])
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_sum_overflow_float(self, use_bottleneck, dtype):
with pd.option_context("use_bottleneck", use_bottleneck):
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert result == v.sum(dtype=dtype)
result = s.min(skipna=False)
assert np.allclose(float(result), 0.0)
result = s.max(skipna=False)
assert np.allclose(float(result), v[-1])
def test_mean_masked_overflow(self):
# GH#48378
val = 100_000_000_000_000_000
n_elements = 100
na = np.array([val] * n_elements)
ser = Series([val] * n_elements, dtype="Int64")
result_numpy = np.mean(na)
result_masked = ser.mean()
assert result_masked - result_numpy == 0
assert result_masked == 1e17
@pytest.mark.parametrize("ddof, exp", [(1, 2.5), (0, 2.0)])
def test_var_masked_array(self, ddof, exp):
# GH#48379
ser = Series([1, 2, 3, 4, 5], dtype="Int64")
ser_numpy_dtype = Series([1, 2, 3, 4, 5], dtype="int64")
result = ser.var(ddof=ddof)
result_numpy_dtype = ser_numpy_dtype.var(ddof=ddof)
assert result == result_numpy_dtype
assert result == exp
def test_var_complex_array(self):
# GH#61645
ser = Series([-1j, 0j, 1j], dtype=complex)
assert ser.var(ddof=1) == 1.0
assert ser.std(ddof=1) == 1.0
@pytest.mark.parametrize("dtype", ("m8[ns]", "M8[ns]", "M8[ns, UTC]"))
def test_empty_timeseries_reductions_return_nat(self, dtype, skipna):
# covers GH#11245
assert Series([], dtype=dtype).min(skipna=skipna) is NaT
assert Series([], dtype=dtype).max(skipna=skipna) is NaT
def test_numpy_argmin(self):
# See GH#16830
data = np.arange(1, 11)
s = Series(data, index=data)
result = np.argmin(s)
expected = np.argmin(data)
assert result == expected
result = s.argmin()
assert result == expected
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argmin(s, out=data)
def test_numpy_argmax(self):
# See GH#16830
data = np.arange(1, 11)
ser = Series(data, index=data)
result = np.argmax(ser)
expected = np.argmax(data)
assert result == expected
result = ser.argmax()
assert result == expected
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argmax(ser, out=data)
def test_idxmin_dt64index(self, unit):
# GH#43587 should have NaT instead of NaN
dti = DatetimeIndex(["NaT", "2015-02-08", "NaT"]).as_unit(unit)
ser = Series([1.0, 2.0, np.nan], index=dti)
with pytest.raises(ValueError, match="Encountered an NA value"):
ser.idxmin(skipna=False)
with pytest.raises(ValueError, match="Encountered an NA value"):
ser.idxmax(skipna=False)
df = ser.to_frame()
with pytest.raises(ValueError, match="Encountered an NA value"):
df.idxmin(skipna=False)
with pytest.raises(ValueError, match="Encountered an NA value"):
df.idxmax(skipna=False)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isna check.
string_series = Series(range(20), dtype=np.float64, name="series")
# add some NaNs
string_series[5:15] = np.nan
# skipna or no
assert string_series[string_series.idxmin()] == string_series.min()
with pytest.raises(ValueError, match="Encountered an NA value"):
string_series.idxmin(skipna=False)
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmin()] == nona.min()
assert nona.index.values.tolist().index(nona.idxmin()) == nona.values.argmin()
# all NaNs
allna = string_series * np.nan
msg = "Encountered all NA values"
with pytest.raises(ValueError, match=msg):
allna.idxmin()
# datetime64[ns]
s = Series(date_range("20130102", periods=6))
result = s.idxmin()
assert result == 0
s[0] = np.nan
result = s.idxmin()
assert result == 1
def test_idxmax(self):
# test idxmax
# _check_stat_op approach can not be used here because of isna check.
string_series = Series(range(20), dtype=np.float64, name="series")
# add some NaNs
string_series[5:15] = np.nan
# skipna or no
assert string_series[string_series.idxmax()] == string_series.max()
with pytest.raises(ValueError, match="Encountered an NA value"):
assert isna(string_series.idxmax(skipna=False))
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmax()] == nona.max()
assert nona.index.values.tolist().index(nona.idxmax()) == nona.values.argmax()
# all NaNs
allna = string_series * np.nan
msg = "Encountered all NA values"
with pytest.raises(ValueError, match=msg):
allna.idxmax()
s = Series(date_range("20130102", periods=6))
result = s.idxmax()
assert result == 5
s[5] = np.nan
result = s.idxmax()
assert result == 4
# Index with float64 dtype
# GH#5914
s = Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
s = Series(s.index, s.index)
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
def test_all_any(self):
ts = Series(
np.arange(10, dtype=np.float64),
index=date_range("2020-01-01", periods=10),
name="ts",
)
bool_series = ts > 0
assert not bool_series.all()
assert bool_series.any()
# Alternative types, with implicit 'object' dtype.
s = Series(["abc", True])
assert s.any()
def test_numpy_all_any(self, index_or_series):
# GH#40180
idx = index_or_series([0, 1, 2])
assert not np.all(idx)
assert np.any(idx)
idx = Index([1, 2, 3])
assert np.all(idx)
def test_all_any_skipna(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
assert s1.all(skipna=False) # nan && True => True
assert s1.all(skipna=True)
assert s2.any(skipna=False)
assert not s2.any(skipna=True)
def test_all_any_bool_only(self):
s = Series([False, False, True, True, False, True], index=[0, 0, 1, 1, 2, 2])
# GH#47500 - test bool_only works
assert s.any(bool_only=True)
assert not s.all(bool_only=True)
def test_any_all_object_dtype(self, all_boolean_reductions, skipna):
# GH#12863
ser = Series(["a", "b", "c", "d", "e"], dtype=object)
result = getattr(ser, all_boolean_reductions)(skipna=skipna)
expected = True
assert result == expected
@pytest.mark.parametrize(
"data", [[False, None], [None, False], [False, np.nan], [np.nan, False]]
)
def test_any_all_object_dtype_missing(self, data, all_boolean_reductions):
# GH#27709
ser = Series(data)
result = getattr(ser, all_boolean_reductions)(skipna=False)
# None is treated is False, but np.nan is treated as True
expected = all_boolean_reductions == "any" and None not in data
assert result == expected
@pytest.mark.parametrize("dtype", ["boolean", "Int64", "UInt64", "Float64"])
@pytest.mark.parametrize(
# expected_data indexed as [[skipna=False/any, skipna=False/all],
# [skipna=True/any, skipna=True/all]]
"data,expected_data",
[
([0, 0, 0], [[False, False], [False, False]]),
([1, 1, 1], [[True, True], [True, True]]),
([pd.NA, pd.NA, pd.NA], [[pd.NA, pd.NA], [False, True]]),
([0, pd.NA, 0], [[pd.NA, False], [False, False]]),
([1, pd.NA, 1], [[True, pd.NA], [True, True]]),
([1, pd.NA, 0], [[True, False], [True, False]]),
],
)
def test_any_all_nullable_kleene_logic(
self, all_boolean_reductions, skipna, data, dtype, expected_data
):
# GH-37506, GH-41967
ser = Series(data, dtype=dtype)
expected = expected_data[skipna][all_boolean_reductions == "all"]
result = getattr(ser, all_boolean_reductions)(skipna=skipna)
assert (result is pd.NA and expected is pd.NA) or result == expected
def test_any_axis1_bool_only(self):
# GH#32432
df = DataFrame({"A": [True, False], "B": [1, 2]})
result = df.any(axis=1, bool_only=True)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_any_all_datetimelike(self):
# GH#38723 these may not be the desired long-term behavior (GH#34479)
# but in the interim should be internally consistent
dta = date_range("1995-01-02", periods=3)._data
ser = Series(dta)
df = DataFrame(ser)
# GH#34479
msg = "datetime64 type does not support operation '(any|all)'"
with pytest.raises(TypeError, match=msg):
dta.all()
with pytest.raises(TypeError, match=msg):
dta.any()
with pytest.raises(TypeError, match=msg):
ser.all()
with pytest.raises(TypeError, match=msg):
ser.any()
with pytest.raises(TypeError, match=msg):
df.any().all()
with pytest.raises(TypeError, match=msg):
df.all().all()
dta = dta.tz_localize("UTC")
ser = Series(dta)
df = DataFrame(ser)
# GH#34479
with pytest.raises(TypeError, match=msg):
dta.all()
with pytest.raises(TypeError, match=msg):
dta.any()
with pytest.raises(TypeError, match=msg):
ser.all()
with pytest.raises(TypeError, match=msg):
ser.any()
with pytest.raises(TypeError, match=msg):
df.any().all()
with pytest.raises(TypeError, match=msg):
df.all().all()
tda = dta - dta[0]
ser = Series(tda)
df = DataFrame(ser)
assert tda.any()
assert not tda.all()
assert ser.any()
assert not ser.all()
assert df.any().all()
assert not df.all().any()
def test_any_all_string_dtype(self, any_string_dtype):
# GH#54591
if (
isinstance(any_string_dtype, pd.StringDtype)
and any_string_dtype.na_value is pd.NA
):
# the nullable string dtype currently still raise an error
# https://github.com/pandas-dev/pandas/issues/51939
ser = Series(["a", "b"], dtype=any_string_dtype)
with pytest.raises(TypeError):
ser.any()
with pytest.raises(TypeError):
ser.all()
return
ser = Series(["", "a"], dtype=any_string_dtype)
assert ser.any()
assert not ser.all()
assert ser.any(skipna=False)
assert not ser.all(skipna=False)
ser = Series([np.nan, "a"], dtype=any_string_dtype)
assert ser.any()
assert ser.all()
assert ser.any(skipna=False)
assert ser.all(skipna=False) # NaN is considered truthy
ser = Series([np.nan, ""], dtype=any_string_dtype)
assert not ser.any()
assert not ser.all()
assert ser.any(skipna=False) # NaN is considered truthy
assert not ser.all(skipna=False)
ser = Series(["a", "b"], dtype=any_string_dtype)
assert ser.any()
assert ser.all()
assert ser.any(skipna=False)
assert ser.all(skipna=False)
ser = Series([], dtype=any_string_dtype)
assert not ser.any()
assert ser.all()
assert not ser.any(skipna=False)
assert ser.all(skipna=False)
ser = Series([""], dtype=any_string_dtype)
assert not ser.any()
assert not ser.all()
assert not ser.any(skipna=False)
assert not ser.all(skipna=False)
ser = Series([np.nan], dtype=any_string_dtype)
assert not ser.any()
assert ser.all()
assert ser.any(skipna=False) # NaN is considered truthy
assert ser.all(skipna=False) # NaN is considered truthy
def test_timedelta64_analytics(self):
# index min/max
dti = date_range("2012-1-1", periods=3, freq="D")
td = Series(dti) - Timestamp("20120101")
result = td.idxmin()
assert result == 0
result = td.idxmax()
assert result == 2
# GH#2982
# with NaT
td[0] = np.nan
result = td.idxmin()
assert result == 1
result = td.idxmax()
assert result == 2
# abs
s1 = Series(date_range("20120101", periods=3))
s2 = Series(date_range("20120102", periods=3))
expected = Series(s2 - s1)
result = np.abs(s1 - s2)
tm.assert_series_equal(result, expected)
result = (s1 - s2).abs()
tm.assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta("2 days")
assert result == expected
result = td.min()
expected = Timedelta("1 days")
assert result == expected
def test_assert_idxminmax_empty_raises(self):
"""
Cases where ``Series.argmax`` and related should raise an exception
"""
test_input = Series([], dtype="float64")
msg = "attempt to get argmin of an empty sequence"
with pytest.raises(ValueError, match=msg):
test_input.idxmin()
with pytest.raises(ValueError, match=msg):
test_input.idxmin(skipna=False)
msg = "attempt to get argmax of an empty sequence"
with pytest.raises(ValueError, match=msg):
test_input.idxmax()
with pytest.raises(ValueError, match=msg):
test_input.idxmax(skipna=False)
def test_idxminmax_object_dtype(self, using_infer_string):
# pre-2.1 object-dtype was disallowed for argmin/max
ser = Series(["foo", "bar", "baz"])
assert ser.idxmax() == 0
assert ser.idxmax(skipna=False) == 0
assert ser.idxmin() == 1
assert ser.idxmin(skipna=False) == 1
ser2 = Series([(1,), (2,)])
assert ser2.idxmax() == 1
assert ser2.idxmax(skipna=False) == 1
assert ser2.idxmin() == 0
assert ser2.idxmin(skipna=False) == 0
if not using_infer_string:
# attempting to compare np.nan with string raises
ser3 = Series(["foo", "foo", "bar", "bar", None, np.nan, "baz"])
msg = "'>' not supported between instances of 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
ser3.idxmax()
with pytest.raises(TypeError, match=msg):
ser3.idxmax(skipna=False)
msg = "'<' not supported between instances of 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
ser3.idxmin()
with pytest.raises(TypeError, match=msg):
ser3.idxmin(skipna=False)
def test_idxminmax_object_frame(self):
# GH#4279
df = DataFrame([["zimm", 2.5], ["biff", 1.0], ["bid", 12.0]])
res = df.idxmax()
exp = Series([0, 2])
tm.assert_series_equal(res, exp)
def test_idxminmax_object_tuples(self):
# GH#43697
ser = Series([(1, 3), (2, 2), (3, 1)])
assert ser.idxmax() == 2
assert ser.idxmin() == 0
assert ser.idxmax(skipna=False) == 2
assert ser.idxmin(skipna=False) == 0
def test_idxminmax_object_decimals(self):
# GH#40685
df = DataFrame(
{
"idx": [0, 1],
"x": [Decimal("8.68"), Decimal("42.23")],
"y": [Decimal("7.11"), Decimal("79.61")],
}
)
res = df.idxmax()
exp = Series({"idx": 1, "x": 1, "y": 1})
tm.assert_series_equal(res, exp)
res2 = df.idxmin()
exp2 = exp - 1
tm.assert_series_equal(res2, exp2)
def test_argminmax_object_ints(self):
# GH#18021
ser = Series([0, 1], dtype="object")
assert ser.argmax() == 1
assert ser.argmin() == 0
assert ser.argmax(skipna=False) == 1
assert ser.argmin(skipna=False) == 0
def test_idxminmax_with_inf(self):
# For numeric data with NA and Inf (GH #13595)
s = Series([0, -np.inf, np.inf, np.nan])
assert s.idxmin() == 1
with pytest.raises(ValueError, match="Encountered an NA value"):
s.idxmin(skipna=False)
assert s.idxmax() == 2
with pytest.raises(ValueError, match="Encountered an NA value"):
s.idxmax(skipna=False)
def test_sum_uint64(self):
# GH 53401
s = Series([10000000000000000000], dtype="uint64")
result = s.sum()
expected = np.uint64(10000000000000000000)
tm.assert_almost_equal(result, expected)
def test_signedness_preserved_after_sum(self):
# GH 37491
ser = Series([1, 2, 3, 4])
assert ser.astype("uint8").sum().dtype == "uint64"
| TestSeriesReductions |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass5.py | {
"start": 137,
"end": 203
} | class ____:
a: int
b: int
__match_args__ = ("a", "b")
| B |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 28743,
"end": 33077
} | class ____(unittest.TestCase):
def _makeConfig(self):
def hello_view(request):
return {'message': 'Hello!'}
from pyramid.config import Configurator
config = Configurator()
config.add_route('hello', '/hello')
config.add_view(
hello_view,
route_name='hello',
accept='text/plain',
renderer='string',
)
config.add_view(
hello_view,
route_name='hello',
accept='application/json',
renderer='json',
)
def hello_fallback_view(request):
request.response.content_type = 'text/x-fallback'
return 'hello fallback'
config.add_view(
hello_fallback_view, route_name='hello', renderer='string'
)
return config
def _makeTestApp(self, config):
app = config.make_wsgi_app()
return TestApp(app)
def tearDown(self):
import pyramid.config
pyramid.config.global_registries.empty()
def test_client_side_ordering(self):
config = self._makeConfig()
app = self._makeTestApp(config)
res = app.get(
'/hello',
headers={'Accept': 'application/json; q=1.0, text/plain; q=0.9'},
status=200,
)
self.assertEqual(res.content_type, 'application/json')
res = app.get(
'/hello',
headers={'Accept': 'text/plain; q=0.9, application/json; q=1.0'},
status=200,
)
self.assertEqual(res.content_type, 'application/json')
res = app.get(
'/hello', headers={'Accept': 'application/*'}, status=200
)
self.assertEqual(res.content_type, 'application/json')
res = app.get('/hello', headers={'Accept': 'text/*'}, status=200)
self.assertEqual(res.content_type, 'text/plain')
res = app.get(
'/hello', headers={'Accept': 'something/else'}, status=200
)
self.assertEqual(res.content_type, 'text/x-fallback')
def test_default_server_side_ordering(self):
config = self._makeConfig()
app = self._makeTestApp(config)
res = app.get(
'/hello',
headers={'Accept': 'application/json, text/plain'},
status=200,
)
self.assertEqual(res.content_type, 'text/plain')
res = app.get(
'/hello',
headers={'Accept': 'text/plain, application/json'},
status=200,
)
self.assertEqual(res.content_type, 'text/plain')
res = app.get('/hello', headers={'Accept': '*/*'}, status=200)
self.assertEqual(res.content_type, 'text/plain')
res = app.get('/hello', status=200)
self.assertEqual(res.content_type, 'text/plain')
res = app.get('/hello', headers={'Accept': 'invalid'}, status=200)
self.assertEqual(res.content_type, 'text/plain')
res = app.get(
'/hello', headers={'Accept': 'something/else'}, status=200
)
self.assertEqual(res.content_type, 'text/x-fallback')
def test_custom_server_side_ordering(self):
config = self._makeConfig()
config.add_accept_view_order(
'application/json', weighs_more_than='text/plain'
)
app = self._makeTestApp(config)
res = app.get(
'/hello',
headers={'Accept': 'application/json, text/plain'},
status=200,
)
self.assertEqual(res.content_type, 'application/json')
res = app.get(
'/hello',
headers={'Accept': 'text/plain, application/json'},
status=200,
)
self.assertEqual(res.content_type, 'application/json')
res = app.get('/hello', headers={'Accept': '*/*'}, status=200)
self.assertEqual(res.content_type, 'application/json')
res = app.get('/hello', status=200)
self.assertEqual(res.content_type, 'application/json')
res = app.get('/hello', headers={'Accept': 'invalid'}, status=200)
self.assertEqual(res.content_type, 'application/json')
res = app.get(
'/hello', headers={'Accept': 'something/else'}, status=200
)
self.assertEqual(res.content_type, 'text/x-fallback')
| AcceptContentTypeTest |
python | pyinstaller__pyinstaller | PyInstaller/archive/writers.py | {
"start": 959,
"end": 4250
} | class ____:
"""
Writer for PyInstaller's PYZ (ZlibArchive) archive. The archive is used to store collected byte-compiled Python
modules, as individually-compressed entries.
"""
_PYZ_MAGIC_PATTERN = b'PYZ\0'
_HEADER_LENGTH = 12 + 5
_COMPRESSION_LEVEL = 6 # zlib compression level
def __init__(self, filename, entries, code_dict=None):
"""
filename
Target filename of the archive.
entries
An iterable containing entries in the form of tuples: (name, src_path, typecode), where `name` is the name
under which the resource is stored (e.g., python module name, without suffix), `src_path` is name of the
file from which the resource is read, and `typecode` is the Analysis-level TOC typecode (`PYMODULE`).
code_dict
Optional code dictionary containing code objects for analyzed/collected python modules.
"""
code_dict = code_dict or {}
with open(filename, "wb") as fp:
# Reserve space for the header.
fp.write(b'\0' * self._HEADER_LENGTH)
# Write entries' data and collect TOC entries
toc = []
for entry in entries:
toc_entry = self._write_entry(fp, entry, code_dict)
toc.append(toc_entry)
# Write TOC
toc_offset = fp.tell()
toc_data = marshal.dumps(toc)
fp.write(toc_data)
# Write header:
# - PYZ magic pattern (4 bytes)
# - python bytecode magic pattern (4 bytes)
# - TOC offset (32-bit int, 4 bytes)
# - 4 unused bytes
fp.seek(0, os.SEEK_SET)
fp.write(self._PYZ_MAGIC_PATTERN)
fp.write(BYTECODE_MAGIC)
fp.write(struct.pack('!i', toc_offset))
@classmethod
def _write_entry(cls, fp, entry, code_dict):
name, src_path, typecode = entry
assert typecode in {'PYMODULE', 'PYMODULE-1', 'PYMODULE-2'}
if src_path in {'-', None}:
# PEP-420 namespace package; these do not have code objects, but we still need an entry in PYZ to inform our
# run-time module finder/loader of the package's existence. So create a TOC entry for 0-byte data blob,
# and write no data.
return (name, (PYZ_ITEM_NSPKG, fp.tell(), 0))
code_object = code_dict[name]
src_basename, _ = os.path.splitext(os.path.basename(src_path))
if src_basename == '__init__':
typecode = PYZ_ITEM_PKG
co_filename = os.path.join(*name.split('.'), '__init__.py')
else:
typecode = PYZ_ITEM_MODULE
co_filename = os.path.join(*name.split('.')) + '.py'
# Replace co_filename on code object with anonymized version without absolute path to the module.
code_object = replace_filename_in_code_object(code_object, co_filename)
# Serialize
data = marshal.dumps(code_object)
# First compress, then encrypt.
obj = zlib.compress(data, cls._COMPRESSION_LEVEL)
# Create TOC entry
toc_entry = (name, (typecode, fp.tell(), len(obj)))
# Write data blob
fp.write(obj)
return toc_entry
| ZlibArchiveWriter |
python | kamyu104__LeetCode-Solutions | Python/cracking-the-safe.py | {
"start": 609,
"end": 1466
} | class ____(object):
def crackSafe(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
total = k**n
M = total//k
unique_rolling_hash = 0
result = [str(0)]*(n-1)
lookup = set()
while len(lookup) < total:
for i in reversed(xrange(k)): # preorder like traversal relative to initial result to avoid getting stuck, i.e. don't use 0 until there is no other choice
new_unique_rolling_hash = unique_rolling_hash*k + i
if new_unique_rolling_hash not in lookup:
lookup.add(new_unique_rolling_hash)
result.append(str(i))
unique_rolling_hash = new_unique_rolling_hash%M
break
return "".join(result)
# Time: O(k^n)
# Space: O(k^n)
| Solution2 |
python | getsentry__sentry | tests/sentry/workflow_engine/migrations/test_0088_remove_monitor_slug_conditions.py | {
"start": 161,
"end": 4022
} | class ____(TestMigrations):
migrate_from = "0087_relink_crons_to_compatible_issue_workflows"
migrate_to = "0088_remove_monitor_slug_conditions"
app = "workflow_engine"
def setup_initial_state(self) -> None:
self.org = self.create_organization(name="test-org")
self.project = self.create_project(organization=self.org)
self.dcg = DataConditionGroup.objects.create(organization_id=self.org.id)
# Conditions that should be deleted
self.monitor_slug_condition1 = DataCondition.objects.create(
type="tagged_event",
comparison={"key": "monitor.slug", "match": "eq", "value": "my-monitor-1"},
condition_result=True,
condition_group=self.dcg,
)
self.monitor_slug_condition2 = DataCondition.objects.create(
type="tagged_event",
comparison={"key": "monitor.slug", "match": "eq", "value": "my-monitor-2"},
condition_result=True,
condition_group=self.dcg,
)
# Conditions that should not be deleted
self.level_condition = DataCondition.objects.create(
type="tagged_event",
comparison={"key": "level", "match": "eq", "value": "error"},
condition_result=True,
condition_group=self.dcg,
)
self.environment_condition = DataCondition.objects.create(
type="tagged_event",
comparison={"key": "environment", "match": "eq", "value": "production"},
condition_result=True,
condition_group=self.dcg,
)
self.custom_tag_condition = DataCondition.objects.create(
type="tagged_event",
comparison={"key": "custom.tag", "match": "eq", "value": "some-value"},
condition_result=True,
condition_group=self.dcg,
)
self.first_seen_condition = DataCondition.objects.create(
type="first_seen_event",
comparison=True,
condition_result=True,
condition_group=self.dcg,
)
self.age_comparison_condition = DataCondition.objects.create(
type="age_comparison",
comparison={"comparison_type": "older", "value": 30, "time": "minute"},
condition_result=True,
condition_group=self.dcg,
)
self.edge_case_condition = DataCondition.objects.create(
type="tagged_event",
comparison={"key": "other_field", "match": "eq", "value": "monitor.slug"},
condition_result=True,
condition_group=self.dcg,
)
self.nested_condition = DataCondition.objects.create(
type="tagged_event",
comparison={"key": "nested.monitor.slug", "match": "eq", "value": "test"},
condition_result=True,
condition_group=self.dcg,
)
def test_migration(self) -> None:
# Verify that monitor.slug conditions are deleted
assert not DataCondition.objects.filter(id=self.monitor_slug_condition1.id).exists()
assert not DataCondition.objects.filter(id=self.monitor_slug_condition2.id).exists()
# Verify that other tagged_event conditions are not deleted
assert DataCondition.objects.filter(id=self.level_condition.id).exists()
assert DataCondition.objects.filter(id=self.environment_condition.id).exists()
assert DataCondition.objects.filter(id=self.custom_tag_condition.id).exists()
assert DataCondition.objects.filter(id=self.first_seen_condition.id).exists()
assert DataCondition.objects.filter(id=self.age_comparison_condition.id).exists()
assert DataCondition.objects.filter(id=self.edge_case_condition.id).exists()
assert DataCondition.objects.filter(id=self.nested_condition.id).exists()
| RemoveMonitorSlugConditionsTest |
python | Textualize__textual | src/textual/signal.py | {
"start": 634,
"end": 4241
} | class ____(Generic[SignalT]):
"""A signal that a widget may subscribe to, in order to invoke callbacks when an associated event occurs."""
def __init__(self, owner: DOMNode, name: str) -> None:
"""Initialize a signal.
Args:
owner: The owner of this signal.
name: An identifier for debugging purposes.
"""
self._owner = ref(owner)
self._name = name
self._subscriptions: WeakKeyDictionary[
DOMNode, list[SignalCallbackType[SignalT]]
] = WeakKeyDictionary()
def __rich_repr__(self) -> rich.repr.Result:
yield "owner", self.owner
yield "name", self._name
yield "subscriptions", list(self._subscriptions.keys())
@property
def owner(self) -> DOMNode | None:
"""The owner of this Signal, or `None` if there is no owner."""
return self._owner()
def subscribe(
self,
node: DOMNode,
callback: SignalCallbackType[SignalT],
immediate: bool = False,
) -> None:
"""Subscribe a node to this signal.
When the signal is published, the callback will be invoked.
Args:
node: Node to subscribe.
callback: A callback function which takes a single argument and returns anything (return type ignored).
immediate: Invoke the callback immediately on publish if `True`, otherwise post it to the DOM node to be
called once existing messages have been processed.
Raises:
SignalError: Raised when subscribing a non-mounted widget.
"""
if not node.is_running:
raise SignalError(
f"Node must be running to subscribe to a signal (has {node} been mounted)?"
)
if immediate:
def signal_callback(data: SignalT) -> None:
"""Invoke the callback immediately."""
callback(data)
else:
def signal_callback(data: SignalT) -> None:
"""Post the callback to the node, to call at the next opertunity."""
node.call_next(callback, data)
callbacks = self._subscriptions.setdefault(node, [])
callbacks.append(signal_callback)
def unsubscribe(self, node: DOMNode) -> None:
"""Unsubscribe a node from this signal.
Args:
node: Node to unsubscribe,
"""
self._subscriptions.pop(node, None)
def publish(self, data: SignalT) -> None:
"""Publish the signal (invoke subscribed callbacks).
Args:
data: An argument to pass to the callbacks.
"""
# Don't publish if the DOM is not ready or shutting down
owner = self.owner
if owner is None:
return
if not owner.is_attached or owner._pruning:
return
for ancestor_node in owner.ancestors_with_self:
if not ancestor_node.is_running:
return
for node, callbacks in list(self._subscriptions.items()):
if not (node.is_running and node.is_attached) or node._pruning:
# Removed nodes that are no longer running
self._subscriptions.pop(node)
else:
# Call callbacks
for callback in callbacks:
try:
callback(data)
except Exception as error:
log.error(
f"error publishing signal to {node} ignored (callback={callback}); {error}"
)
| Signal |
python | PyCQA__pylint | pylint/reporters/text.py | {
"start": 3000,
"end": 5400
} | class ____(BaseReporter):
"""Reports messages and layouts in plain text."""
name = "text"
extension = "txt"
line_format = "{path}:{line}:{column}: {msg_id}: {msg} ({symbol})"
def __init__(self, output: TextIO | None = None) -> None:
super().__init__(output)
self._modules: set[str] = set()
self._template = self.line_format
self._fixed_template = self.line_format
"""The output format template with any unrecognized arguments removed."""
def on_set_current_module(self, module: str, filepath: str | None) -> None:
"""Set the format template to be used and check for unrecognized arguments."""
template = str(self.linter.config.msg_template or self._template)
# Return early if the template is the same as the previous one
if template == self._template:
return
# Set template to the currently selected template
self._template = template
# Check to see if all parameters in the template are attributes of the Message
arguments = re.findall(r"\{(\w+?)(:.*)?\}", template)
for argument in arguments:
if argument[0] not in MESSAGE_FIELDS:
warnings.warn(
f"Don't recognize the argument '{argument[0]}' in the --msg-template. "
"Are you sure it is supported on the current version of pylint?",
stacklevel=2,
)
template = re.sub(r"\{" + argument[0] + r"(:.*?)?\}", "", template)
self._fixed_template = template
def write_message(self, msg: Message) -> None:
"""Convenience method to write a formatted message with class default
template.
"""
self_dict = asdict(msg)
for key in ("end_line", "end_column"):
self_dict[key] = self_dict[key] or ""
self.writeln(self._fixed_template.format(**self_dict))
def handle_message(self, msg: Message) -> None:
"""Manage message of different type and in the context of path."""
if msg.module not in self._modules:
self.writeln(make_header(msg))
self._modules.add(msg.module)
self.write_message(msg)
def _display(self, layout: Section) -> None:
"""Launch layouts display."""
print(file=self.out)
TextWriter().format(layout, self.out)
| TextReporter |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 227563,
"end": 227882
} | class ____(TestCase):
@xpassIfTorchDynamo_np # (reason="TODO")
def test_flat_element_deletion(self):
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError from None
| TestDelMisc |
python | scrapy__scrapy | tests/test_cmdline/__init__.py | {
"start": 206,
"end": 2551
} | class ____:
def setup_method(self):
self.env = get_testenv()
tests_path = Path(__file__).parent.parent
self.env["PYTHONPATH"] += os.pathsep + str(tests_path.parent)
self.env["SCRAPY_SETTINGS_MODULE"] = "tests.test_cmdline.settings"
def _execute(self, *new_args, **kwargs):
encoding = sys.stdout.encoding or "utf-8"
args = (sys.executable, "-m", "scrapy.cmdline", *new_args)
proc = Popen(args, stdout=PIPE, stderr=PIPE, env=self.env, **kwargs)
comm = proc.communicate()[0].strip()
return comm.decode(encoding)
def test_default_settings(self):
assert self._execute("settings", "--get", "TEST1") == "default"
def test_override_settings_using_set_arg(self):
assert (
self._execute("settings", "--get", "TEST1", "-s", "TEST1=override")
== "override"
)
def test_profiling(self):
path = Path(tempfile.mkdtemp())
filename = path / "res.prof"
try:
self._execute("version", "--profile", str(filename))
assert filename.exists()
out = StringIO()
stats = pstats.Stats(str(filename), stream=out)
stats.print_stats()
out.seek(0)
stats = out.read()
assert str(Path("scrapy", "commands", "version.py")) in stats
assert "tottime" in stats
finally:
shutil.rmtree(path)
def test_override_dict_settings(self):
EXT_PATH = "tests.test_cmdline.extensions.DummyExtension"
EXTENSIONS = {EXT_PATH: 200}
settingsstr = self._execute(
"settings",
"--get",
"EXTENSIONS",
"-s",
"EXTENSIONS=" + json.dumps(EXTENSIONS),
)
# XXX: There's gotta be a smarter way to do this...
assert "..." not in settingsstr
for char in ("'", "<", ">"):
settingsstr = settingsstr.replace(char, '"')
settingsdict = json.loads(settingsstr)
assert set(settingsdict.keys()) == set(EXTENSIONS.keys())
assert settingsdict[EXT_PATH] == 200
def test_pathlib_path_as_feeds_key(self):
assert self._execute("settings", "--get", "FEEDS") == json.dumps(
{"items.csv": {"format": "csv", "fields": ["price", "name"]}}
)
| TestCmdline |
python | falconry__falcon | tests/test_uri_templates.py | {
"start": 1646,
"end": 1849
} | class ____:
def __init__(self):
self.file_id = None
self.called = False
def on_get(self, req, resp, file_id):
self.file_id = file_id
self.called = True
| FileResource |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP039.py | {
"start": 138,
"end": 164
} | class ____():
pass
# OK
| A |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-qualaroo/components.py | {
"start": 360,
"end": 740
} | class ____(BasicHttpAuthenticator):
@property
def token(self):
key = str(self._username.eval(self.config)).encode("latin1")
token = self._password.eval(self.config).encode("latin1")
encoded_credentials = b64encode(b":".join((key, token))).strip()
token = "Basic " + encoded_credentials.decode("ascii")
return token
| CustomAuthenticator |
python | pyinstaller__pyinstaller | bootloader/waflib/Scripting.py | {
"start": 12470,
"end": 15382
} | class ____(Dist):
fun = 'distcheck'
cmd = 'distcheck'
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
self.check()
def make_distcheck_cmd(self, tmpdir):
cfg = []
if Options.options.distcheck_args:
cfg = shlex.split(Options.options.distcheck_args)
else:
cfg = [x for x in sys.argv if x.startswith('-')]
cmd = [sys.executable, sys.argv[0], 'configure', 'build', 'install', 'uninstall', '--destdir=' + tmpdir] + cfg
return cmd
def check(self):
import tempfile, tarfile
with tarfile.open(self.get_arch_name()) as t:
for x in t:
t.extract(x)
instdir = tempfile.mkdtemp('.inst', self.get_base_name())
cmd = self.make_distcheck_cmd(instdir)
ret = Utils.subprocess.Popen(cmd, cwd=self.get_base_name()).wait()
if ret:
raise Errors.WafError('distcheck failed with code %r' % ret)
if os.path.exists(instdir):
raise Errors.WafError('distcheck succeeded, but files were left in %s' % instdir)
shutil.rmtree(self.get_base_name())
def distcheck(ctx):
'''checks if the project compiles (tarball from 'dist')'''
pass
def autoconfigure(execute_method):
def execute(self):
if not Configure.autoconfig:
return execute_method(self)
env = ConfigSet.ConfigSet()
do_config = False
try:
env.load(os.path.join(Context.top_dir, Options.lockfile))
except EnvironmentError:
Logs.warn('Configuring the project')
do_config = True
else:
if env.run_dir != Context.run_dir:
do_config = True
else:
h = 0
for f in env.files:
try:
h = Utils.h_list((h, Utils.readf(f, 'rb')))
except EnvironmentError:
do_config = True
break
else:
do_config = h != env.hash
if do_config:
cmd = env.config_cmd or 'configure'
if Configure.autoconfig == 'clobber':
tmp = Options.options.__dict__
launch_dir_tmp = Context.launch_dir
if env.options:
Options.options.__dict__ = env.options
Context.launch_dir = env.launch_dir
try:
run_command(cmd)
finally:
Options.options.__dict__ = tmp
Context.launch_dir = launch_dir_tmp
else:
run_command(cmd)
run_command(self.cmd)
else:
return execute_method(self)
return execute
Build.BuildContext.execute = autoconfigure(Build.BuildContext.execute)
| DistCheck |
python | numpy__numpy | numpy/_core/_exceptions.py | {
"start": 3272,
"end": 5159
} | class ____(MemoryError):
""" Thrown when an array cannot be allocated"""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
@property
def _total_size(self):
num_bytes = self.dtype.itemsize
for dim in self.shape:
num_bytes *= dim
return num_bytes
@staticmethod
def _size_to_string(num_bytes):
""" Convert a number of bytes into a binary size string """
# https://en.wikipedia.org/wiki/Binary_prefix
LOG2_STEP = 10
STEP = 1024
units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
unit_val = 1 << (unit_i * LOG2_STEP)
n_units = num_bytes / unit_val
del unit_val
# ensure we pick a unit that is correct after rounding
if round(n_units) == STEP:
unit_i += 1
n_units /= STEP
# deal with sizes so large that we don't have units for them
if unit_i >= len(units):
new_unit_i = len(units) - 1
n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
unit_i = new_unit_i
unit_name = units[unit_i]
# format with a sensible number of digits
if unit_i == 0:
# no decimal point on bytes
return f'{n_units:.0f} {unit_name}'
elif round(n_units) < 1000:
# 3 significant figures, if none are dropped to the left of the .
return f'{n_units:#.3g} {unit_name}'
else:
# just give all the digits otherwise
return f'{n_units:#.0f} {unit_name}'
def __str__(self):
size_str = self._size_to_string(self._total_size)
return (f"Unable to allocate {size_str} for an array with shape "
f"{self.shape} and data type {self.dtype}")
| _ArrayMemoryError |
python | scrapy__scrapy | tests/test_exporters.py | {
"start": 833,
"end": 904
} | class ____:
name: str
age: int
@dataclasses.dataclass
| MyDataClass |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/actions/test_ticketing.py | {
"start": 1244,
"end": 1378
} | class ____(BaseTicketingActionValidatorTest):
__test__ = True
provider = Action.Type.AZURE_DEVOPS
| TestAzureDevOpsActionValidator |
python | bokeh__bokeh | src/bokeh/protocol/message.py | {
"start": 3425,
"end": 11728
} | class ____(Generic[Content]):
''' The Message base class encapsulates creating, assembling, and
validating the integrity of Bokeh Server messages. Additionally, it
provide hooks
'''
msgtype: ClassVar[str]
_header: Header
_header_json: str | None
_content: Content
_content_json: str | None
_metadata: Metadata
_metadata_json: str | None
_buffers: list[Buffer]
def __init__(self, header: Header, metadata: Metadata, content: Content) -> None:
''' Initialize a new message from header, metadata, and content
dictionaries.
To assemble a message from existing JSON fragments, use the
``assemble`` method.
To create new messages with automatically generated headers,
use subclass ``create`` methods.
Args:
header (JSON-like) :
metadata (JSON-like) :
content (JSON-like) :
'''
self.header = header
self.metadata = metadata
self.content = content
self._buffers = []
def __repr__(self) -> str:
return f"Message {self.msgtype!r} content: {self.content!r}"
@classmethod
def assemble(cls, header_json: str, metadata_json: str, content_json: str) -> Message[Content]:
''' Creates a new message, assembled from JSON fragments.
Args:
header_json (``JSON``) :
metadata_json (``JSON``) :
content_json (``JSON``) :
Returns:
Message subclass
Raises:
MessageError
'''
try:
header = json.loads(header_json)
except ValueError:
raise MessageError("header could not be decoded")
try:
metadata = json.loads(metadata_json)
except ValueError:
raise MessageError("metadata could not be decoded")
try:
content = json.loads(content_json)
except ValueError:
raise MessageError("content could not be decoded")
msg = cls(header, metadata, content)
msg._header_json = header_json
msg._metadata_json = metadata_json
msg._content_json = content_json
return msg
def add_buffer(self, buffer: Buffer) -> None:
''' Associate a buffer header and payload with this message.
Args:
buffer (Buffer) : a buffer
Returns:
None
Raises:
MessageError
'''
if 'num_buffers' in self._header:
self._header['num_buffers'] += 1
else:
self._header['num_buffers'] = 1
self._header_json = None
self._buffers.append(buffer)
def assemble_buffer(self, buf_header: BufferHeader, buf_payload: bytes) -> None:
''' Add a buffer header and payload that we read from the socket.
This differs from add_buffer() because we're validating vs.
the header's num_buffers, instead of filling in the header.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
ProtocolError
'''
num_buffers = self.header.get("num_buffers", 0)
if num_buffers <= len(self._buffers):
raise ProtocolError(f"too many buffers received expecting {num_buffers}")
self._buffers.append(Buffer(buf_header["id"], buf_payload))
async def write_buffers(self, conn: WebSocketClientConnectionWrapper, locked: bool = True) -> int:
''' Write any buffer headers and payloads to the given connection.
Args:
conn (object) :
May be any object with a ``write_message`` method. Typically,
a Tornado ``WSHandler`` or ``WebSocketClientConnection``
locked (bool) :
Returns:
int : number of bytes sent
'''
if conn is None:
raise ValueError("Cannot write_buffers to connection None")
sent = 0
for buffer in self._buffers:
header = json.dumps(buffer.ref)
payload = buffer.to_bytes()
await conn.write_message(header, locked=locked)
await conn.write_message(payload, binary=True, locked=locked)
sent += len(header) + len(payload)
return sent
@classmethod
def create_header(cls, request_id: ID | None = None) -> Header:
''' Return a message header fragment dict.
Args:
request_id (str or None) :
Message ID of the message this message replies to
Returns:
dict : a message header
'''
header = Header(
msgid = bkserial.make_id(),
msgtype = cls.msgtype,
)
if request_id is not None:
header['reqid'] = request_id
return header
async def send(self, conn: WebSocketClientConnectionWrapper) -> int:
''' Send the message on the given connection.
Args:
conn (WebSocketHandler) : a WebSocketHandler to send messages
Returns:
int : number of bytes sent
'''
if conn is None:
raise ValueError("Cannot send to connection None")
with await conn.write_lock.acquire():
sent = 0
await conn.write_message(self.header_json, locked=False)
sent += len(self.header_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#await asyncio.sleep(0.1)
await conn.write_message(self.metadata_json, locked=False)
sent += len(self.metadata_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#await asyncio.sleep(0.1)
await conn.write_message(self.content_json, locked=False)
sent += len(self.content_json)
sent += await self.write_buffers(conn, locked=False)
return sent
@property
def complete(self) -> bool:
''' Returns whether all required parts of a message are present.
Returns:
bool : True if the message is complete, False otherwise
'''
return self.header is not None and \
self.metadata is not None and \
self.content is not None and \
self.header.get('num_buffers', 0) == len(self._buffers)
@property
def payload(self) -> Serialized[Content]:
return Serialized(self.content, self.buffers)
# header fragment properties
@property
def header(self) -> Header:
return self._header
@header.setter
def header(self, value: Header) -> None:
self._header = value
self._header_json = None
@property
def header_json(self) -> str:
if not self._header_json:
self._header_json = json.dumps(self.header)
return self._header_json
# content fragment properties
@property
def content(self) -> Content:
return self._content
@content.setter
def content(self, value: Content) -> None:
self._content = value
self._content_json = None
@property
def content_json(self) -> str:
if not self._content_json:
self._content_json = serialize_json(self.payload)
return self._content_json
# metadata fragment properties
@property
def metadata(self) -> Metadata:
return self._metadata
@metadata.setter
def metadata(self, value: Metadata) -> None:
self._metadata = value
self._metadata_json = None
@property
def metadata_json(self) -> str:
if not self._metadata_json:
self._metadata_json = json.dumps(self.metadata)
return self._metadata_json
# buffer properties
@property
def buffers(self) -> list[Buffer]:
return list(self._buffers)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Message |
python | kamyu104__LeetCode-Solutions | Python/remove-all-adjacent-duplicates-in-string-ii.py | {
"start": 29,
"end": 449
} | class ____(object):
def removeDuplicates(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
stk = [['^', 0]]
for c in s:
if stk[-1][0] == c:
stk[-1][1] += 1
if stk[-1][1] == k:
stk.pop()
else:
stk.append([c, 1])
return "".join(c*k for c, k in stk)
| Solution |
python | scipy__scipy | scipy/optimize/tests/test__basinhopping.py | {
"start": 2715,
"end": 12407
} | class ____:
def setup_method(self):
""" Tests setup.
Run tests based on the 1-D and 2-D functions described above.
"""
self.x0 = (1.0, [1.0, 1.0])
self.sol = (-0.195, np.array([-0.195, -0.1]))
self.tol = 3 # number of decimal places
self.niter = 100
self.disp = False
self.kwargs = {"method": "L-BFGS-B", "jac": True}
self.kwargs_nograd = {"method": "L-BFGS-B"}
def test_TypeError(self):
# test the TypeErrors are raised on bad input
i = 1
# if take_step is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
take_step=1)
# if accept_test is passed, it must be callable
assert_raises(TypeError, basinhopping, func2d, self.x0[i],
accept_test=1)
def test_input_validation(self):
msg = 'target_accept_rate has to be in range \\(0, 1\\)'
with assert_raises(ValueError, match=msg):
basinhopping(func1d, self.x0[0], target_accept_rate=0.)
with assert_raises(ValueError, match=msg):
basinhopping(func1d, self.x0[0], target_accept_rate=1.)
msg = 'stepwise_factor has to be in range \\(0, 1\\)'
with assert_raises(ValueError, match=msg):
basinhopping(func1d, self.x0[0], stepwise_factor=0.)
with assert_raises(ValueError, match=msg):
basinhopping(func1d, self.x0[0], stepwise_factor=1.)
def test_1d_grad(self):
# test 1-D minimizations with gradient
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_2d(self):
# test 2d minimizations with gradient
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(res.nfev > 0)
def test_njev(self):
# test njev is returned correctly
i = 1
minimizer_kwargs = self.kwargs.copy()
# L-BFGS-B doesn't use njev, but BFGS does
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(res.nfev > 0)
assert_equal(res.nfev, res.njev)
def test_jac(self):
# test Jacobian returned
minimizer_kwargs = self.kwargs.copy()
# BFGS returns a Jacobian
minimizer_kwargs["method"] = "BFGS"
res = basinhopping(func2d_easyderiv, [0.0, 0.0],
minimizer_kwargs=minimizer_kwargs, niter=self.niter,
disp=self.disp)
assert_(hasattr(res.lowest_optimization_result, "jac"))
# in this case, the Jacobian is just [df/dx, df/dy]
_, jacobian = func2d_easyderiv(res.x)
assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
self.tol)
def test_2d_nograd(self):
# test 2-D minimizations without gradient
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
@pytest.mark.fail_slow(10)
def test_all_minimizers(self):
# Test 2-D minimizations with gradient. Nelder-Mead, Powell, COBYLA, and
# COBYQA don't accept jac=True, so aren't included here.
i = 1
methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
minimizer_kwargs = copy.copy(self.kwargs)
for method in methods:
minimizer_kwargs["method"] = method
res = basinhopping(func2d, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=self.niter, disp=self.disp)
assert_almost_equal(res.x, self.sol[i], self.tol)
@pytest.mark.fail_slow(40)
@pytest.mark.parametrize("method", [
'CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
'Nelder-Mead', 'Powell', 'COBYLA', 'COBYQA'])
def test_all_nograd_minimizers(self, method):
# Test 2-D minimizations without gradient. Newton-CG requires jac=True,
# so not included here.
i = 1
minimizer_kwargs = self.kwargs_nograd.copy()
minimizer_kwargs["method"] = method
# These methods take extensive amount of time on this problem
niter = 10 if method in ('COBYLA', 'COBYQA') else self.niter
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=minimizer_kwargs,
niter=niter, disp=self.disp, seed=1234)
tol = 2 if method == 'COBYLA' else self.tol
assert_almost_equal(res.x, self.sol[i], decimal=tol)
def test_pass_takestep(self):
# test that passing a custom takestep works
# also test that the stepsize is being adjusted
takestep = MyTakeStep1()
initial_step_size = takestep.stepsize
i = 1
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
assert_(takestep.been_called)
# make sure that the build in adaptive step size has been used
assert_(initial_step_size != takestep.stepsize)
def test_pass_simple_takestep(self):
# test that passing a custom takestep without attribute stepsize
takestep = myTakeStep2
i = 1
res = basinhopping(func2d_nograd, self.x0[i],
minimizer_kwargs=self.kwargs_nograd,
niter=self.niter, disp=self.disp,
take_step=takestep)
assert_almost_equal(res.x, self.sol[i], self.tol)
def test_pass_accept_test(self):
# test passing a custom accept test
# makes sure it's being used and ensures all the possible return values
# are accepted.
accept_test = MyAcceptTest()
i = 1
# there's no point in running it more than a few steps.
basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=10, disp=self.disp, accept_test=accept_test)
assert_(accept_test.been_called)
def test_pass_callback(self):
# test passing a custom callback function
# This makes sure it's being used. It also returns True after 10 steps
# to ensure that it's stopping early.
callback = MyCallBack()
i = 1
# there's no point in running it more than a few steps.
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=30, disp=self.disp, callback=callback)
assert_(callback.been_called)
assert_("callback" in res.message[0])
# One of the calls of MyCallBack is during BasinHoppingRunner
# construction, so there are only 9 remaining before MyCallBack stops
# the minimization.
assert_equal(res.nit, 9)
def test_minimizer_fail(self):
# test if a minimizer fails
i = 1
self.kwargs["options"] = dict(maxiter=0)
niter = 10
res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=niter, disp=self.disp)
# the number of failed minimizations should be the number of
# iterations + 1
assert_equal(res.nit + 1, res.minimization_failures)
def test_niter_zero(self):
# gh5915, what happens if you call basinhopping with niter=0
i = 0
basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=0, disp=self.disp)
def test_rng_reproducibility(self):
# rng should ensure reproducibility between runs
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
f_1 = []
def callback(x, f, accepted):
f_1.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback, rng=10)
f_2 = []
def callback2(x, f, accepted):
f_2.append(f)
basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
niter=10, callback=callback2, rng=10)
assert_equal(np.array(f_1), np.array(f_2))
def test_random_gen(self):
# check that np.random.Generator can be used (numpy >= 1.17)
rng = np.random.default_rng(1)
minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
res1 = basinhopping(func2d, [1.0, 1.0],
minimizer_kwargs=minimizer_kwargs,
niter=10, rng=rng)
rng = np.random.default_rng(1)
res2 = basinhopping(func2d, [1.0, 1.0],
minimizer_kwargs=minimizer_kwargs,
niter=10, rng=rng)
assert_equal(res1.x, res2.x)
def test_monotonic_basin_hopping(self):
# test 1-D minimizations with gradient and T=0
i = 0
res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
niter=self.niter, disp=self.disp, T=0)
assert_almost_equal(res.x, self.sol[i], self.tol)
| TestBasinHopping |
python | openai__openai-python | src/openai/lib/azure.py | {
"start": 1368,
"end": 1628
} | class ____(OpenAIError):
def __init__(self) -> None:
super().__init__(
"The `api_key`, `azure_ad_token` and `azure_ad_token_provider` arguments are mutually exclusive; Only one can be passed at a time"
)
| MutuallyExclusiveAuthError |
python | scipy__scipy | scipy/special/_mptestutils.py | {
"start": 4661,
"end": 5444
} | class ____:
def __init__(self, a=-1000, b=1000):
self.a = a
self.b = b
def values(self, n):
v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int)
v2 = np.arange(-5, 5)
v = np.unique(np.r_[v1, v2])
v = v[(v >= self.a) & (v < self.b)]
return v
def get_args(argspec, n):
if isinstance(argspec, np.ndarray):
args = argspec.copy()
else:
nargs = len(argspec)
ms = np.asarray(
[1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec]
)
ms = (n**(ms/sum(ms))).astype(int) + 1
args = [spec.values(m) for spec, m in zip(argspec, ms)]
args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T
return args
| IntArg |
python | django__django | tests/inspectdb/models.py | {
"start": 3436,
"end": 3642
} | class ____(models.Model):
char_field = models.CharField(max_length=10, db_collation=test_collation)
class Meta:
required_db_features = {"supports_collation_on_charfield"}
| CharFieldDbCollation |
python | scrapy__scrapy | scrapy/exceptions.py | {
"start": 749,
"end": 960
} | class ____(Exception):
"""Raise this from callbacks to request the spider to be closed"""
def __init__(self, reason: str = "cancelled"):
super().__init__()
self.reason = reason
| CloseSpider |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc.py | {
"start": 17755,
"end": 18218
} | class ____(Benchmark):
params = [np.int32, np.int64]
param_names = ['dtype']
def setup(self, dtype):
N = 1000000
self.a = np.random.randint(20, size=N).astype(dtype)
self.b = np.random.randint(4, size=N).astype(dtype)
def time_pow(self, dtype):
np.power(self.a, self.b)
def time_pow_two(self, dtype):
np.power(self.a, 2)
def time_pow_five(self, dtype):
np.power(self.a, 5)
| BinaryBenchInteger |
python | huggingface__transformers | src/transformers/models/bridgetower/image_processing_bridgetower.py | {
"start": 4449,
"end": 4539
} | class ____(ImagesKwargs, total=False):
size_divisor: int
| BridgeTowerImageProcessorKwargs |
python | aio-libs__aiohttp | aiohttp/client_exceptions.py | {
"start": 9028,
"end": 9131
} | class ____(*ssl_error_bases): # type: ignore[misc]
"""Response ssl error."""
| ClientConnectorSSLError |
python | cython__cython | Cython/Compiler/Symtab.py | {
"start": 95054,
"end": 98333
} | class ____(Scope):
"""Scope for comprehensions (but not generator expressions, which use ClosureScope).
As opposed to generators, these can be easily inlined in some cases, so all
we really need is a scope that holds the loop variable(s).
"""
is_comprehension_scope = True
def __init__(self, outer_scope):
parent_scope = outer_scope
# TODO: also ignore class scopes?
while parent_scope.is_comprehension_scope:
parent_scope = parent_scope.parent_scope
name = parent_scope.global_scope().next_id(Naming.genexpr_id_ref)
Scope.__init__(self, name, outer_scope, parent_scope)
self.directives = outer_scope.directives
self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)
# Class/ExtType scopes are filled at class creation time, i.e. from the
# module init function or surrounding function.
while outer_scope.is_comprehension_scope or outer_scope.is_c_class_scope or outer_scope.is_py_class_scope:
outer_scope = outer_scope.outer_scope
self.var_entries = outer_scope.var_entries # keep declarations outside
outer_scope.subscopes.add(self)
def mangle(self, prefix, name):
return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))
def declare_var(self, name, type, pos,
cname=None, visibility='private',
api=False, in_pxd=False, is_cdef=True, pytyping_modifiers=None):
if type is unspecified_type:
# if the outer scope defines a type for this variable, inherit it
outer_entry = self.outer_scope.lookup(name)
if outer_entry and outer_entry.is_variable:
type = outer_entry.type # may still be 'unspecified_type' !
self._reject_pytyping_modifiers(pos, pytyping_modifiers)
# the parent scope needs to generate code for the variable, but
# this scope must hold its name exclusively
cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = True
if self.parent_scope.is_module_scope:
entry.is_cglobal = True
else:
entry.is_local = True
entry.in_subscope = True
self.var_entries.append(entry)
self.entries[name] = entry
return entry
def declare_assignment_expression_target(self, name, type, pos):
# should be declared in the parent scope instead
return self.parent_scope.declare_var(name, type, pos)
def declare_pyfunction(self, name, pos, allow_redefine=False):
return self.outer_scope.declare_pyfunction(
name, pos, allow_redefine)
def declare_lambda_function(self, func_cname, pos):
return self.outer_scope.declare_lambda_function(func_cname, pos)
def add_lambda_def(self, def_node):
return self.outer_scope.add_lambda_def(def_node)
def lookup_assignment_expression_target(self, name):
entry = self.lookup_here(name)
if not entry:
entry = self.parent_scope.lookup_assignment_expression_target(name)
return entry
| ComprehensionScope |
python | instagram__MonkeyType | demo/inbox.py | {
"start": 3430,
"end": 5279
} | class ____:
def __init__(self, user: User, repo: RepoInterface) -> None:
self.user = user
self.repo = repo
self.events = self.repo.get_inbox_events_for_user_id(self.user.id)
def aggregate(self):
aggregators: List[AggregatorInterface] = [
CommentsAggregator(self.repo),
LikesAggregator(self.repo),
FollowersAggregator(self.repo),
]
aggregators_by_type: Dict[EventType, List[AggregatorInterface]] = {}
for agg in aggregators:
aggregators_by_type.setdefault(agg.type, []).append(agg)
for event in self.events:
for aggregator in aggregators_by_type.get(event.type, []):
aggregator.add(event)
items = chain.from_iterable(
agg.aggregate() for agg in chain.from_iterable(aggregators_by_type.values())
)
return sorted(items, key=attrgetter("published"), reverse=True)
def summarize(self):
counter = Counter(e.type for e in self.events)
clauses: List[str] = []
likes = counter[EventType.LIKED]
if likes:
clauses.append(f"{likes} new like{self._pluralize(likes)}")
follows = counter[EventType.FOLLOWED]
if follows:
clauses.append(f"{follows} new follower{self._pluralize(follows)}")
comments = counter[EventType.COMMENTED]
if comments:
clauses.append(f"{comments} new comment{self._pluralize(comments)}")
if not clauses:
combined = "no new activity"
elif len(clauses) == 1:
combined = clauses[0]
else:
initial = ", ".join(clauses[:-1])
combined = f"{initial} and {clauses[-1]}"
return f"You have {combined}."
def _pluralize(self, count):
return "" if count == 1 else "s"
| Inbox |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 24920,
"end": 25145
} | class ____(BaseIntType):
@classmethod
def validate(cls, value):
cls.validate_int(value)
if value < 1 or value > 42:
raise ValueError("value must be in range 1 to 42 inclusive")
| ST_IntegerType |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property1.py | {
"start": 1626,
"end": 1794
} | class ____:
@property
def prop1(self) -> type[Self]: ...
def method1(self) -> None:
reveal_type(self.prop1, expected_text="type[Self@ClassC]")
| ClassC |
python | huggingface__transformers | src/transformers/models/rembert/modeling_rembert.py | {
"start": 19760,
"end": 20570
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.output_embedding_size)
self.decoder = nn.Linear(config.output_embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
self.LayerNorm = nn.LayerNorm(config.output_embedding_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RemBert
| RemBertLMPredictionHead |
python | coleifer__peewee | tests/pool.py | {
"start": 10795,
"end": 12389
} | class ____(ModelTestCase):
database = PooledTestDatabase('test_pooled.db')
requires = [Register]
def tearDown(self):
super(TestLivePooledDatabase, self).tearDown()
self.database.close_idle()
if os.path.exists('test_pooled.db'):
os.unlink('test_pooled.db')
def test_reuse_connection(self):
for i in range(5):
Register.create(value=i)
conn_id = id(self.database.connection())
self.database.close()
for i in range(5, 10):
Register.create(value=i)
self.assertEqual(id(self.database.connection()), conn_id)
self.assertEqual(
[x.value for x in Register.select().order_by(Register.id)],
list(range(10)))
def test_db_context(self):
with self.database:
Register.create(value=1)
with self.database.atomic() as sp:
self.assertTrue(isinstance(sp, _savepoint))
Register.create(value=2)
sp.rollback()
with self.database.atomic() as sp:
self.assertTrue(isinstance(sp, _savepoint))
Register.create(value=3)
with self.database:
values = [r.value for r in Register.select().order_by(Register.id)]
self.assertEqual(values, [1, 3])
def test_bad_connection(self):
self.database.connection()
try:
self.database.execute_sql('select 1/0')
except Exception as exc:
pass
self.database.close()
self.database.connect()
| TestLivePooledDatabase |
python | django__django | tests/db_functions/comparison/test_collate.py | {
"start": 182,
"end": 2083
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.author1 = Author.objects.create(alias="a", name="Jones 1")
cls.author2 = Author.objects.create(alias="A", name="Jones 2")
def test_collate_filter_ci(self):
collation = connection.features.test_collations.get("ci")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
qs = Author.objects.filter(alias=Collate(Value("a"), collation))
self.assertEqual(qs.count(), 2)
def test_collate_order_by_cs(self):
collation = connection.features.test_collations.get("cs")
if not collation:
self.skipTest("This backend does not support case-sensitive collations.")
qs = Author.objects.order_by(Collate("alias", collation))
self.assertSequenceEqual(qs, [self.author2, self.author1])
def test_language_collation_order_by(self):
collation = connection.features.test_collations.get("swedish_ci")
if not collation:
self.skipTest("This backend does not support language collations.")
author3 = Author.objects.create(alias="O", name="Jones")
author4 = Author.objects.create(alias="Ö", name="Jones")
author5 = Author.objects.create(alias="P", name="Jones")
qs = Author.objects.order_by(Collate(F("alias"), collation), "name")
self.assertSequenceEqual(
qs,
[self.author1, self.author2, author3, author5, author4],
)
def test_invalid_collation(self):
tests = [
None,
"",
'et-x-icu" OR ',
'"schema"."collation"',
]
msg = "Invalid collation name: %r."
for value in tests:
with self.subTest(value), self.assertRaisesMessage(ValueError, msg % value):
Collate(F("alias"), value)
| CollateTests |
python | kubernetes-client__python | kubernetes/client/models/v1_windows_security_context_options.py | {
"start": 383,
"end": 8455
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'gmsa_credential_spec': 'str',
'gmsa_credential_spec_name': 'str',
'host_process': 'bool',
'run_as_user_name': 'str'
}
attribute_map = {
'gmsa_credential_spec': 'gmsaCredentialSpec',
'gmsa_credential_spec_name': 'gmsaCredentialSpecName',
'host_process': 'hostProcess',
'run_as_user_name': 'runAsUserName'
}
def __init__(self, gmsa_credential_spec=None, gmsa_credential_spec_name=None, host_process=None, run_as_user_name=None, local_vars_configuration=None): # noqa: E501
"""V1WindowsSecurityContextOptions - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._gmsa_credential_spec = None
self._gmsa_credential_spec_name = None
self._host_process = None
self._run_as_user_name = None
self.discriminator = None
if gmsa_credential_spec is not None:
self.gmsa_credential_spec = gmsa_credential_spec
if gmsa_credential_spec_name is not None:
self.gmsa_credential_spec_name = gmsa_credential_spec_name
if host_process is not None:
self.host_process = host_process
if run_as_user_name is not None:
self.run_as_user_name = run_as_user_name
@property
def gmsa_credential_spec(self):
"""Gets the gmsa_credential_spec of this V1WindowsSecurityContextOptions. # noqa: E501
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. # noqa: E501
:return: The gmsa_credential_spec of this V1WindowsSecurityContextOptions. # noqa: E501
:rtype: str
"""
return self._gmsa_credential_spec
@gmsa_credential_spec.setter
def gmsa_credential_spec(self, gmsa_credential_spec):
"""Sets the gmsa_credential_spec of this V1WindowsSecurityContextOptions.
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. # noqa: E501
:param gmsa_credential_spec: The gmsa_credential_spec of this V1WindowsSecurityContextOptions. # noqa: E501
:type: str
"""
self._gmsa_credential_spec = gmsa_credential_spec
@property
def gmsa_credential_spec_name(self):
"""Gets the gmsa_credential_spec_name of this V1WindowsSecurityContextOptions. # noqa: E501
GMSACredentialSpecName is the name of the GMSA credential spec to use. # noqa: E501
:return: The gmsa_credential_spec_name of this V1WindowsSecurityContextOptions. # noqa: E501
:rtype: str
"""
return self._gmsa_credential_spec_name
@gmsa_credential_spec_name.setter
def gmsa_credential_spec_name(self, gmsa_credential_spec_name):
"""Sets the gmsa_credential_spec_name of this V1WindowsSecurityContextOptions.
GMSACredentialSpecName is the name of the GMSA credential spec to use. # noqa: E501
:param gmsa_credential_spec_name: The gmsa_credential_spec_name of this V1WindowsSecurityContextOptions. # noqa: E501
:type: str
"""
self._gmsa_credential_spec_name = gmsa_credential_spec_name
@property
def host_process(self):
"""Gets the host_process of this V1WindowsSecurityContextOptions. # noqa: E501
HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. # noqa: E501
:return: The host_process of this V1WindowsSecurityContextOptions. # noqa: E501
:rtype: bool
"""
return self._host_process
@host_process.setter
def host_process(self, host_process):
"""Sets the host_process of this V1WindowsSecurityContextOptions.
HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. # noqa: E501
:param host_process: The host_process of this V1WindowsSecurityContextOptions. # noqa: E501
:type: bool
"""
self._host_process = host_process
@property
def run_as_user_name(self):
"""Gets the run_as_user_name of this V1WindowsSecurityContextOptions. # noqa: E501
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501
:return: The run_as_user_name of this V1WindowsSecurityContextOptions. # noqa: E501
:rtype: str
"""
return self._run_as_user_name
@run_as_user_name.setter
def run_as_user_name(self, run_as_user_name):
"""Sets the run_as_user_name of this V1WindowsSecurityContextOptions.
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. # noqa: E501
:param run_as_user_name: The run_as_user_name of this V1WindowsSecurityContextOptions. # noqa: E501
:type: str
"""
self._run_as_user_name = run_as_user_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1WindowsSecurityContextOptions):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1WindowsSecurityContextOptions):
return True
return self.to_dict() != other.to_dict()
| V1WindowsSecurityContextOptions |
python | Pylons__pyramid | src/pyramid/viewderivers.py | {
"start": 1032,
"end": 17135
} | class ____:
def __init__(self, **kw):
self.attr = kw.get('attr')
def __call__(self, view):
if is_unbound_method(view) and self.attr is None:
raise ConfigurationError(
'Unbound method calls are not supported, please set the '
'class as your `view` and the method as your `attr`'
)
if inspect.isclass(view):
view = self.map_class(view)
else:
view = self.map_nonclass(view)
return view
def map_class(self, view):
ronly = requestonly(view, self.attr)
if ronly:
mapped_view = self.map_class_requestonly(view)
else:
mapped_view = self.map_class_native(view)
mapped_view.__text__ = 'method {} of {}'.format(
self.attr or '__call__',
object_description(view),
)
return mapped_view
def map_nonclass(self, view):
# We do more work here than appears necessary to avoid wrapping the
# view unless it actually requires wrapping (to avoid function call
# overhead).
mapped_view = view
ronly = requestonly(view, self.attr)
if ronly:
mapped_view = self.map_nonclass_requestonly(view)
elif self.attr:
mapped_view = self.map_nonclass_attr(view)
if inspect.isroutine(mapped_view):
# This branch will be true if the view is a function or a method.
# We potentially mutate an unwrapped object here if it's a
# function. We do this to avoid function call overhead of
# injecting another wrapper. However, we must wrap if the
# function is a bound method because we can't set attributes on a
# bound method.
if is_bound_method(view):
_mapped_view = mapped_view
def mapped_view(context, request):
return _mapped_view(context, request)
if self.attr is not None:
mapped_view.__text__ = 'attr {} of {}'.format(
self.attr,
object_description(view),
)
else:
mapped_view.__text__ = object_description(view)
return mapped_view
def map_class_requestonly(self, view):
# its a class that has an __init__ which only accepts request
attr = self.attr
def _class_requestonly_view(context, request):
inst = view(request)
request.__view__ = inst
if attr is None:
response = inst()
else:
response = getattr(inst, attr)()
return response
return _class_requestonly_view
def map_class_native(self, view):
# its a class that has an __init__ which accepts both context and
# request
attr = self.attr
def _class_view(context, request):
inst = view(context, request)
request.__view__ = inst
if attr is None:
response = inst()
else:
response = getattr(inst, attr)()
return response
return _class_view
def map_nonclass_requestonly(self, view):
# its a function that has a __call__ which accepts only a single
# request argument
attr = self.attr
def _requestonly_view(context, request):
if attr is None:
response = view(request)
else:
response = getattr(view, attr)(request)
return response
return _requestonly_view
def map_nonclass_attr(self, view):
# its a function that has a __call__ which accepts both context and
# request, but still has an attr
def _attr_view(context, request):
response = getattr(view, self.attr)(context, request)
return response
return _attr_view
def wraps_view(wrapper):
def inner(view, info):
wrapper_view = wrapper(view, info)
return preserve_view_attrs(view, wrapper_view)
return inner
def preserve_view_attrs(view, wrapper):
if view is None:
return wrapper
if wrapper is view:
return view
original_view = getattr(view, '__original_view__', None)
if original_view is None:
original_view = view
wrapper.__wraps__ = view
wrapper.__original_view__ = original_view
wrapper.__module__ = view.__module__
wrapper.__doc__ = view.__doc__
try:
wrapper.__name__ = view.__name__
except AttributeError:
wrapper.__name__ = repr(view)
# attrs that may not exist on "view", but, if so, must be attached to
# "wrapped view"
for attr in (
'__permitted__',
'__call_permissive__',
'__permission__',
'__predicated__',
'__predicates__',
'__accept__',
'__order__',
'__text__',
):
try:
setattr(wrapper, attr, getattr(view, attr))
except AttributeError:
pass
return wrapper
def mapped_view(view, info):
mapper = info.options.get('mapper')
if mapper is None:
mapper = getattr(view, '__view_mapper__', None)
if mapper is None:
mapper = info.registry.queryUtility(IViewMapperFactory)
if mapper is None:
mapper = DefaultViewMapper
mapped_view = mapper(**info.options)(view)
return mapped_view
mapped_view.options = ('mapper', 'attr')
def owrapped_view(view, info):
wrapper_viewname = info.options.get('wrapper')
viewname = info.options.get('name')
if not wrapper_viewname:
return view
def _owrapped_view(context, request):
response = view(context, request)
request.wrapped_response = response
request.wrapped_body = response.body
request.wrapped_view = view
wrapped_response = render_view_to_response(
context, request, wrapper_viewname
)
if wrapped_response is None:
raise ValueError(
'No wrapper view named %r found when executing view '
'named %r' % (wrapper_viewname, viewname)
)
return wrapped_response
return _owrapped_view
owrapped_view.options = ('name', 'wrapper')
def http_cached_view(view, info):
if info.settings.get('prevent_http_cache', False):
return view
seconds = info.options.get('http_cache')
if seconds is None:
return view
options = {}
if isinstance(seconds, (tuple, list)):
try:
seconds, options = seconds
except ValueError:
raise ConfigurationError(
'If http_cache parameter is a tuple or list, it must be '
'in the form (seconds, options); not %s' % (seconds,)
)
def wrapper(context, request):
response = view(context, request)
prevent_caching = getattr(
response.cache_control, 'prevent_auto', False
)
if not prevent_caching:
response.cache_expires(seconds, **options)
return response
return wrapper
http_cached_view.options = ('http_cache',)
def secured_view(view, info):
for wrapper in (_secured_view, _authdebug_view):
view = wraps_view(wrapper)(view, info)
return view
secured_view.options = ('permission',)
def _secured_view(view, info):
permission = info.options.get('permission')
if not info.exception_only and permission is None:
# no permission is specified on the view so we pull in the default
# permission - however if this is an exception view then we do not want
# to inherit the default permission by definition
permission = info.registry.queryUtility(IDefaultPermission)
if permission == NO_PERMISSION_REQUIRED:
# allow views registered within configurations that have a
# default permission to explicitly override the default
# permission, replacing it with no permission at all
permission = None
policy = info.registry.queryUtility(ISecurityPolicy)
if policy is None or permission is None:
# all security is disabled on this view if no policy or permission
return view
def permitted(context, request):
return policy.permits(request, context, permission)
def secured_view(context, request):
result = permitted(context, request)
if result:
return view(context, request)
view_name = getattr(view, '__name__', view)
msg = getattr(
request,
'authdebug_message',
'Unauthorized: %s failed permission check' % view_name,
)
raise HTTPForbidden(msg, result=result)
secured_view.__call_permissive__ = view
secured_view.__permitted__ = permitted
secured_view.__permission__ = permission
return secured_view
def _authdebug_view(view, info):
# XXX this view logic is slightly different from the _secured_view above
# because we want it to run in more situations than _secured_view - we are
# trying to log helpful info about basically any view that is executed -
# basically we only skip it if it's a default exception view with no
# special permissions
settings = info.settings
if not settings or not settings.get('debug_authorization', False):
# no-op if debug_authorization is disabled
return view
permission = info.options.get('permission')
if info.exception_only and (
permission is None or permission == NO_PERMISSION_REQUIRED
):
# no logging on any exception views with no permissions (the default)
return view
if permission is None:
# allow views registered within configurations that have a
# default permission to explicitly override the default
# permission, replacing it with no permission at all
permission = info.registry.queryUtility(IDefaultPermission)
policy = info.registry.queryUtility(ISecurityPolicy)
logger = info.registry.queryUtility(IDebugLogger)
def authdebug_view(context, request):
if policy:
if permission is NO_PERMISSION_REQUIRED:
msg = 'Allowed (NO_PERMISSION_REQUIRED)'
elif permission is None:
msg = 'Allowed (no permission registered)'
else:
result = policy.permits(request, context, permission)
msg = str(result)
else:
msg = 'Allowed (no security policy in use)'
view_name = getattr(request, 'view_name', None)
url = getattr(request, 'url', None)
msg = (
'debug_authorization of url %s (view name %r against '
'context %r): %s' % (url, view_name, context, msg)
)
if logger:
logger.debug(msg)
if request is not None:
request.authdebug_message = msg
return view(context, request)
return authdebug_view
def rendered_view(view, info):
# one way or another this wrapper must produce a Response (unless
# the renderer is a NullRendererHelper)
renderer = info.options.get('renderer')
if renderer is None:
# register a default renderer if you want super-dynamic
# rendering. registering a default renderer will also allow
# override_renderer to work if a renderer is left unspecified for
# a view registration.
def viewresult_to_response(context, request):
result = view(context, request)
if result.__class__ is Response: # common case
response = result
else:
response = info.registry.queryAdapterOrSelf(result, IResponse)
if response is None:
if result is None:
append = (
' You may have forgotten to return a value '
'from the view callable.'
)
elif isinstance(result, dict):
append = (
' You may have forgotten to define a '
'renderer in the view configuration.'
)
else:
append = ''
msg = (
'Could not convert return value of the view '
'callable %s into a response object. '
'The value returned was %r.' + append
)
raise ValueError(msg % (view_description(view), result))
return response
return viewresult_to_response
if renderer is renderers.null_renderer:
return view
def rendered_view(context, request):
result = view(context, request)
if result.__class__ is Response: # potential common case
response = result
else:
# this must adapt, it can't do a simple interface check
# (avoid trying to render webob responses)
response = info.registry.queryAdapterOrSelf(result, IResponse)
if response is None:
attrs = getattr(request, '__dict__', {})
if 'override_renderer' in attrs:
# renderer overridden by newrequest event or other
renderer_name = attrs.pop('override_renderer')
view_renderer = renderers.RendererHelper(
name=renderer_name,
package=info.package,
registry=info.registry,
)
else:
view_renderer = renderer.clone()
if '__view__' in attrs:
view_inst = attrs.pop('__view__')
else:
view_inst = getattr(view, '__original_view__', view)
response = view_renderer.render_view(
request, result, view_inst, context
)
return response
return rendered_view
rendered_view.options = ('renderer',)
def decorated_view(view, info):
decorator = info.options.get('decorator')
if decorator is None:
return view
return decorator(view)
decorated_view.options = ('decorator',)
def csrf_view(view, info):
explicit_val = info.options.get('require_csrf')
defaults = info.registry.queryUtility(IDefaultCSRFOptions)
if defaults is None:
default_val = False
token = 'csrf_token'
header = 'X-CSRF-Token'
safe_methods = frozenset(["GET", "HEAD", "OPTIONS", "TRACE"])
check_origin = True
allow_no_origin = False
callback = None
else:
default_val = defaults.require_csrf
token = defaults.token
header = defaults.header
safe_methods = defaults.safe_methods
check_origin = defaults.check_origin
allow_no_origin = defaults.allow_no_origin
callback = defaults.callback
enabled = (
explicit_val is True
or
# fallback to the default val if not explicitly enabled
# but only if the view is not an exception view
(explicit_val is not False and default_val and not info.exception_only)
)
# disable if both header and token are disabled
enabled = enabled and (token or header)
wrapped_view = view
if enabled:
def csrf_view(context, request):
if request.method not in safe_methods and (
callback is None or callback(request)
):
if check_origin:
check_csrf_origin(
request, raises=True, allow_no_origin=allow_no_origin
)
check_csrf_token(request, token, header, raises=True)
return view(context, request)
wrapped_view = csrf_view
return wrapped_view
csrf_view.options = ('require_csrf',)
VIEW = 'VIEW'
INGRESS = 'INGRESS'
| DefaultViewMapper |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/state.py | {
"start": 37512,
"end": 38576
} | class ____:
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
__slots__ = ("deleted_items", "added_items")
deleted_items: util.IdentitySet
added_items: util.OrderedIdentitySet
def __init__(self) -> None:
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def merge_with_history(self, history: History) -> History:
return history._merge(self.added_items, self.deleted_items)
def append(self, value: Any) -> None:
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value: Any) -> None:
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
| PendingCollection |
python | tiangolo__fastapi | docs_src/schema_extra_example/tutorial003_an_py310.py | {
"start": 114,
"end": 673
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
@app.put("/items/{item_id}")
async def update_item(
item_id: int,
item: Annotated[
Item,
Body(
examples=[
{
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
}
],
),
],
):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | django__django | tests/multiple_database/models.py | {
"start": 1357,
"end": 1823
} | class ____(models.Model):
title = models.CharField(max_length=100)
published = models.DateField()
authors = models.ManyToManyField(Person)
editor = models.ForeignKey(
Person, models.SET_NULL, null=True, related_name="edited"
)
reviews = GenericRelation(Review)
pages = models.IntegerField(default=100)
objects = BookManager()
class Meta:
ordering = ("title",)
def __str__(self):
return self.title
| Book |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_inlinehilite.py | {
"start": 11344,
"end": 12195
} | class ____(util.MdCase):
"""Test custom InlineHilite cases."""
extension = [
'pymdownx.highlight',
'pymdownx.inlinehilite',
]
extension_configs = {
'pymdownx.inlinehilite': {
'css_class': 'inlinehilite',
'custom_inline': [
{
'name': 'math',
'class': 'arithmatex',
'format': arithmatex.arithmatex_inline_format(mode="mathjax", preview=True)
}
]
}
}
def test_preview_arithmatex(self):
"""Test preview Arithmatex."""
self.check_markdown(
r'`#!math 3 + 3`',
r'<p><span class="arithmatex"><span class="MathJax_Preview">3 + 3</span>'
r'<script type="math/tex">3 + 3</script></span></p>'
)
| TestInlineHiliteCustom2 |
python | TheAlgorithms__Python | sorts/external_sort.py | {
"start": 2182,
"end": 2867
} | class ____:
def __init__(self, merge_strategy):
self.merge_strategy = merge_strategy
def merge(self, filenames, outfilename, buffer_size):
buffers = FilesArray(self.get_file_handles(filenames, buffer_size))
with open(outfilename, "w", buffer_size) as outfile:
while buffers.refresh():
min_index = self.merge_strategy.select(buffers.get_dict())
outfile.write(buffers.unshift(min_index))
def get_file_handles(self, filenames, buffer_size):
files = {}
for i in range(len(filenames)):
files[i] = open(filenames[i], "r", buffer_size) # noqa: UP015
return files
| FileMerger |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 1741,
"end": 1915
} | class ____(Exception): # noqa: N818
"""An exception that is raised by the fragment
when it has handled the exception itself.
"""
pass
| FragmentHandledException |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 545398,
"end": 545719
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("PushAllowance", graphql_name="node")
| PushAllowanceEdge |
python | huggingface__transformers | examples/modular-transformers/modular_my_new_model.py | {
"start": 151,
"end": 8464
} | class ____(LlamaConfig):
r"""
This is the configuration class to store the configuration of a [`MyNewModelModel`]. It is used to instantiate an MyNewModel
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MyNewModel-7B.
e.g. [meta-my_new_model/MyNewModel-2-7b-hf](https://huggingface.co/meta-my_new_model/MyNewModel-2-7b-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the MyNewModel model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MyNewModelModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. MyNewModel 1 supports up to 2048 tokens,
MyNewModel 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'my_new_model3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'my_new_model3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import MyNewModelModel, MyNewModelConfig
>>> # Initializing a MyNewModel my_new_model-7b style configuration
>>> configuration = MyNewModelConfig()
>>> # Initializing a model from the my_new_model-7b style configuration
>>> model = MyNewModelModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
def __init__(self, mlp_bias=True, new_param=0, **super_kwargs):
super().__init__(self, **super_kwargs)
self.mlp_bias = mlp_bias
self.new_param = new_param
| MyNewModelConfig |
python | getsentry__sentry | src/sentry/seer/explorer/custom_tool_utils.py | {
"start": 301,
"end": 556
} | class ____(StrEnum):
"""Allowed parameter types for Explorer tools."""
STRING = "string"
INTEGER = "integer"
NUMBER = "number"
BOOLEAN = "boolean"
ARRAY = "array"
# Type specifications for different parameter types
| ExplorerParamType |
python | walkccc__LeetCode | solutions/110. Balanced Binary Tree/110-3.py | {
"start": 0,
"end": 534
} | class ____:
def isBalanced(self, root: TreeNode | None) -> bool:
def maxDepth(root: TreeNode | None) -> int:
"""Returns the height of root if root is balanced; otherwise, returns -1."""
if not root:
return 0
left = maxDepth(root.left)
if left == -1:
return -1
right = maxDepth(root.right)
if right == -1:
return -1
if abs(left - right) > 1:
return -1
return 1 + max(maxDepth(root.left), maxDepth(root.right))
return maxDepth(root) != -1
| Solution |
python | modin-project__modin | modin/experimental/core/storage_formats/pandas/parsers.py | {
"start": 4508,
"end": 5108
} | class ____(PandasParser):
@staticmethod
@doc(_doc_parse_func, parameters=_doc_parse_parameters_common)
def parse(fname, **kwargs):
warnings.filterwarnings("ignore")
num_splits = 1
single_worker_read = kwargs.pop("single_worker_read", None)
df = pandas.read_parquet(fname, **kwargs)
if single_worker_read:
return df
length = len(df)
width = len(df.columns)
return _split_result_for_readers(1, num_splits, df) + [length, width]
@doc(_doc_pandas_parser_class, data_type="json files")
| ExperimentalPandasParquetParser |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_sql.py | {
"start": 37205,
"end": 42620
} | class ____(CloudSQLBaseOperator):
"""
Export data from a Cloud SQL instance to a Cloud Storage bucket.
The exported format can be a SQL dump or CSV file.
Note: This operator is idempotent. If executed multiple times with the same
export file URI, the export file in GCS will simply be overridden.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLExportInstanceOperator`
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param validate_body: Whether the body should be validated. Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param poke_interval: (Deferrable mode only) Time (seconds) to wait between calls
to check the run status.
"""
# [START gcp_sql_export_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_export_template_fields]
ui_color = "#D4ECEA"
operator_extra_links = (CloudSQLInstanceLink(), FileDetailsLink())
def __init__(
self,
*,
instance: str,
body: dict,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poke_interval: int = 10,
**kwargs,
) -> None:
self.body = body
self.validate_body = validate_body
self.deferrable = deferrable
self.poke_interval = poke_interval
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(CLOUD_SQL_EXPORT_VALIDATION, api_version=self.api_version).validate(
self.body
)
def execute(self, context: Context) -> None:
self._validate_body_fields()
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
CloudSQLInstanceLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
FileDetailsLink.persist(
context=context,
uri=self.body["exportContext"]["uri"][5:],
project_id=self.project_id or hook.project_id,
)
operation_name = hook.export_instance(
project_id=self.project_id, instance=self.instance, body=self.body
)
if not self.deferrable:
return hook._wait_for_operation_to_complete(
project_id=self.project_id, operation_name=operation_name
)
self.defer(
trigger=CloudSQLExportTrigger(
operation_name=operation_name,
project_id=self.project_id or hook.project_id,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
poke_interval=self.poke_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None) -> None:
"""
Act as a callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "success":
self.log.info("Operation %s completed successfully", event["operation_name"])
else:
self.log.exception("Unexpected error in the operation.")
raise AirflowException(event["message"])
| CloudSQLExportInstanceOperator |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/rapidsmpf/io.py | {
"start": 1740,
"end": 21894
} | class ____:
"""
Linearizer that ensures ordered delivery from multiple concurrent producers.
Creates one input channel per producer and streams messages to output
in sequence-number order, buffering only out-of-order arrivals.
"""
def __init__(
self, context: Context, ch_out: Channel[TableChunk], num_producers: int
):
self.context = context
self.ch_out = ch_out
self.num_producers = num_producers
self.input_channels = [context.create_channel() for _ in range(num_producers)]
async def drain(self) -> None:
"""
Drain producer channels and forward messages in sequence-number order.
Streams messages to output as soon as they arrive in order, buffering
only out-of-order messages to minimize memory pressure.
"""
next_seq = 0
buffer = {}
pending_tasks = {
asyncio.create_task(ch.recv(self.context)): ch for ch in self.input_channels
}
while pending_tasks:
done, _ = await asyncio.wait(
pending_tasks.keys(), return_when=asyncio.FIRST_COMPLETED
)
for task in done:
ch = pending_tasks.pop(task)
msg = await task
if msg is not None:
buffer[msg.sequence_number] = msg
new_task = asyncio.create_task(ch.recv(self.context))
pending_tasks[new_task] = ch
# Forward consecutive messages
while next_seq in buffer:
await self.ch_out.send(self.context, buffer.pop(next_seq))
next_seq += 1
# Forward any remaining buffered messages
for seq in sorted(buffer.keys()):
await self.ch_out.send(self.context, buffer[seq])
await self.ch_out.drain(self.context)
@lower_ir_node.register(DataFrameScan)
def _(
ir: DataFrameScan, rec: LowerIRTransformer
) -> tuple[IR, MutableMapping[IR, PartitionInfo]]:
config_options = rec.state["config_options"]
assert config_options.executor.name == "streaming", (
"'in-memory' executor not supported in 'lower_ir_node_rapidsmpf'"
)
# NOTE: We calculate the expected partition count
# to help trigger fallback warnings in lower_ir_graph.
# The generate_ir_sub_network logic is NOT required
# to obey this partition count. However, the count
# WILL match after an IO operation (for now).
rows_per_partition = config_options.executor.max_rows_per_partition
nrows = max(ir.df.shape()[0], 1)
count = math.ceil(nrows / rows_per_partition)
return ir, {ir: PartitionInfo(count=count)}
@define_py_node()
async def dataframescan_node(
context: Context,
ir: DataFrameScan,
ir_context: IRExecutionContext,
ch_out: ChannelPair,
*,
num_producers: int,
rows_per_partition: int,
) -> None:
"""
DataFrameScan node for rapidsmpf.
Parameters
----------
context
The rapidsmpf context.
ir
The DataFrameScan node.
ir_context
The execution context for the IR node.
ch_out
The output ChannelPair.
num_producers
The number of producers to use for the DataFrameScan node.
rows_per_partition
The number of rows per partition.
"""
async with shutdown_on_error(context, ch_out.metadata, ch_out.data):
# Find local partition count.
nrows = max(ir.df.shape()[0], 1)
global_count = math.ceil(nrows / rows_per_partition)
# For single rank, simplify the logic
if context.comm().nranks == 1:
local_count = global_count
local_offset = 0
else:
local_count = math.ceil(global_count / context.comm().nranks)
local_offset = local_count * context.comm().rank
# Send basic metadata
await ch_out.send_metadata(context, Metadata(local_count))
# Build list of IR slices to read
ir_slices = []
for seq_num in range(local_count):
offset = local_offset * rows_per_partition + seq_num * rows_per_partition
if offset >= nrows:
break
ir_slices.append(
DataFrameScan(
ir.schema,
ir.df.slice(offset, rows_per_partition),
ir.projection,
)
)
# If there are no slices, send an empty chunk for now.
# TODO: We shouldn't need to do this.
if len(ir_slices) == 0:
stream = ir_context.get_cuda_stream()
await ch_out.data.send(
context,
Message(
0,
empty_table_chunk(ir, context, stream),
),
)
await ch_out.data.drain(context)
return
# Use Lineariser to ensure ordered delivery
num_producers = min(num_producers, len(ir_slices))
lineariser = Lineariser(context, ch_out.data, num_producers)
# Assign tasks to producers using round-robin
producer_tasks: list[list[tuple[int, DataFrameScan]]] = [
[] for _ in range(num_producers)
]
for task_idx, ir_slice in enumerate(ir_slices):
producer_id = task_idx % num_producers
producer_tasks[producer_id].append((task_idx, ir_slice))
async def _producer(producer_id: int, ch_out: Channel) -> None:
for task_idx, ir_slice in producer_tasks[producer_id]:
await read_chunk(
context,
ir_slice,
task_idx,
ch_out,
ir_context,
)
await ch_out.drain(context)
tasks = [lineariser.drain()]
tasks.extend(
_producer(i, ch_in) for i, ch_in in enumerate(lineariser.input_channels)
)
await asyncio.gather(*tasks)
@generate_ir_sub_network.register(DataFrameScan)
def _(
ir: DataFrameScan, rec: SubNetGenerator
) -> tuple[dict[IR, list[Any]], dict[IR, ChannelManager]]:
config_options = rec.state["config_options"]
assert config_options.executor.name == "streaming", (
"'in-memory' executor not supported in 'generate_ir_sub_network'"
)
rows_per_partition = config_options.executor.max_rows_per_partition
num_producers = rec.state["max_io_threads"]
context = rec.state["context"]
ir_context = rec.state["ir_context"]
channels: dict[IR, ChannelManager] = {ir: ChannelManager(rec.state["context"])}
nodes: dict[IR, list[Any]] = {
ir: [
dataframescan_node(
context,
ir,
ir_context,
channels[ir].reserve_input_slot(),
num_producers=num_producers,
rows_per_partition=rows_per_partition,
)
]
}
return nodes, channels
@lower_ir_node.register(Scan)
def _(
ir: Scan, rec: LowerIRTransformer
) -> tuple[IR, MutableMapping[IR, PartitionInfo]]:
config_options = rec.state["config_options"]
if (
ir.typ in ("csv", "parquet", "ndjson")
and ir.n_rows == -1
and ir.skip_rows == 0
and ir.row_index is None
):
# NOTE: We calculate the expected partition count
# to help trigger fallback warnings in lower_ir_graph.
# The generate_ir_sub_network logic is NOT required
# to obey this partition count. However, the count
# WILL match after an IO operation (for now).
plan = scan_partition_plan(ir, rec.state["stats"], config_options)
paths = list(ir.paths)
if plan.flavor == IOPartitionFlavor.SPLIT_FILES:
count = plan.factor * len(paths)
else:
count = math.ceil(len(paths) / plan.factor)
return ir, {ir: PartitionInfo(count=count, io_plan=plan)}
else:
plan = IOPartitionPlan(
flavor=IOPartitionFlavor.SINGLE_READ, factor=len(ir.paths)
)
return ir, {ir: PartitionInfo(count=1, io_plan=plan)}
async def read_chunk(
context: Context,
scan: IR,
seq_num: int,
ch_out: Channel[TableChunk],
ir_context: IRExecutionContext,
) -> None:
"""
Read a chunk from disk and send it to the output channel.
Parameters
----------
context
The rapidsmpf context.
scan
The Scan or DataFrameScan node.
seq_num
The sequence number.
ch_out
The output channel.
ir_context
The execution context for the IR node.
"""
# Evaluate and send the Scan-node result
df = await asyncio.to_thread(
scan.do_evaluate,
*scan._non_child_args,
context=ir_context,
)
await ch_out.send(
context,
Message(
seq_num,
TableChunk.from_pylibcudf_table(
df.table,
df.stream,
exclusive_view=True,
),
),
)
@define_py_node()
async def scan_node(
context: Context,
ir: Scan,
ir_context: IRExecutionContext,
ch_out: ChannelPair,
*,
num_producers: int,
plan: IOPartitionPlan,
parquet_options: ParquetOptions,
) -> None:
"""
Scan node for rapidsmpf.
Parameters
----------
context
The rapidsmpf context.
ir
The Scan node.
ir_context
The execution context for the IR node.
ch_out
The output ChannelPair.
num_producers
The number of producers to use for the scan node.
plan
The partitioning plan.
parquet_options
The Parquet options.
"""
async with shutdown_on_error(context, ch_out.metadata, ch_out.data):
# Build a list of local Scan operations
scans: list[Scan | SplitScan] = []
if plan.flavor == IOPartitionFlavor.SPLIT_FILES:
count = plan.factor * len(ir.paths)
local_count = math.ceil(count / context.comm().nranks)
local_offset = local_count * context.comm().rank
path_offset = local_offset // plan.factor
path_end = math.ceil((local_offset + local_count) / plan.factor)
path_count = path_end - path_offset
local_paths = ir.paths[path_offset : path_offset + path_count]
sindex = local_offset % plan.factor
splits_created = 0
for path in local_paths:
base_scan = Scan(
ir.schema,
ir.typ,
ir.reader_options,
ir.cloud_options,
[path],
ir.with_columns,
ir.skip_rows,
ir.n_rows,
ir.row_index,
ir.include_file_paths,
ir.predicate,
parquet_options,
)
while sindex < plan.factor and splits_created < local_count:
scans.append(
SplitScan(
ir.schema,
base_scan,
sindex,
plan.factor,
parquet_options,
)
)
sindex += 1
splits_created += 1
sindex = 0
else:
count = math.ceil(len(ir.paths) / plan.factor)
local_count = math.ceil(count / context.comm().nranks)
local_offset = local_count * context.comm().rank
paths_offset_start = local_offset * plan.factor
paths_offset_end = paths_offset_start + plan.factor * local_count
for offset in range(paths_offset_start, paths_offset_end, plan.factor):
local_paths = ir.paths[offset : offset + plan.factor]
if len(local_paths) > 0: # Only add scan if there are paths
scans.append(
Scan(
ir.schema,
ir.typ,
ir.reader_options,
ir.cloud_options,
local_paths,
ir.with_columns,
ir.skip_rows,
ir.n_rows,
ir.row_index,
ir.include_file_paths,
ir.predicate,
parquet_options,
)
)
# Send basic metadata
await ch_out.send_metadata(context, Metadata(max(1, len(scans))))
# If there are no scans, send an empty chunk for now.
# TODO: We shouldn't need to do this.
if len(scans) == 0:
stream = ir_context.get_cuda_stream()
await ch_out.data.send(
context,
Message(
0,
empty_table_chunk(ir, context, stream),
),
)
await ch_out.data.drain(context)
return
# Use Lineariser to ensure ordered delivery
num_producers = min(num_producers, len(scans))
lineariser = Lineariser(context, ch_out.data, num_producers)
# Assign tasks to producers using round-robin
producer_tasks: list[list[tuple[int, Scan | SplitScan]]] = [
[] for _ in range(num_producers)
]
for task_idx, scan in enumerate(scans):
producer_id = task_idx % num_producers
producer_tasks[producer_id].append((task_idx, scan))
async def _producer(producer_id: int, ch_out: Channel) -> None:
for task_idx, scan in producer_tasks[producer_id]:
await read_chunk(
context,
scan,
task_idx,
ch_out,
ir_context,
)
await ch_out.drain(context)
tasks = [lineariser.drain()]
tasks.extend(
_producer(i, ch_in) for i, ch_in in enumerate(lineariser.input_channels)
)
await asyncio.gather(*tasks)
def make_rapidsmpf_read_parquet_node(
context: Context,
ir: Scan,
num_producers: int,
ch_out: ChannelPair,
stats: StatsCollector,
partition_info: PartitionInfo,
) -> Any | None:
"""
Make a RapidsMPF read parquet node.
Parameters
----------
context
The rapidsmpf context.
ir
The Scan node.
num_producers
The number of producers to use for the scan node.
ch_out
The output ChannelPair.
stats
The statistics collector.
partition_info
The partition information.
Returns
-------
The RapidsMPF read parquet node, or None if the predicate cannot be
converted to a parquet filter (caller should fall back to scan_node).
"""
from rapidsmpf.streaming.cudf.parquet import Filter, read_parquet
# Build ParquetReaderOptions
try:
stream = context.get_stream_from_pool()
parquet_reader_options = plc.io.parquet.ParquetReaderOptions.builder(
plc.io.SourceInfo(ir.paths)
).build()
if ir.with_columns is not None:
parquet_reader_options.set_columns(ir.with_columns)
# Build predicate filter if present (passed separately to read_parquet)
filter_obj = None
if ir.predicate is not None:
filter_expr = to_parquet_filter(
_cast_literals_to_physical_types(
ir.predicate.value,
_parquet_physical_types(
ir.schema,
ir.paths,
ir.with_columns or list(ir.schema.keys()),
stream,
),
),
stream=stream,
)
if filter_expr is None:
# Predicate cannot be converted to parquet filter
# Return None to signal fallback to scan_node
return None
filter_obj = Filter(stream, filter_expr)
except Exception as e:
raise ValueError(f"Failed to build ParquetReaderOptions: {e}") from e
# Calculate num_rows_per_chunk from statistics
# Default to a reasonable chunk size if statistics are unavailable
estimated_row_count: ColumnStat[int] | None = stats.row_count.get(ir)
if estimated_row_count is None:
for cs in stats.column_stats.get(ir, {}).values():
if cs.source_info.row_count.value is not None:
estimated_row_count = cs.source_info.row_count
break
if estimated_row_count is not None and estimated_row_count.value is not None:
num_rows_per_chunk = int(
max(1, estimated_row_count.value // partition_info.count)
)
else:
# Fallback: use a default chunk size if statistics are not available
num_rows_per_chunk = 1_000_000 # 1 million rows as default
# Validate inputs
if num_rows_per_chunk <= 0:
raise ValueError(f"Invalid num_rows_per_chunk: {num_rows_per_chunk}")
if num_producers <= 0:
raise ValueError(f"Invalid num_producers: {num_producers}")
try:
return read_parquet(
context,
ch_out.data,
num_producers,
parquet_reader_options,
num_rows_per_chunk,
filter=filter_obj,
)
except Exception as e:
raise RuntimeError(
f"Failed to create read_parquet node: {e}\n"
f" paths: {ir.paths}\n"
f" num_producers: {num_producers}\n"
f" num_rows_per_chunk: {num_rows_per_chunk}\n"
f" partition_count: {partition_info.count}\n"
f" filter: {filter_obj}"
) from e
@generate_ir_sub_network.register(Scan)
def _(
ir: Scan, rec: SubNetGenerator
) -> tuple[dict[IR, list[Any]], dict[IR, ChannelManager]]:
config_options = rec.state["config_options"]
assert config_options.executor.name == "streaming", (
"'in-memory' executor not supported in 'generate_ir_sub_network'"
)
parquet_options = config_options.parquet_options
partition_info = rec.state["partition_info"][ir]
num_producers = rec.state["max_io_threads"]
channels: dict[IR, ChannelManager] = {ir: ChannelManager(rec.state["context"])}
assert partition_info.io_plan is not None, "Scan node must have a partition plan"
plan: IOPartitionPlan = partition_info.io_plan
# Use rapidsmpf native read_parquet for multi-partition Parquet scans.
ch_pair = channels[ir].reserve_input_slot()
nodes: dict[IR, list[Any]] = {}
native_node: Any = None
if (
partition_info.count > 1
and ir.typ == "parquet"
and ir.row_index is None
and ir.include_file_paths is None
and ir.n_rows == -1
and ir.skip_rows == 0
):
native_node = make_rapidsmpf_read_parquet_node(
rec.state["context"],
ir,
num_producers,
ch_pair,
rec.state["stats"],
partition_info,
)
if native_node is not None:
# Need metadata node, because the native read_parquet
# node does not send metadata.
metadata_node = metadata_feeder_node(
rec.state["context"],
ch_pair,
Metadata(partition_info.count),
)
nodes[ir] = [metadata_node, native_node]
else:
# Fall back to scan_node (predicate not convertible, or other constraint)
parquet_options = dataclasses.replace(parquet_options, chunked=False)
nodes[ir] = [
scan_node(
rec.state["context"],
ir,
rec.state["ir_context"],
ch_pair,
num_producers=num_producers,
plan=plan,
parquet_options=parquet_options,
)
]
return nodes, channels
| Lineariser |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/assets/asset_config.py | {
"start": 69,
"end": 283
} | class ____(Config):
api_endpoint: str
@asset
def my_downstream_asset(config: MyDownstreamAssetConfig):
data = requests.get(f"{config.api_endpoint}/data").json()
...
# end_example
| MyDownstreamAssetConfig |
python | openai__openai-python | src/openai/types/beta/thread.py | {
"start": 1097,
"end": 2132
} | class ____(BaseModel):
id: str
"""The identifier, which can be referenced in API endpoints."""
created_at: int
"""The Unix timestamp (in seconds) for when the thread was created."""
metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
object: Literal["thread"]
"""The object type, which is always `thread`."""
tool_resources: Optional[ToolResources] = None
"""
A set of resources that are made available to the assistant's tools in this
thread. The resources are specific to the type of tool. For example, the
`code_interpreter` tool requires a list of file IDs, while the `file_search`
tool requires a list of vector store IDs.
"""
| Thread |
python | falconry__falcon | falcon/redirects.py | {
"start": 1647,
"end": 2589
} | class ____(HTTPStatus):
"""302 Found.
The 302 (Found) status code indicates that the target resource
resides temporarily under a different URI. Since the redirection
might be altered on occasion, the client ought to continue to use the
effective request URI for future requests.
Note:
For historical reasons, a user agent MAY change the request
method from POST to GET for the subsequent request. If this
behavior is undesired, the 307 (Temporary Redirect) status code
can be used instead.
(See also: RFC 7231, Section 6.4.3)
Args:
location (str): URI to provide as the Location header in the
response.
"""
def __init__(self, location: str, headers: Headers | None = None) -> None:
if headers is None:
headers = {}
headers.setdefault('location', location)
super().__init__(falcon.HTTP_302, headers)
| HTTPFound |
python | numpy__numpy | tools/swig/test/testArray.py | {
"start": 8739,
"end": 13051
} | class ____(unittest.TestCase):
def setUp(self):
self.length = 5
self.array3 = Array.ArrayZ(self.length)
def testConstructor0(self):
"Test ArrayZ default constructor"
a = Array.ArrayZ()
self.assertTrue(isinstance(a, Array.ArrayZ))
self.assertTrue(len(a) == 0)
def testConstructor1(self):
"Test ArrayZ length constructor"
self.assertTrue(isinstance(self.array3, Array.ArrayZ))
def testConstructor2(self):
"Test ArrayZ array constructor"
na = np.arange(self.length, dtype=np.complex128)
aa = Array.ArrayZ(na)
self.assertTrue(isinstance(aa, Array.ArrayZ))
def testConstructor3(self):
"Test ArrayZ copy constructor"
for i in range(self.array3.length()):
self.array3[i] = complex(i, -i)
arrayCopy = Array.ArrayZ(self.array3)
self.assertTrue(arrayCopy == self.array3)
def testConstructorBad(self):
"Test ArrayZ length constructor, negative"
self.assertRaises(ValueError, Array.ArrayZ, -4)
def testLength(self):
"Test ArrayZ length method"
self.assertTrue(self.array3.length() == self.length)
def testLen(self):
"Test ArrayZ __len__ method"
self.assertTrue(len(self.array3) == self.length)
def testResize0(self):
"Test ArrayZ resize method, length"
newLen = 2 * self.length
self.array3.resize(newLen)
self.assertTrue(len(self.array3) == newLen)
def testResize1(self):
"Test ArrayZ resize method, array"
a = np.zeros((2 * self.length,), dtype=np.complex128)
self.array3.resize(a)
self.assertTrue(len(self.array3) == a.size)
def testResizeBad(self):
"Test ArrayZ resize method, negative length"
self.assertRaises(ValueError, self.array3.resize, -5)
def testSetGet(self):
"Test ArrayZ __setitem__, __getitem__ methods"
n = self.length
for i in range(n):
self.array3[i] = i * i
for i in range(n):
self.assertTrue(self.array3[i] == i * i)
def testSetBad1(self):
"Test ArrayZ __setitem__ method, negative index"
self.assertRaises(IndexError, self.array3.__setitem__, -1, 0)
def testSetBad2(self):
"Test ArrayZ __setitem__ method, out-of-range index"
self.assertRaises(IndexError, self.array3.__setitem__, self.length + 1, 0)
def testGetBad1(self):
"Test ArrayZ __getitem__ method, negative index"
self.assertRaises(IndexError, self.array3.__getitem__, -1)
def testGetBad2(self):
"Test ArrayZ __getitem__ method, out-of-range index"
self.assertRaises(IndexError, self.array3.__getitem__, self.length + 1)
def testAsString(self):
"Test ArrayZ asString method"
for i in range(self.array3.length()):
self.array3[i] = complex(i + 1, -i - 1)
self.assertTrue(self.array3.asString() ==
"[ (1,-1), (2,-2), (3,-3), (4,-4), (5,-5) ]")
def testStr(self):
"Test ArrayZ __str__ method"
for i in range(self.array3.length()):
self.array3[i] = complex(i - 2, (i - 2) * 2)
self.assertTrue(str(self.array3) == "[ (-2,-4), (-1,-2), (0,0), (1,2), (2,4) ]")
def testView(self):
"Test ArrayZ view method"
for i in range(self.array3.length()):
self.array3[i] = complex(i + 1, i + 2)
a = self.array3.view()
self.assertTrue(isinstance(a, np.ndarray))
self.assertTrue(len(a) == self.length)
self.assertTrue((a == [1 + 2j, 2 + 3j, 3 + 4j, 4 + 5j, 5 + 6j]).all())
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array1TestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Array2TestCase))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ArrayZTestCase))
# Execute the test suite
print("Testing Classes of Module Array")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
| ArrayZTestCase |
python | doocs__leetcode | solution/0100-0199/0123.Best Time to Buy and Sell Stock III/Solution.py | {
"start": 0,
"end": 350
} | class ____:
def maxProfit(self, prices: List[int]) -> int:
# 第一次买入,第一次卖出,第二次买入,第二次卖出
f1, f2, f3, f4 = -prices[0], 0, -prices[0], 0
for price in prices[1:]:
f1 = max(f1, -price)
f2 = max(f2, f1 + price)
f3 = max(f3, f2 - price)
f4 = max(f4, f3 + price)
return f4
| Solution |
python | ray-project__ray | doc/source/custom_directives.py | {
"start": 2714,
"end": 4998
} | class ____:
"""
This class downloads markdown readme files for various
ecosystem libraries, saves them in specified locations and preprocesses
them before sphinx build starts.
If you have ecosystem libraries that live in a separate repo from Ray,
adding them here will allow for their docs to be present in Ray docs
without the need for duplicate files. For more details, see ``doc/README.md``.
"""
def __init__(self, base_path: str) -> None:
self.base_path = pathlib.Path(base_path).absolute()
assert self.base_path.is_dir()
self.original_docs = {}
@staticmethod
def get_latest_release_tag(repo: str) -> str:
"""repo is just the repo name, eg. ray-project/ray"""
response = requests.get(f"https://api.github.com/repos/{repo}/releases/latest")
return response.json()["tag_name"]
@staticmethod
def get_file_from_github(
repo: str, ref: str, path_to_get_from_repo: str, path_to_save_on_disk: str
) -> None:
"""If ``ref == "latest"``, use latest release"""
if ref == "latest":
ref = DownloadAndPreprocessEcosystemDocs.get_latest_release_tag(repo)
urllib.request.urlretrieve(
f"https://raw.githubusercontent.com/{repo}/{ref}/{path_to_get_from_repo}",
path_to_save_on_disk,
)
def save_original_doc(self, path: str):
with open(path, "r") as f:
self.original_docs[path] = f.read()
def write_new_docs(self, *args, **kwargs):
for (
repo,
ref,
path_to_get_from_repo,
path_to_save_on_disk,
) in EXTERNAL_MARKDOWN_FILES:
path_to_save_on_disk = self.base_path.joinpath(path_to_save_on_disk)
self.save_original_doc(path_to_save_on_disk)
self.get_file_from_github(
repo, ref, path_to_get_from_repo, path_to_save_on_disk
)
preprocess_github_markdown_file(path_to_save_on_disk)
def write_original_docs(self, *args, **kwargs):
for path, content in self.original_docs.items():
with open(path, "w") as f:
f.write(content)
def __call__(self):
self.write_new_docs()
| DownloadAndPreprocessEcosystemDocs |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 341843,
"end": 342857
} | class ____(_CollectiveKernel):
def __init__(
self,
layout: OutputSpec,
kernel: _OpOverloads,
tensor_args: Sequence[IRNode],
nontensor_args: Sequence[Any],
unflatten_args: Callable[..., Any],
kwargs: Optional[dict[str, Any]] = None,
*,
unbacked_bindings: Optional[dict[sympy.Symbol, pytree.KeyPath]] = None,
) -> None:
super().__init__(
layout,
kernel,
tensor_args,
nontensor_args,
unflatten_args,
kwargs=None,
unbacked_bindings=unbacked_bindings,
)
self.set_cpp_kernel_name("aoti_torch_cpu__c10d_functional_all_reduce_")
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
wrapper.include_extra_header("torch/csrc/inductor/aoti_torch/c/shim_cpu.h")
wrapper.generate_extern_kernel_alloc(self)
if isinstance(self.layout, Layout):
self.codegen_size_asserts(wrapper)
| _AllReduce_Kernel |
python | jina-ai__jina | jina/serve/runtimes/gateway/http/__init__.py | {
"start": 274,
"end": 427
} | class ____(HTTPServer, BaseGateway):
"""
:class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app
"""
pass
| HTTPGateway |
python | run-llama__llama_index | llama-index-core/llama_index/core/llms/mock.py | {
"start": 3151,
"end": 5068
} | class ____(MockLLM):
"""
Mock LLM that keeps track of chat messages of function calls.
The idea behind this is to be able to easily checks whether the right messages would have been passed to an actual LLM.
"""
last_chat_messages: Optional[Sequence[ChatMessage]] = Field(
default=None, exclude=True
)
last_called_chat_function: List[str] = Field(default=[], exclude=True)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
r = super().chat(copy.deepcopy(messages), **kwargs)
self.last_chat_messages = messages
self.last_called_chat_function.append("chat")
return r
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
r = super().stream_chat(copy.deepcopy(messages), **kwargs)
self.last_chat_messages = messages
self.last_called_chat_function.append("stream_chat")
return r
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
r = await super().achat(copy.deepcopy(messages), **kwargs)
self.last_chat_messages = messages
self.last_called_chat_function.append("achat")
return r
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
r = await super().astream_chat(copy.deepcopy(messages), **kwargs)
self.last_chat_messages = messages
self.last_called_chat_function.append("astream_chat")
return r
def reset_memory(self) -> None:
self.last_chat_messages = None
self.last_called_chat_function = []
@classmethod
def class_name(cls) -> str:
return "MockLLMWithChatMemoryOfLastCall"
| MockLLMWithChatMemoryOfLastCall |
python | astropy__astropy | astropy/uncertainty/tests/test_containers.py | {
"start": 4662,
"end": 6061
} | class ____:
@classmethod
def setup_class(cls):
cls.lon = Distribution(np.linspace(0.0, 360 * u.deg, 10, endpoint=False))
cls.lat = Angle([-45.0, 0.0, 45.0], u.deg)
cls.r = 6000 * u.km # Sort of OK for Geodetic representations.
cls.sph = SphericalRepresentation(
cls.lon.distribution, cls.lat[:, np.newaxis], cls.r
)
cls.dsph = SphericalRepresentation(cls.lon, cls.lat, cls.r)
def get_distribution(self, rep):
return rep._apply(lambda x: getattr(x, "distribution", x[..., np.newaxis]))
def test_cartesian(self):
dcart = self.dsph.to_cartesian()
cart = self.sph.to_cartesian()
assert isinstance(dcart.x, Distribution)
assert_array_equal(dcart.x.distribution, cart.x)
assert_array_equal(dcart.y.distribution, cart.y)
assert_array_equal(dcart.z.distribution, cart.z)
def test_cartesian_roundtrip(self):
roundtrip = SphericalRepresentation.from_cartesian(self.dsph.to_cartesian())
assert_representation_allclose(self.get_distribution(roundtrip), self.sph)
@pytest.mark.parametrize("rep_cls", REPRESENTATION_CLASSES.values())
def test_other_reps(self, rep_cls):
drep = self.dsph.represent_as(rep_cls)
rep = self.sph.represent_as(rep_cls)
assert_representation_equal(self.get_distribution(drep), rep)
| TestRepresentation |
python | sphinx-doc__sphinx | sphinx/builders/epub3.py | {
"start": 1711,
"end": 12745
} | class ____(_epub_base.EpubBuilder):
"""Builder that outputs epub3 files.
It creates the metainfo files content.opf, nav.xhtml, toc.ncx, mimetype,
and META-INF/container.xml. Afterwards, all necessary files are zipped to
an epub file.
"""
name = 'epub'
epilog = __('The ePub file is in %(outdir)s.')
supported_remote_images = False
template_dir = package_dir.joinpath('templates', 'epub3')
doctype = DOCTYPE
html_tag = HTML_TAG
use_meta_charset = True
# Finish by building the epub file
def handle_finish(self) -> None:
"""Create the metainfo files and finally the epub."""
self.get_toc()
self.build_mimetype()
self.build_container()
self.build_content()
self.build_navigation_doc()
self.build_toc()
self.build_epub()
def content_metadata(self) -> dict[str, Any]:
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
writing_mode = self.config.epub_writing_mode
if (source_date_epoch := os.getenv('SOURCE_DATE_EPOCH')) is not None:
time_tuple = time.gmtime(int(source_date_epoch))
else:
time_tuple = time.gmtime()
metadata = super().content_metadata()
metadata['description'] = html.escape(self.config.epub_description)
metadata['contributor'] = html.escape(self.config.epub_contributor)
metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(
writing_mode
)
metadata['ibook_scroll_axis'] = IBOOK_SCROLL_AXIS.get(writing_mode)
metadata['date'] = html.escape(time.strftime('%Y-%m-%dT%H:%M:%SZ', time_tuple))
metadata['version'] = html.escape(self.config.version)
metadata['epub_version'] = self.config.epub_version
return metadata
def prepare_writing(self, docnames: Set[str]) -> None:
super().prepare_writing(docnames)
writing_mode = self.config.epub_writing_mode
self.globalcontext['theme_writing_mode'] = THEME_WRITING_MODES.get(writing_mode)
self.globalcontext['html_tag'] = self.html_tag
self.globalcontext['use_meta_charset'] = self.use_meta_charset
self.globalcontext['skip_ua_compatible'] = True
def build_navlist(self, navnodes: list[dict[str, Any]]) -> list[NavPoint]:
"""Create the toc navigation structure.
This method is almost same as build_navpoints method in epub.py.
This is because the logical navigation structure of epub3 is not
different from one of epub2.
The difference from build_navpoints method is templates which are used
when generating navigation documents.
"""
navstack: list[NavPoint] = [NavPoint('', '', [])]
level = 0
for node in navnodes:
if not node['text']:
continue
file = node['refuri'].split('#')[0]
if file in self.ignored_files:
continue
if node['level'] > self.config.epub_tocdepth:
continue
navpoint = NavPoint(node['text'], node['refuri'], [])
if node['level'] == level:
navstack.pop()
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] == level + 1:
level += 1
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] < level:
while node['level'] < len(navstack):
navstack.pop()
level = node['level']
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
else:
unreachable = 'Should never reach here. It might be a bug.'
raise RuntimeError(unreachable)
return navstack[0].children
def navigation_doc_metadata(self, navlist: list[NavPoint]) -> dict[str, Any]:
"""Create a dictionary with all metadata for the nav.xhtml file
properly escaped.
"""
return {
'lang': html.escape(self.config.epub_language),
'toc_locale': html.escape(self.guide_titles['toc']),
'navlist': navlist,
}
def build_navigation_doc(self) -> None:
"""Write the metainfo file nav.xhtml."""
logger.info(__('writing nav.xhtml file...'))
if self.config.epub_tocscope == 'default':
doctree = self.env.get_and_resolve_doctree(
self.config.root_doc,
self,
tags=self.tags,
prune_toctrees=False,
includehidden=False,
)
refnodes = self.get_refnodes(doctree, [])
self.toc_add_files(refnodes)
else:
# 'includehidden'
refnodes = self.refnodes
navlist = self.build_navlist(refnodes)
copy_asset_file(
self.template_dir / 'nav.xhtml.jinja',
self.outdir,
context=self.navigation_doc_metadata(navlist),
force=True,
)
# Add nav.xhtml to epub file
if 'nav.xhtml' not in self.files:
self.files.append('nav.xhtml')
def validate_config_values(app: Sphinx) -> None:
if app.builder.name != 'epub':
return
# <package> lang attribute, dc:language
if not app.config.epub_language:
logger.warning(
__(
'conf value "epub_language" (or "language") '
'should not be empty for EPUB3'
)
)
# <package> unique-identifier attribute
if not _XML_NAME_PATTERN.match(app.config.epub_uid):
logger.warning(__('conf value "epub_uid" should be XML NAME for EPUB3'))
# dc:title
if not app.config.epub_title:
logger.warning(
__(
'conf value "epub_title" (or "html_title") '
'should not be empty for EPUB3'
)
)
# dc:creator
if not app.config.epub_author:
logger.warning(__('conf value "epub_author" should not be empty for EPUB3'))
# dc:contributor
if not app.config.epub_contributor:
logger.warning(
__('conf value "epub_contributor" should not be empty for EPUB3')
)
# dc:description
if not app.config.epub_description:
logger.warning(
__('conf value "epub_description" should not be empty for EPUB3')
)
# dc:publisher
if not app.config.epub_publisher:
logger.warning(__('conf value "epub_publisher" should not be empty for EPUB3'))
# dc:rights
if not app.config.epub_copyright:
logger.warning(
__(
'conf value "epub_copyright" (or "copyright")'
'should not be empty for EPUB3'
)
)
# dc:identifier
if not app.config.epub_identifier:
logger.warning(__('conf value "epub_identifier" should not be empty for EPUB3'))
# meta ibooks:version
if not app.config.version:
logger.warning(__('conf value "version" should not be empty for EPUB3'))
def convert_epub_css_files(app: Sphinx, config: Config) -> None:
"""Convert string styled epub_css_files to tuple styled one."""
epub_css_files: list[tuple[str, dict[str, Any]]] = []
for entry in config.epub_css_files:
if isinstance(entry, str):
epub_css_files.append((entry, {}))
else:
try:
filename, attrs = entry
epub_css_files.append((filename, attrs))
except Exception:
logger.warning(__('invalid css_file: %r, ignored'), entry)
continue
config.epub_css_files = epub_css_files
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_builder(Epub3Builder)
# config values
app.add_config_value(
'epub_basename',
lambda self: make_filename(self.project),
'',
types=frozenset({str}),
)
app.add_config_value(
'epub_version', 3.0, 'epub', types=frozenset({float})
) # experimental
app.add_config_value('epub_theme', 'epub', 'epub', types=frozenset({str}))
app.add_config_value('epub_theme_options', {}, 'epub', types=frozenset({dict}))
app.add_config_value(
'epub_title', lambda self: self.project, 'epub', types=frozenset({str})
)
app.add_config_value(
'epub_author', lambda self: self.author, 'epub', types=frozenset({str})
)
app.add_config_value(
'epub_language',
lambda self: self.language or 'en',
'epub',
types=frozenset({str}),
)
app.add_config_value(
'epub_publisher', lambda self: self.author, 'epub', types=frozenset({str})
)
app.add_config_value(
'epub_copyright', lambda self: self.copyright, 'epub', types=frozenset({str})
)
app.add_config_value('epub_identifier', 'unknown', 'epub', types=frozenset({str}))
app.add_config_value('epub_scheme', 'unknown', 'epub', types=frozenset({str}))
app.add_config_value('epub_uid', 'unknown', 'env', types=frozenset({str}))
app.add_config_value('epub_cover', (), 'env', types=frozenset({list, tuple}))
app.add_config_value('epub_guide', (), 'env', types=frozenset({list, tuple}))
app.add_config_value('epub_pre_files', [], 'env', types=frozenset({list, tuple}))
app.add_config_value('epub_post_files', [], 'env', types=frozenset({list, tuple}))
app.add_config_value(
'epub_css_files',
lambda config: config.html_css_files,
'epub',
types=frozenset({list, tuple}),
)
app.add_config_value(
'epub_exclude_files', [], 'env', types=frozenset({list, tuple})
)
app.add_config_value('epub_tocdepth', 3, 'env', types=frozenset({int}))
app.add_config_value('epub_tocdup', True, 'env', types=frozenset({bool}))
app.add_config_value('epub_tocscope', 'default', 'env', types=frozenset({str}))
app.add_config_value('epub_fix_images', False, 'env', types=frozenset({bool}))
app.add_config_value('epub_max_image_width', 0, 'env', types=frozenset({int}))
app.add_config_value('epub_show_urls', 'inline', 'epub', types=frozenset({str}))
app.add_config_value(
'epub_use_index',
lambda self: self.html_use_index,
'epub',
types=frozenset({bool}),
)
app.add_config_value('epub_description', 'unknown', 'epub', types=frozenset({str}))
app.add_config_value('epub_contributor', 'unknown', 'epub', types=frozenset({str}))
app.add_config_value(
'epub_writing_mode', 'horizontal', 'epub', types=ENUM('horizontal', 'vertical')
)
# event handlers
app.connect('config-inited', convert_epub_css_files, priority=800)
app.connect('builder-inited', validate_config_values)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| Epub3Builder |
python | django__django | tests/bulk_create/tests.py | {
"start": 35293,
"end": 36758
} | class ____(TransactionTestCase):
available_apps = ["bulk_create"]
def get_unused_country_id(self):
# Find a serial ID that hasn't been used already and has enough of a
# buffer for the following `bulk_create` call without an explicit pk
# not to conflict.
return getattr(Country.objects.last(), "id", 10) + 100
def test_no_unnecessary_transaction(self):
unused_id = self.get_unused_country_id()
with self.assertNumQueries(1):
Country.objects.bulk_create(
[Country(id=unused_id, name="France", iso_two_letter="FR")]
)
with self.assertNumQueries(1):
Country.objects.bulk_create([Country(name="Canada", iso_two_letter="CA")])
def test_objs_with_and_without_pk(self):
unused_id = self.get_unused_country_id()
with self.assertNumQueries(4):
Country.objects.bulk_create(
[
Country(id=unused_id, name="France", iso_two_letter="FR"),
Country(name="Canada", iso_two_letter="CA"),
]
)
def test_multiple_batches(self):
with self.assertNumQueries(4):
Country.objects.bulk_create(
[
Country(name="France", iso_two_letter="FR"),
Country(name="Canada", iso_two_letter="CA"),
],
batch_size=1,
)
| BulkCreateTransactionTests |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/utils/websocket_client.py | {
"start": 13312,
"end": 15089
} | class ____:
def __init__(
self,
queue: asyncio.Queue[dict[str, t.Any]],
websocket: aiohttp.ClientWebSocketResponse,
session: _Session,
channel_name: str,
):
self._queue = queue
self._websocket = websocket
self.session = session
self.channel_name = channel_name
self._running: t.Optional[asyncio.Task[None]] = None
self._inspect = None
async def _loop(self):
"""Receive messages from the websocket stream."""
while True:
msg = await self._queue.get()
self.call_handlers(msg)
if self._inspect is not None:
self._inspect(msg)
def start(self):
"""Start the channel."""
_LOGGER.debug("Starting channel %s", self.channel_name)
self._running = asyncio.create_task(
self._loop(), name=f"{self.session.session}-{self.channel_name}"
)
def stop(self) -> None:
"""Stop the channel."""
if self._running is not None:
self._running.cancel()
self._running = None
if not self._queue.empty():
_LOGGER.warning(
"Channel %s has messages in the queue, but is being stopped.",
self.channel_name,
)
def is_alive(self) -> bool:
"""Test whether the channel is alive."""
return self._running is not None and not self._running.done()
@AsyncDispatcher(loop="ipythonconsole", early_return=False)
async def send(self, msg: dict[str, t.Any]) -> None:
"""Send a message to the channel."""
await self.session.send(self._websocket, self.channel_name, msg)
def call_handlers(self, msg: dict[str, t.Any]):
pass
| _WebSocketChannel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 17502,
"end": 17595
} | class ____(IterableExportEventsStreamAdjustableRange):
data_field = "inAppClose"
| InAppClose |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 14201,
"end": 14397
} | class ____:
path: Path
@classmethod
def from_raw(cls, raw: "DgRawWorkspaceProjectSpec") -> Self:
return cls(
path=Path(raw["path"]),
)
| DgWorkspaceProjectSpec |
python | keon__algorithms | tests/test_maths.py | {
"start": 1925,
"end": 2516
} | class ____(unittest.TestCase):
"""
Test for the file decimal_to_binary_ip.py
Arguments:
unittest {[type]} -- [description]
"""
def test_decimal_to_binary_ip(self):
self.assertEqual("00000000.00000000.00000000.00000000",
decimal_to_binary_ip("0.0.0.0"))
self.assertEqual("11111111.11111111.11111111.11111111",
decimal_to_binary_ip("255.255.255.255"))
self.assertEqual("11000000.10101000.00000000.00000001",
decimal_to_binary_ip("192.168.0.1"))
| TestDecimalToBinaryIP |
python | tiangolo__fastapi | fastapi/exceptions.py | {
"start": 4081,
"end": 4172
} | class ____(RuntimeError):
"""
A generic, FastAPI-specific error.
"""
| FastAPIError |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v2/adagrad.py | {
"start": 1152,
"end": 6699
} | class ____(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adagrad algorithm.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
Args:
learning_rate: Initial value for the learning rate:
either a floating point value,
or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
Defaults to 0.001.
Note that `Adagrad` tends to benefit from higher initial learning rate
values compared to other optimizers.
To match the exact form in the original paper, use 1.0.
initial_accumulator_value: Floating point value.
Starting value for the accumulators (per-parameter momentum values).
Must be non-negative.
epsilon: Small floating point value used to maintain numerical stability.
name: Optional name prefix for the operations created when applying
gradients. Defaults to `"Adagrad"`.
**kwargs: Keyword arguments. Allowed to be one of
`"clipnorm"` or `"clipvalue"`.
`"clipnorm"` (float) clips gradients by norm and represents
the maximum L2 norm of each weight variable;
`"clipvalue"` (float) clips gradient by value and represents the
maximum absolute value of each weight variable.
Reference:
- [Duchi et al., 2011](
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
name='Adagrad',
**kwargs):
if initial_accumulator_value < 0.0:
raise ValueError('initial_accumulator_value must be non-negative: %s' %
initial_accumulator_value)
if epsilon is None:
epsilon = backend_config.epsilon()
super(Adagrad, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon or backend_config.epsilon()
def _create_slots(self, var_list):
for var in var_list:
dtype = var.dtype.base_dtype
init = init_ops.constant_initializer(
self._initial_accumulator_value, dtype=dtype)
self.add_slot(var, 'accumulator', init)
def _prepare_local(self, var_device, var_dtype, apply_state):
super(Adagrad, self)._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)].update(
dict(
epsilon=tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype
),
neg_lr_t=-apply_state[(var_device, var_dtype)]['lr_t'],
zero=array_ops.zeros((), dtype=dtypes.int64),
)
)
def set_weights(self, weights):
params = self.weights
# Override set_weights for backward compatibility of Keras V1 optimizer
# since it does not include iteration at head of the weight list. Set
# iteration to 0.
if len(params) == len(weights) + 1:
weights = [np.array(0)] + weights
super(Adagrad, self).set_weights(weights)
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Args:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
if 'initial_accumulator_value' not in config:
config['initial_accumulator_value'] = 0.1
if 'lr' in config:
config['learning_rate'] = config.pop('lr')
return cls(**config)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
acc = self.get_slot(var, 'accumulator')
return gen_training_ops.ResourceApplyAdagradV2(
var=var.handle,
accum=acc.handle,
lr=coefficients['lr_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
acc = self.get_slot(var, 'accumulator')
return gen_training_ops.ResourceSparseApplyAdagradV2(
var=var.handle,
accum=acc.handle,
lr=coefficients['lr_t'],
epsilon=coefficients['epsilon'],
grad=grad,
indices=indices,
use_locking=self._use_locking)
def get_config(self):
config = super(Adagrad, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._initial_decay,
'initial_accumulator_value': self._initial_accumulator_value,
'epsilon': self.epsilon,
})
return config
| Adagrad |
python | jina-ai__jina | tests/integration/v2_api/test_docs_matrix_tail_pea.py | {
"start": 771,
"end": 1173
} | class ____(Executor):
@requests
def merge(self, docs_matrix, **kwargs):
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
return DocumentArray(list(results.values()))
| MatchMerger |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_captions.py | {
"start": 62,
"end": 8771
} | class ____(util.MdCase):
"""Test Blocks caption cases with default configuration."""
extension = ['pymdownx.blocks.caption', 'md_in_html', 'pymdownx.blocks.html']
extension_configs = {
'pymdownx.blocks.caption': {
'auto': False
}
}
def test_caption(self):
"""Test basic caption."""
self.check_markdown(
R'''
A paragraph with a caption.
/// caption
This is the caption.
///
''',
R'''
<figure>
<p>A paragraph with a caption.</p>
<figcaption>
<p>This is the caption.</p>
</figcaption>
</figure>
''',
True
)
def test_caption_with_markdown(self):
"""Test caption with markdown."""
self.check_markdown(
R'''
A paragraph with a caption.
/// caption
This is the **caption**.
///
''',
R'''
<figure>
<p>A paragraph with a caption.</p>
<figcaption>
<p>This is the <strong>caption</strong>.</p>
</figcaption>
</figure>
''',
True
)
def test_image_caption(self):
"""Test image caption."""
self.check_markdown(
R'''

/// caption
This is the caption.
///
''',
R'''
<figure>
<p><img alt="Alt text" src="/path/to/img.jpg" /></p>
<figcaption>
<p>This is the caption.</p>
</figcaption>
</figure>
''',
True
)
def test_nested_caption(self):
"""Test nested caption."""
self.check_markdown(
R'''
A paragraph with a caption.
/// caption
This is the nested caption.
///
/// caption
This is the caption.
///
''',
R'''
<figure>
<figure>
<p>A paragraph with a caption.</p>
<figcaption>
<p>This is the nested caption.</p>
</figcaption>
</figure>
<figcaption>
<p>This is the caption.</p>
</figcaption>
</figure>
''',
True
)
def test_caption_in_figure_block(self):
"""Test that captions are injected into existing figures."""
self.check_markdown(
R"""
<figure markdown>
Some content
</figure>
/// caption
Caption.
///
/// html | figure
Some content
///
/// caption
Caption.
///
""",
"""
<figure>
<p>Some content</p>
<figcaption>
<p>Caption.</p>
</figcaption>
</figure>
<figure>
<p>Some content</p>
<figcaption>
<p>Caption.</p>
</figcaption>
</figure>
""",
True
)
def test_caption_not_in_figure_block(self):
"""Test that captions are not injected into existing figures that already have captions."""
self.check_markdown(
R"""
<figure markdown>
Some content
<figcaption markdown>
Existing caption
</figcaption>
</figure>
/// caption
Caption.
///
/// html | figure
Some content
//// html | figcaption
Existing caption
////
///
/// caption
Caption.
///
""",
R"""
<figure>
<figure>
<p>Some content</p>
<figcaption>
<p>Existing caption</p>
</figcaption>
</figure>
<figcaption>
<p>Caption.</p>
</figcaption>
</figure>
<figure>
<figure>
<p>Some content</p>
<figcaption>
<p>Existing caption</p>
</figcaption>
</figure>
<figcaption>
<p>Caption.</p>
</figcaption>
</figure>
""",
True
)
def test_manual_prepend(self):
"""Test manual prepend."""
self.check_markdown(
R"""
Text
/// caption | <
Prepended
///
Text
/// caption | >
Appended
///
""",
R"""
<figure>
<figcaption>
<p>Prepended</p>
</figcaption>
<p>Text</p>
</figure>
<figure>
<p>Text</p>
<figcaption>
<p>Appended</p>
</figcaption>
</figure>
""",
True
)
def test_caption_inline_id(self):
"""Test caption with inline shorthand ID."""
self.check_markdown(
R'''
Paragraph
/// figure-caption | #custom-id
Caption text.
///
''',
R'''
<figure id="custom-id">
<p>Paragraph</p>
<figcaption>
<p>Caption text.</p>
</figcaption>
</figure>
''',
True
)
def test_caption_inline_id_prepend(self):
"""Test caption with inline shorthand ID and prepend marker."""
self.check_markdown(
R'''
Text
/// figure-caption | < #custom-prepend
Prepended caption.
///
''',
R'''
<figure id="custom-prepend">
<figcaption>
<p>Prepended caption.</p>
</figcaption>
<p>Text</p>
</figure>
''',
True
)
def test_caption_inline_id_with_extra_token(self):
"""Ensure inline shorthand ID with extra tokens is rejected."""
self.check_markdown(
R"""
Paragraph
/// figure-caption | #custom-id extra
Caption text.
///
""",
"""
<p>Paragraph
/// figure-caption | #custom-id extra
Caption text.
///</p>
""",
True
)
def test_caption_inline_id_invalid_identifier(self):
"""Ensure inline shorthand ID with invalid characters is rejected."""
self.check_markdown(
R"""
Paragraph
/// figure-caption | #invalid!
Caption text.
///
""",
"""
<p>Paragraph
/// figure-caption | #invalid!
Caption text.
///</p>
""",
True
)
def test_table_caption_inline_id(self):
"""Test table caption with inline shorthand ID."""
self.check_markdown(
R'''
Paragraph
/// table-caption | #custom-table
Table caption text.
///
''',
R'''
<figure id="custom-table">
<p>Paragraph</p>
<figcaption>
<p>Table caption text.</p>
</figcaption>
</figure>
''',
True
)
def test_caption_inline_id_attribute_override(self):
"""Ensure inline shorthand ID does not override attribute-defined ID."""
self.check_markdown(
R'''
Paragraph
/// figure-caption | #inline-id
attrs: {id: attribute-id}
Caption text.
///
''',
R'''
<figure id="attribute-id">
<p>Paragraph</p>
<figcaption>
<p>Caption text.</p>
</figcaption>
</figure>
''',
True
)
def test_bad_header(self):
"""Test a bad header."""
self.check_markdown(
R"""
Test
/// caption | bad
///
""",
"""
<p>Test
/// caption | bad
///</p>
""",
True
)
| TestBlocksCaption |
python | Textualize__textual | docs/examples/how-to/containers02.py | {
"start": 270,
"end": 555
} | class ____(App):
"""Simple app to play with containers."""
def compose(self) -> ComposeResult:
with Vertical(): # (1)!
yield Box()
yield Box()
yield Box()
if __name__ == "__main__":
app = ContainerApp()
app.run()
| ContainerApp |
python | huggingface__transformers | src/transformers/models/nemotron/modeling_nemotron.py | {
"start": 24415,
"end": 28113
} | class ____(GradientCheckpointingLayer):
# Ignore copy
def __init__(self, config: NemotronConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = NEMOTRON_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
self.mlp = NemotronMLP(config)
self.input_layernorm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps)
self.post_attention_layernorm = NemotronLayerNorm1P(config.hidden_size, eps=config.norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
@auto_docstring
| NemotronDecoderLayer |
python | kubernetes-client__python | kubernetes/client/models/v1_node_swap_status.py | {
"start": 383,
"end": 3500
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'capacity': 'int'
}
attribute_map = {
'capacity': 'capacity'
}
def __init__(self, capacity=None, local_vars_configuration=None): # noqa: E501
"""V1NodeSwapStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._capacity = None
self.discriminator = None
if capacity is not None:
self.capacity = capacity
@property
def capacity(self):
"""Gets the capacity of this V1NodeSwapStatus. # noqa: E501
Total amount of swap memory in bytes. # noqa: E501
:return: The capacity of this V1NodeSwapStatus. # noqa: E501
:rtype: int
"""
return self._capacity
@capacity.setter
def capacity(self, capacity):
"""Sets the capacity of this V1NodeSwapStatus.
Total amount of swap memory in bytes. # noqa: E501
:param capacity: The capacity of this V1NodeSwapStatus. # noqa: E501
:type: int
"""
self._capacity = capacity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeSwapStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeSwapStatus):
return True
return self.to_dict() != other.to_dict()
| V1NodeSwapStatus |
python | getsentry__sentry | tests/sentry/utils/test_committers.py | {
"start": 1857,
"end": 2917
} | class ____(unittest.TestCase):
def test_forward_slash(self) -> None:
assert list(tokenize_path("foo/bar")) == ["bar", "foo"]
def test_back_slash(self) -> None:
assert list(tokenize_path("foo\\bar")) == ["bar", "foo"]
def test_dot_does_not_separate(self) -> None:
assert list(tokenize_path("foo.bar")) == ["foo.bar"]
def test_additional_slash_in_front(self) -> None:
assert list(tokenize_path("/foo/bar")) == ["bar", "foo"]
assert list(tokenize_path("\\foo\\bar")) == ["bar", "foo"]
def test_relative_paths(self) -> None:
assert list(tokenize_path("./")) == ["."]
assert list(tokenize_path("./../")) == ["..", "."]
assert list(tokenize_path("./foo/bar")) == ["bar", "foo", "."]
assert list(tokenize_path(".\\foo\\bar")) == ["bar", "foo", "."]
def test_path_with_spaces(self) -> None:
assert list(tokenize_path("\\foo bar\\bar")) == ["bar", "foo bar"]
def test_no_path(self) -> None:
assert list(tokenize_path("/")) == []
| TokenizePathTestCase |
python | ray-project__ray | python/ray/tune/search/optuna/optuna_search.py | {
"start": 2336,
"end": 26571
} | class ____(Searcher):
"""A wrapper around Optuna to provide trial suggestions.
`Optuna <https://optuna.org/>`_ is a hyperparameter optimization library.
In contrast to other libraries, it employs define-by-run style
hyperparameter definitions.
This Searcher is a thin wrapper around Optuna's search algorithms.
You can pass any Optuna sampler, which will be used to generate
hyperparameter suggestions.
Multi-objective optimization is supported.
Args:
space: Hyperparameter search space definition for
Optuna's sampler. This can be either a :class:`dict` with
parameter names as keys and ``optuna.distributions`` as values,
or a Callable - in which case, it should be a define-by-run
function using ``optuna.trial`` to obtain the hyperparameter
values. The function should return either a :class:`dict` of
constant values with names as keys, or None.
For more information, see https://optuna.readthedocs.io\
/en/stable/tutorial/10_key_features/002_configurations.html.
.. warning::
No actual computation should take place in the define-by-run
function. Instead, put the training logic inside the function
or class trainable passed to ``tune.Tuner()``.
metric: The training result objective value attribute. If
None but a mode was passed, the anonymous metric ``_metric``
will be used per default. Can be a list of metrics for
multi-objective optimization.
mode: One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute. Can be a list of
modes for multi-objective optimization (corresponding to
``metric``).
points_to_evaluate: Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
sampler: Optuna sampler used to
draw hyperparameter configurations. Defaults to ``MOTPESampler``
for multi-objective optimization with Optuna<2.9.0, and
``TPESampler`` in every other case.
See https://optuna.readthedocs.io/en/stable/reference/samplers/index.html
for available Optuna samplers.
.. warning::
Please note that with Optuna 2.10.0 and earlier
default ``MOTPESampler``/``TPESampler`` suffer
from performance issues when dealing with a large number of
completed trials (approx. >100). This will manifest as
a delay when suggesting new configurations.
This is an Optuna issue and may be fixed in a future
Optuna release.
study_name: Optuna study name that uniquely identifies the trial
results. Defaults to ``"optuna"``.
storage: Optuna storage used for storing trial results to
storages other than in-memory storage,
for instance optuna.storages.RDBStorage.
seed: Seed to initialize sampler with. This parameter is only
used when ``sampler=None``. In all other cases, the sampler
you pass should be initialized with the seed already.
evaluated_rewards: If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate.
.. warning::
When using ``evaluated_rewards``, the search space ``space``
must be provided as a :class:`dict` with parameter names as
keys and ``optuna.distributions`` instances as values. The
define-by-run search space definition is not yet supported with
this functionality.
Tune automatically converts search spaces to Optuna's format:
.. code-block:: python
from ray.tune.search.optuna import OptunaSearch
config = {
"a": tune.uniform(6, 8)
"b": tune.loguniform(1e-4, 1e-2)
}
optuna_search = OptunaSearch(
metric="loss",
mode="min")
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
search_alg=optuna_search,
),
param_space=config,
)
tuner.fit()
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray.tune.search.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.FloatDistribution(6, 8),
"b": optuna.distributions.FloatDistribution(1e-4, 1e-2, log=True),
}
optuna_search = OptunaSearch(
space,
metric="loss",
mode="min")
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
search_alg=optuna_search,
),
)
tuner.fit()
# Equivalent Optuna define-by-run function approach:
def define_search_space(trial: optuna.Trial):
trial.suggest_float("a", 6, 8)
trial.suggest_float("b", 1e-4, 1e-2, log=True)
# training logic goes into trainable, this is just
# for search space definition
optuna_search = OptunaSearch(
define_search_space,
metric="loss",
mode="min")
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
search_alg=optuna_search,
),
)
tuner.fit()
Multi-objective optimization is supported:
.. code-block:: python
from ray.tune.search.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.FloatDistribution(6, 8),
"b": optuna.distributions.FloatDistribution(1e-4, 1e-2, log=True),
}
# Note you have to specify metric and mode here instead of
# in tune.TuneConfig
optuna_search = OptunaSearch(
space,
metric=["loss1", "loss2"],
mode=["min", "max"])
# Do not specify metric and mode here!
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
search_alg=optuna_search,
),
)
tuner.fit()
You can pass configs that will be evaluated first using
``points_to_evaluate``:
.. code-block:: python
from ray.tune.search.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.FloatDistribution(6, 8),
"b": optuna.distributions.FloatDistribution(1e-4, 1e-2, log=True),
}
optuna_search = OptunaSearch(
space,
points_to_evaluate=[{"a": 6.5, "b": 5e-4}, {"a": 7.5, "b": 1e-3}]
metric="loss",
mode="min")
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
search_alg=optuna_search,
),
)
tuner.fit()
Avoid re-running evaluated trials by passing the rewards together with
`points_to_evaluate`:
.. code-block:: python
from ray.tune.search.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.FloatDistribution(6, 8),
"b": optuna.distributions.FloatDistribution(1e-4, 1e-2, log=True),
}
optuna_search = OptunaSearch(
space,
points_to_evaluate=[{"a": 6.5, "b": 5e-4}, {"a": 7.5, "b": 1e-3}]
evaluated_rewards=[0.89, 0.42]
metric="loss",
mode="min")
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
search_alg=optuna_search,
),
)
tuner.fit()
.. versionadded:: 0.8.8
"""
def __init__(
self,
space: Optional[
Union[
Dict[str, "OptunaDistribution"],
List[Tuple],
Callable[["OptunaTrial"], Optional[Dict[str, Any]]],
]
] = None,
metric: Optional[Union[str, List[str]]] = None,
mode: Optional[Union[str, List[str]]] = None,
points_to_evaluate: Optional[List[Dict]] = None,
sampler: Optional["BaseSampler"] = None,
study_name: Optional[str] = None,
storage: Optional["BaseStorage"] = None,
seed: Optional[int] = None,
evaluated_rewards: Optional[List] = None,
):
assert ot is not None, "Optuna must be installed! Run `pip install optuna`."
super(OptunaSearch, self).__init__(metric=metric, mode=mode)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self).__name__)
)
space = self.convert_search_space(space)
else:
# Flatten to support nested dicts
space = flatten_dict(space, "/")
self._space = space
self._points_to_evaluate = points_to_evaluate or []
self._evaluated_rewards = evaluated_rewards
if study_name:
self._study_name = study_name
else:
self._study_name = "optuna" # Fixed study name for in-memory storage
if sampler and seed:
logger.warning(
"You passed an initialized sampler to `OptunaSearch`. The "
"`seed` parameter has to be passed to the sampler directly "
"and will be ignored."
)
elif sampler:
assert isinstance(sampler, BaseSampler), (
"You can only pass an instance of "
"`optuna.samplers.BaseSampler` "
"as a sampler to `OptunaSearcher`."
)
self._sampler = sampler
self._seed = seed
if storage:
assert isinstance(storage, BaseStorage), (
"The `storage` parameter in `OptunaSearcher` must be an instance "
"of `optuna.storages.BaseStorage`."
)
# If storage is not provided, just set self._storage to None
# so that the default in-memory storage is used.
self._storage = storage
self._completed_trials = set()
self._ot_trials = {}
self._ot_study = None
if self._space:
self._setup_study(mode)
def _setup_study(self, mode: Union[str, list]):
if self._metric is None and self._mode:
if isinstance(self._mode, list):
raise ValueError(
"If ``mode`` is a list (multi-objective optimization "
"case), ``metric`` must be defined."
)
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
pruner = ot.pruners.NopPruner()
if self._sampler:
sampler = self._sampler
elif isinstance(mode, list) and version.parse(ot.__version__) < version.parse(
"2.9.0"
):
# MOTPESampler deprecated in Optuna>=2.9.0
sampler = ot.samplers.MOTPESampler(seed=self._seed)
else:
sampler = ot.samplers.TPESampler(seed=self._seed)
if isinstance(mode, list):
study_direction_args = dict(
directions=["minimize" if m == "min" else "maximize" for m in mode],
)
else:
study_direction_args = dict(
direction="minimize" if mode == "min" else "maximize",
)
self._ot_study = ot.study.create_study(
storage=self._storage,
sampler=sampler,
pruner=pruner,
study_name=self._study_name,
load_if_exists=True,
**study_direction_args,
)
if self._points_to_evaluate:
validate_warmstart(
self._space,
self._points_to_evaluate,
self._evaluated_rewards,
validate_point_name_lengths=not callable(self._space),
)
if self._evaluated_rewards:
for point, reward in zip(
self._points_to_evaluate, self._evaluated_rewards
):
self.add_evaluated_point(point, reward)
else:
for point in self._points_to_evaluate:
self._ot_study.enqueue_trial(point)
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], config: Dict, **spec
) -> bool:
if self._space:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_study(self._mode)
return True
def _suggest_from_define_by_run_func(
self,
func: Callable[["OptunaTrial"], Optional[Dict[str, Any]]],
ot_trial: "OptunaTrial",
) -> Dict:
captor = _OptunaTrialSuggestCaptor(ot_trial)
time_start = time.time()
ret = func(captor)
time_taken = time.time() - time_start
if time_taken > DEFINE_BY_RUN_WARN_THRESHOLD_S:
warnings.warn(
"Define-by-run function passed in the `space` argument "
f"took {time_taken} seconds to "
"run. Ensure that actual computation, training takes "
"place inside Tune's train functions or Trainables "
"passed to `tune.Tuner()`."
)
if ret is not None:
if not isinstance(ret, dict):
raise TypeError(
"The return value of the define-by-run function "
"passed in the `space` argument should be "
"either None or a `dict` with `str` keys. "
f"Got {type(ret)}."
)
if not all(isinstance(k, str) for k in ret.keys()):
raise TypeError(
"At least one of the keys in the dict returned by the "
"define-by-run function passed in the `space` argument "
"was not a `str`."
)
return {**captor.captured_values, **ret} if ret else captor.captured_values
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._space:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"
)
)
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__, metric=self._metric, mode=self._mode
)
)
if callable(self._space):
# Define-by-run case
if trial_id not in self._ot_trials:
self._ot_trials[trial_id] = self._ot_study.ask()
ot_trial = self._ot_trials[trial_id]
params = self._suggest_from_define_by_run_func(self._space, ot_trial)
else:
# Use Optuna ask interface (since version 2.6.0)
if trial_id not in self._ot_trials:
self._ot_trials[trial_id] = self._ot_study.ask(
fixed_distributions=self._space
)
ot_trial = self._ot_trials[trial_id]
params = ot_trial.params
return unflatten_dict(params)
def on_trial_result(self, trial_id: str, result: Dict):
if isinstance(self.metric, list):
# Optuna doesn't support incremental results
# for multi-objective optimization
return
if trial_id in self._completed_trials:
logger.warning(
f"Received additional result for trial {trial_id}, but "
f"it already finished. Result: {result}"
)
return
metric = result[self.metric]
step = result[TRAINING_ITERATION]
ot_trial = self._ot_trials[trial_id]
ot_trial.report(metric, step)
def on_trial_complete(
self, trial_id: str, result: Optional[Dict] = None, error: bool = False
):
if trial_id in self._completed_trials:
logger.warning(
f"Received additional completion for trial {trial_id}, but "
f"it already finished. Result: {result}"
)
return
ot_trial = self._ot_trials[trial_id]
if result:
if isinstance(self.metric, list):
val = [result.get(metric, None) for metric in self.metric]
else:
val = result.get(self.metric, None)
else:
val = None
ot_trial_state = OptunaTrialState.COMPLETE
if val is None:
if error:
ot_trial_state = OptunaTrialState.FAIL
else:
ot_trial_state = OptunaTrialState.PRUNED
try:
self._ot_study.tell(ot_trial, val, state=ot_trial_state)
except Exception as exc:
logger.warning(exc) # E.g. if NaN was reported
self._completed_trials.add(trial_id)
def add_evaluated_point(
self,
parameters: Dict,
value: float,
error: bool = False,
pruned: bool = False,
intermediate_values: Optional[List[float]] = None,
):
if not self._space:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"
)
)
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__, metric=self._metric, mode=self._mode
)
)
if callable(self._space):
raise TypeError(
"Define-by-run function passed in `space` argument is not "
"yet supported when using `evaluated_rewards`. Please provide "
"an `OptunaDistribution` dict or pass a Ray Tune "
"search space to `tune.Tuner()`."
)
ot_trial_state = OptunaTrialState.COMPLETE
if error:
ot_trial_state = OptunaTrialState.FAIL
elif pruned:
ot_trial_state = OptunaTrialState.PRUNED
if intermediate_values:
intermediate_values_dict = dict(enumerate(intermediate_values))
else:
intermediate_values_dict = None
# If the trial state is FAILED, the value must be `None` in Optuna==4.1.0
# Reference: https://github.com/optuna/optuna/pull/5211
# This is a temporary fix for the issue that Optuna enforces the value
# to be `None` if the trial state is FAILED.
# TODO (hpguo): A better solution may requires us to update the base class
# to allow the `value` arg in `add_evaluated_point` being `Optional[float]`.
if ot_trial_state == OptunaTrialState.FAIL:
value = None
trial = ot.trial.create_trial(
state=ot_trial_state,
value=value,
params=parameters,
distributions=self._space,
intermediate_values=intermediate_values_dict,
)
self._ot_study.add_trial(trial)
def save(self, checkpoint_path: str):
save_object = self.__dict__.copy()
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = pickle.load(inputFile)
if isinstance(save_object, dict):
self.__dict__.update(save_object)
else:
# Backwards compatibility
(
self._sampler,
self._ot_trials,
self._ot_study,
self._points_to_evaluate,
self._evaluated_rewards,
) = save_object
@staticmethod
def convert_search_space(spec: Dict) -> Dict[str, Any]:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if not domain_vars and not grid_vars:
return {}
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to an Optuna search space."
)
# Flatten and resolve again after checking for grid search.
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
quantize = None
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
quantize = sampler.q
sampler = sampler.sampler
if isinstance(sampler, LogUniform):
logger.warning(
"Optuna does not handle quantization in loguniform "
"sampling. The parameter will be passed but it will "
"probably be ignored."
)
if isinstance(domain, Float):
if isinstance(sampler, LogUniform):
if quantize:
logger.warning(
"Optuna does not support both quantization and "
"sampling from LogUniform. Dropped quantization."
)
return ot.distributions.FloatDistribution(
domain.lower, domain.upper, log=True
)
elif isinstance(sampler, Uniform):
if quantize:
return ot.distributions.FloatDistribution(
domain.lower, domain.upper, step=quantize
)
return ot.distributions.FloatDistribution(
domain.lower, domain.upper
)
elif isinstance(domain, Integer):
if isinstance(sampler, LogUniform):
return ot.distributions.IntDistribution(
domain.lower, domain.upper - 1, step=quantize or 1, log=True
)
elif isinstance(sampler, Uniform):
# Upper bound should be inclusive for quantization and
# exclusive otherwise
return ot.distributions.IntDistribution(
domain.lower,
domain.upper - int(bool(not quantize)),
step=quantize or 1,
)
elif isinstance(domain, Categorical):
if isinstance(sampler, Uniform):
return ot.distributions.CategoricalDistribution(domain.categories)
raise ValueError(
"Optuna search does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__, type(domain.sampler).__name__
)
)
# Parameter name is e.g. "a/b/c" for nested dicts
values = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
return values
| OptunaSearch |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/rpc_test.py | {
"start": 31885,
"end": 158805
} | class ____(RpcAgentTestFixture, RpcTestCommon):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "could not find destination"):
rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {worker_name(rank) for rank in range(self.world_size)}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
# Test dense tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(
dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1)
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(
self.world_size + 1,
torch.add,
exec_mode,
args=(torch.ones(2, 2), 1),
)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(
-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)
)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(
dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)
)
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(
dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)
)
@dist_init
def test_self_py_udf_remote(self):
self._self_py_udf_remote(rpc.get_worker_info(), torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(dst, torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._self_remote_rref_as_rpc_arg(rpc.get_worker_info(), torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(dst, torch.ones(2, 2), 1, 3)
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(), torch.ones(2, 2), 1, 3
)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute 'non_exist'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist().wait()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3),
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
    def _test_rref_proxy_class(self, dst):
        """Exercise instance, static, and class methods of a remote object
        through all three RRef proxies, comparing against a local instance."""
        rref = rpc.remote(dst, MyClass, args=(7,))
        expected = MyClass(7)
        self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
        self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
        self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())

        # Mutate both the local mirror (by 3) and the remote object (1+1+1),
        # then confirm they still agree.
        expected.increment_value(3)
        self.assertEqual(None, rref.rpc_sync().increment_value(1))
        self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
        self.assertEqual(None, rref.remote().increment_value(1).to_here())

        self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
        self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
        self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())

        self.assertEqual(
            expected.my_instance_method(2), rref.rpc_sync().my_instance_method(2)
        )
        self.assertEqual(
            expected.my_instance_method(3),
            rref.rpc_async().my_instance_method(3).wait(),
        )
        self.assertEqual(
            expected.my_instance_method(4),
            rref.remote().my_instance_method(4).to_here(),
        )

        self.assertEqual(
            expected.my_static_method(9), rref.rpc_sync().my_static_method(9)
        )
        self.assertEqual(
            expected.my_static_method(10), rref.rpc_async().my_static_method(10).wait()
        )
        self.assertEqual(
            expected.my_static_method(11), rref.remote().my_static_method(11).to_here()
        )

        self.assertEqual(
            expected.my_class_method(2, torch.zeros(2, 2)),
            rref.rpc_sync().my_class_method(2, torch.zeros(2, 2)),
        )
        self.assertEqual(
            expected.my_class_method(2, torch.ones(3, 3)),
            rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait(),
        )
        self.assertEqual(
            expected.my_class_method(2, torch.ones(4, 4)),
            rref.remote().my_class_method(2, torch.ones(4, 4)).to_here(),
        )
    @dist_init
    def test_rref_proxy_class(self):
        # RRef class proxy calls against the next-rank peer.
        self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
    @dist_init
    def test_rref_proxy_class_self(self):
        # RRef class proxy calls where the owner is this worker itself.
        self._test_rref_proxy_class(rpc.get_worker_info())
    @mock.patch.object(torch.distributed.autograd, "_init")
    @mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
    @dist_init(setup_rpc=False)
    def test_register_rpc_backend_and_set_and_start_rpc_backend(
        self, mock_rpc_agent, mock_dist_autograd_init
    ):
        """Registering a backend twice fails; a registered stub backend can
        still be used to initialize RPC (agent start is mocked out)."""
        backend_name = "stub_backend"

        backend = rpc.backend_registry.register_backend(
            backend_name,
            _stub_construct_rpc_backend_options_handler,
            _stub_init_rpc_backend_handler,
        )

        with self.assertRaisesRegex(
            RuntimeError, "^RPC backend .+: already registered$"
        ):
            # Second registration under the same name must be rejected.
            backend = rpc.backend_registry.register_backend(
                backend_name,
                _stub_construct_rpc_backend_options_handler,
                _stub_init_rpc_backend_handler,
            )

        rpc.init_rpc(
            name="worker1",
            backend=backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
    @dist_init(setup_rpc=False)
    def test_duplicate_name(self):
        """Every rank using the same worker name must fail name-uniqueness."""
        with self.assertRaisesRegex(RuntimeError, "is not unique"):
            store, _, _ = next(
                torch.distributed.rendezvous(
                    self.init_method, rank=self.rank, world_size=self.world_size
                )
            )
            rpc._init_rpc_backend(
                backend=self.rpc_backend,
                store=store,
                # Same literal name on every rank triggers the collision.
                name="duplicate_name",
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
    @dist_init(setup_rpc=False)
    def test_duplicate_name_2(self):
        """Two ranks mapping onto the same name via modulo must collide."""
        with self.assertRaisesRegex(RuntimeError, "is not unique"):
            rpc.init_rpc(
                # rank % (world_size - 1) makes the last rank reuse rank 0's name.
                name=worker_name(self.rank % (self.world_size - 1)),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
    @dist_init(setup_rpc=False)
    def test_reinit(self):
        """Calling init_rpc a second time on an initialized worker must fail."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Wait for all init to complete.
        dist.barrier()

        # TODO: with TCP init, rank 0 raises Address already in use because
        # rank 0 is the start daemon and the store is created before checking if
        # RPC is already initialized in init_rpc.
        if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
            expected_reinit_err = "Address already in use"
        else:
            expected_reinit_err = "is already initialized"

        with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
        rpc.shutdown()
    @dist_init(setup_rpc=False)
    def test_pg_init_no_rpc_init(self):
        """Creating RRefs without init_rpc must raise, even when a process
        group (and DDP) has been initialized."""
        dist.init_process_group(
            backend="gloo",
            init_method=self.file_init_method,
            rank=self.rank,
            world_size=self.world_size,
        )

        class MyModel(torch.nn.Module):
            # Minimal module so DDP wrapping succeeds before the RRef attempt.
            def __init__(self) -> None:
                super().__init__()
                self.lin = torch.nn.Linear(3, 4)

            def forward(self, x):
                return self.lin(x)

        model = MyModel()
        model.train()
        model = torch.nn.parallel.DistributedDataParallel(model)

        with self.assertRaisesRegex(
            RuntimeError,
            "Current RPC agent is not set! Did you initialize the RPC framework",
        ):
            [RRef(param) for param in model.parameters()]
    def test_world_size_one(self):
        # Single-worker RPC: worker sends tensors to itself.
        self._world_size_one(torch.ones(2, 2), torch.ones(2, 2))
@dist_init(setup_rpc=False)
def test_invalid_names(self):
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
WorkerInfo("".join(["a" for i in range(500)]), worker_id)
    # Test that WorkerInfo can be pickled and sent in RPC call
    @dist_init
    def test_worker_info_pickle(self):
        dst_rank = (self.rank + 1) % self.world_size
        worker_info = rpc.api.get_worker_info()
        # Round-trip the WorkerInfo through an identity RPC; equality after
        # the trip proves it (de)serializes correctly.
        ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,))
        self.assertEqual(ret, worker_info)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
    @staticmethod
    def return_callee_id():
        # Return the id of the worker actually executing this function.
        return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
workder_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
workder_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n))
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_nonzero(self):
        """torch.nonzero over RPC matches the local result."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        x = torch.ones(self.world_size, self.world_size)
        # Zero out one element so nonzero() has something non-trivial to find.
        x[self.rank][self.rank] = 0
        ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
        self.assertEqual(ret, x.nonzero())
    @dist_init
    def test_multi_rpc(self):
        # Many sequential RPCs; False selects the non-sparse-tensor variant.
        self._multi_rpc(False)
    @dist_init
    def test_future_wait_twice(self):
        """A failed future re-raises the same error on every wait() call."""
        dst = worker_name((self.rank + 1) % self.world_size)
        futs = [rpc.rpc_async(dst, raise_func) for _ in range(20)]

        with self.assertRaisesRegex(ValueError, "Expected error"):
            torch.futures.wait_all(futs)

        # Waiting again on each individual future must raise the same error.
        for fut in futs:
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut.wait()
    @dist_init(setup_rpc=False)
    def test_wait_all_workers_timeout(self):
        """Graceful shutdown must raise when _wait_all_workers exceeds timeout."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)

        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        og_func = rpc.api._wait_all_workers

        def wait_all_workers_sleep(timeout):
            # Replacement that deliberately outlives the shutdown timeout.
            rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout)

        rpc.api._wait_all_workers = wait_all_workers_sleep

        try:
            with self.assertRaisesRegex(RuntimeError, ""):
                rpc.shutdown(graceful=True, timeout=0.01)
        finally:
            # Always restore the real implementation for subsequent tests.
            rpc.api._wait_all_workers = og_func
        dist.barrier()
    def test_wait_all_workers_dense(self):
        # Graceful wait with a heavy dense-tensor workload in flight.
        self._wait_all_workers(heavy_rpc, torch.ones(100, 100))
    def test_wait_all_workers_twice_dense(self):
        # Same as above but waits twice to check idempotence.
        self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100))
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
    @dist_init
    def test_all_gather_timeout(self):
        """_all_gather with a slow participant must time out on every rank."""
        rpc._set_rpc_timeout(0.1)

        if self.rank == 0:
            # The gather leader reports the _all_gather-specific timeout message.
            with self.assertRaisesRegex(
                RuntimeError, "timed out in _all_gather after 0\\.10 seconds"
            ):
                rpc.api._all_gather(SlowPickleClass(0.5))
        else:
            # Followers see the backend's generic timeout error.
            expected_error = self.get_timeout_error_regex()
            with self.assertRaisesRegex(RuntimeError, expected_error):
                rpc.api._all_gather(SlowPickleClass(0.5))
    def _test_barrier_helper(self, info, names, multi_threaded=False):
        """Drive rpc.api._barrier over ``names`` and verify, via a counter on
        the lexicographically-first worker, that all members passed it.

        Counter checks are skipped when multi_threaded, since concurrent
        callers would race on the shared count.
        """
        names = sorted(names)
        leader = names[0]
        rpc.rpc_sync(leader, _reset_count)
        if not multi_threaded and info.name == leader:
            self.assertEqual(_rpc_barrier_count, 0)
        rpc.api._barrier(names)
        rpc.rpc_sync(leader, _increment_count)
        # Second barrier ensures every member incremented before we check.
        rpc.api._barrier(names)
        if not multi_threaded and info.name == leader:
            self.assertEqual(_rpc_barrier_count, len(names))
    @dist_init
    def test_rpc_barrier_all(self):
        # Test rpc barrier when called with full list of workers
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        names = [worker.name for worker in all_worker_info]
        self._test_barrier_helper(info, names)
    @dist_init
    def test_rpc_barrier_subset(self):
        # Test rpc barrier when processes are called with different subsets of the full list
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        # Odd-id workers form one barrier group, even-id workers the other.
        if info.id % 2:
            names = [worker.name for worker in all_worker_info if worker.id % 2]
        else:
            names = [worker.name for worker in all_worker_info if not worker.id % 2]
        self._test_barrier_helper(info, names)
    @dist_init
    def test_rpc_barrier_partial_subset(self):
        # Test rpc barrier when some processes are not involved in the barrier
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        if info.id % 2:
            names = [worker.name for worker in all_worker_info if worker.id % 2]
        else:
            # Even-id workers barrier only with themselves.
            names = [f"worker{info.id}"]
        self._test_barrier_helper(info, names)
    @dist_init
    def test_rpc_barrier_multithreaded(self):
        # This tests validates the implementation of barrier when multiple threads call into it
        # We only need to check that it does not hang in this case
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        names = [worker.name for worker in all_worker_info]
        threads = []
        for _ in range(3):
            th = threading.Thread(
                target=self._test_barrier_helper, args=(info, names, True)
            )
            threads.append(th)
            th.start()
        # Joining all threads proves no barrier call deadlocked.
        for th in threads:
            th.join()
    @dist_init
    def test_graceful_shutdown_with_uneven_workload(self):
        """Test graceful termination."""
        # Ranks receive different amounts of work; shutdown must still drain all.
        self._run_uneven_workload(heavy_rpc, torch.ones(100, 100))
    @dist_init(setup_rpc=False)
    def test_shutdown_followed_by_rpc(self):
        """RPC calls after shutdown must raise 'RPC has not been initialized'."""
        # Initialize RPC.
        rpc.init_rpc(
            name=f"worker{self.rank:d}",
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        n = self.rank + 1
        dst_rank = n % self.world_size
        # Sanity-check that RPC works before shutting down.
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        self.assertEqual(ret, torch.ones(n, n) * 2)
        rpc.shutdown()

        with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
            rpc.rpc_sync(
                worker_name(dst_rank),
                torch.add,
                args=(torch.ones(n, n), torch.ones(n, n)),
            )
    @dist_init
    def test_expected_src(self):
        """The callee observes the correct source rank of an incoming RPC."""
        dst_rank = (self.rank + 1) % self.world_size
        expected_src_rank = (self.rank - 1) % self.world_size
        # set_value on the callee records the caller's rank into VALUE_FUTURE.
        rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
        value = VALUE_FUTURE.result()
        self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
    def test_build_rpc_profiling_key(self):
        # Tests that the name that shows up as an Event in profiling RPCs has all
        # the necessary information.
        for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
            rpc_profiling_key = _build_rpc_profiling_key(
                exec_mode, "foo", "worker0", "worker1"
            )
            # Key must embed the exec mode, function name, source, and dest.
            self.assertIn(exec_mode.value, rpc_profiling_key)
            self.assertIn("foo", rpc_profiling_key)
            self.assertIn("worker0", rpc_profiling_key)
            self.assertIn("worker1", rpc_profiling_key)
    def check_profiling_info(
        self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode
    ):
        """Assert that ``rpc_event``'s name encodes caller, callee, function,
        and exec mode, and that the event fired exactly once."""
        self.assertTrue(self_worker_name in rpc_event.name)
        self.assertTrue(dst_worker_name in rpc_event.name)
        if isinstance(func, torch.jit.ScriptFunction):
            # Script functions are recorded under their qualified JIT name.
            self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
        else:
            self.assertTrue(func.__name__ in rpc_event.name)
        self.assertTrue(rpc_exec_mode.value in rpc_event.name)
        self.assertEqual(rpc_event.count, 1)
    @dist_init
    def test_profiler_rpc_record_shapes(self):
        """Input shapes recorded for a remote op must match the local profile."""
        if self.rank != 1:
            # Only rank 1 runs the profiler in this test.
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        t1, t2 = torch.ones(100), torch.ones(100)
        with _profile(record_shapes=True) as prof:
            rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))

        function_events = prof.function_events
        remote_events = [event for event in function_events if event.is_remote]
        remote_add_event = next(
            event for event in remote_events if "aten::add" in event.name
        )
        remote_add_input_shapes = remote_add_event.input_shapes
        # Run profiler on equivalent local op and validate shapes are the same.
        with _profile(record_shapes=True) as prof:
            torch.add(t1, t2)

        local_function_events = prof.function_events
        local_add_event = next(
            event for event in local_function_events if "aten::add" in event.name
        )
        local_add_input_shapes = local_add_event.input_shapes
        self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
    @dist_init
    def test_profiler_rpc_memory(self):
        """Remote cpu_memory_usage is propagated iff profile_memory=True."""
        if self.rank != 1:
            # Only rank 1 runs the profiler in this test.
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        with _profile(profile_memory=True) as p:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            fut.wait()

        function_events = p.function_events
        event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events}
        # if cpu_memory_usage was not propagated over the wire, this set would
        # only contain 0 (indicates no memory being profiled)
        self.assertNotEqual({0}, event_cpu_mem_usages)
        # No memory profiled if profile_memory=False
        with _profile(profile_memory=False) as p:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            fut.wait()

        function_events = p.function_events
        event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events}
        self.assertEqual({0}, event_cpu_mem_usages)
    @dist_init
    def test_profiler_export_trace(self):
        """Exported chrome trace contains the expected remote event names."""
        if self.rank != 1:
            # Only rank 1 runs the profiler in this test.
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        with _profile() as p:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            fut.wait()

        with TemporaryFileName() as fname:
            path = fname
            p.export_chrome_trace(path)
            with open(path) as f:
                trace = json.load(f)
                event_names = [event["name"] for event in trace]
                for expected_event_name in EXPECTED_REMOTE_EVENTS + [
                    RPCExecMode.ASYNC.value
                ]:
                    # Event names may carry extra prefixes; substring match.
                    event_exists = any(
                        expected_event_name in event_name for event_name in event_names
                    )
                    self.assertTrue(event_exists)
    @dist_init
    def test_profiler_rpc_key_names(self):
        # tests that remote events are properly prefixed with the RPC profiling key.
        if self.rank != 1:
            return

        # Spawn multiple threads that send RPCs to ensure keys are correctly
        # prefixed when there are multiple RPCs being created/in flight at the
        # same time.
        dst_ranks = [rank for rank in range(self.world_size) if rank != self.rank]

        def rpc_with_profiling(dst_worker):
            # Single profiled RPC; asserts every remote event carries the
            # expected profiling-key prefix and node id.
            with _profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                fut.wait()

            events = prof.function_events
            remote_event_names = {
                event.name: event for event in events if event.is_remote
            }
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                dst_worker,
            )

            remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
            for name, event in remote_event_names.items():
                # Ensure that we have the expected key as part of the remote
                # event.
                self.assertTrue(name.startswith(rpc_profiling_key))
                self.assertTrue(event.is_remote)
                self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
                # Ensure that the remote event name also contains the operator.
                operator_name_substr = name[len(rpc_profiling_key) :]
                # Note: we don't assert that every remote event needs to be
                # in the above set, the set is just a representative set of
                # what we expect to see. The profiler can change and add more
                # events, but we should always expect to see this representative
                # set.
                matching_event = {
                    remote_event_name
                    for remote_event_name in remote_event_name_set
                    if remote_event_name in operator_name_substr
                }
                remote_event_name_set -= matching_event

            # The set should be empty, otherwise its contained elements did
            # not show up in the remote profiler output.
            self.assertTrue(
                remote_event_name_set == set(),
                f"Expected {remote_event_name_set} to be included in remote profiler output.",
            )

        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            num_parallel_rpcs = 2
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=num_parallel_rpcs
            ) as executor:
                futs = [
                    executor.submit(rpc_with_profiling, dst_worker)
                    for _ in range(num_parallel_rpcs)
                ]
                # Wait for workers to finish test
                for fut in futs:
                    fut.result()
    def _run_test_profiler_remote_events_profiled(self):
        # Tests that we can successfully invoke the profiler on a remote node,
        # and collect the remote events back in the local profiler.
        if self.rank != 1:
            return

        dst_ranks = [rank for rank in range(self.world_size) if rank != self.rank]
        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            with _profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                fut.wait()

            events = prof.function_events

            rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
            self.check_profiling_info(
                worker_name(self.rank),
                dst_worker,
                udf_with_torch_ops,
                rpc_event,
                RPCExecMode.ASYNC,
            )

            remote_events = {event.name: event for event in events if event.is_remote}
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                worker_name(dst),
            )

            for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
                expected_key = (
                    rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
                )
                self.assertTrue(expected_key in remote_events)
                remote_event = remote_events[expected_key]
                # Remote event should have a node ID corresponding to the worker
                # it ran on.
                self.assertEqual(remote_event.node_id, dst)

            # Validate order remote events show up in profiling output.
            def convert_remote_to_local(event_name):
                # Strip the "<profiling key>#remote_op: " prefix from an event name.
                remote_op_key = rpc_profiling_key + REMOTE_OP_STR
                return event_name[event_name.find(remote_op_key) + len(remote_op_key) :]

            remote_events_list = [
                convert_remote_to_local(event.name)
                for event in events
                if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
            ]
            self.assertEqual(
                set(remote_events_list),
                set(EXPECTED_REMOTE_EVENTS),
                f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
            )
    @dist_init
    def test_profiler_remote_events_profiled(self):
        # Remote profiler events collected locally (default thread config).
        self._run_test_profiler_remote_events_profiled()
    @dist_init
    def test_profiler_remote_events_profiled_single_threaded(self):
        # Same check under the single-threaded test variant.
        self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
    def _run_rpc_profiling_async_function(self, device="cpu"):
        """Profile a nested async RPC chain (self -> dst1 -> dst2) and verify
        the nested RPC and its remote add are recorded with chained keys."""
        if self.rank != 1:
            # Only rank 1 runs the profiler in this test.
            return

        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        x = torch.ones(2)
        y = torch.ones(2)
        with _profile() as prof:
            ret = rpc.rpc_async(
                dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
            )
            ret.wait()

        function_events = prof.function_events
        # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
        # recorded.
        key_prefix = _build_rpc_profiling_key(
            RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
        )

        nested_rpc_key_prefix = _build_rpc_profiling_key(
            RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
        )
        expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
        remote_events = [event for event in function_events if event.is_remote]
        rpc_remote_event = [
            event for event in remote_events if event.name == expected_key
        ]
        self.assertEqual(1, len(rpc_remote_event))
        rpc_remote_event = rpc_remote_event[0]
        self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
        # slow_async_add's RPC does an add on dst2, which should be reflected as well.
        remote_add_key = (
            expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
        )
        remote_add_event = [
            event for event in remote_events if event.name == remote_add_key
        ]
        self.assertEqual(1, len(remote_add_event))
        remote_add_event = remote_add_event[0]
        # Validate that node_id is dst2.
        self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
    @dist_init
    def test_rpc_profiling_async_function(self):
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        self._run_rpc_profiling_async_function()
        if torch.cuda.is_available():
            # Sync ranks before re-running the CUDA variant.
            dist.barrier()
            self._run_rpc_profiling_async_function(device="cuda:0")
    @dist_init
    def test_rpc_profiling_async_function_single_threaded(self):
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        self._run_rpc_profiling_async_function()
        if torch.cuda.is_available():
            # Sync ranks before re-running the CUDA variant.
            dist.barrier()
            self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = next(
evt for evt in local_function_events if "##forward##" in evt.name
)
local_children = get_cpu_children(local_record_function_event)
local_children_names = [evt.name for evt in local_children]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[event_name.find(remote_op_key) + len(remote_op_key) :]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
    def validate_profiling_workload(self, dst, prof):
        """Check that the profiled torch.mul from run_profiling_workload shows
        up as a remote event attributed to worker ``dst``."""

        def convert_remote_to_local(event_name):
            # Strip the "#remote_op: " prefix from a remote event name.
            return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]

        events = prof.function_events
        remote_events = {
            convert_remote_to_local(event.name): event
            for event in events
            if event.is_remote
        }
        self.assertTrue("aten::mul" in remote_events)
        remote_mul_event = remote_events["aten::mul"]
        self.assertEqual(remote_mul_event.node_id, dst)
        self.check_profiling_info(
            worker_name(self.rank),
            worker_name(dst),
            torch.mul,
            remote_mul_event,
            RPCExecMode.ASYNC,
        )
    def _run_test_profiler_with_autograd_context(self):
        """Profiler and dist_autograd contexts must compose in either nesting order."""
        dst = (self.rank + 1) % self.world_size
        if self.rank == 1:
            # Cases where we can double wrap messages with profiling information and autograd info.
            with dist_autograd.context(), _profile() as prof:
                self.run_profiling_workload(dst)

            self.validate_profiling_workload(dst, prof)

            # Ensure that flipped order of ctx managers results in events being
            # recorded as expected.
            with _profile() as prof, dist_autograd.context():
                self.run_profiling_workload(dst)

            self.validate_profiling_workload(dst, prof)
    @dist_init
    def test_profiler_with_autograd_context_single_threaded(self):
        # Single-threaded variant of the profiler + dist_autograd composition test.
        self._run_test_profiler_with_autograd_context()
    @dist_init
    def test_profiler_with_autograd_context(self):
        # Default variant of the profiler + dist_autograd composition test.
        self._run_test_profiler_with_autograd_context()
    def _profiler_test_with_rpc(
        self,
        rpc_exec_mode,
        func,
        args,
        use_record_function=False,
        dst=None,
        kineto_profile=False,
    ):
        """Core profiler/RPC test driver, run only on rank 1.

        Runs ``func`` over RPC in the given exec mode under either the legacy
        autograd profiler or the kineto profiler, then validates the recorded
        events: node ids, remote events, optional record_function scoping, and
        event ordering. With kineto_profile=True it instead asserts that RPC
        profiling is disabled (no rpc events recorded).
        """
        dst = dst if dst is not None else (self.rank + 1) % self.world_size
        # only run profiler on rank 1.
        p = _profile if not kineto_profile else torch.profiler.profile  # kineto
        if self.rank == 1:
            with p() as prof:
                record_function_ctx_mgr = (
                    contextlib.nullcontext()
                    if not use_record_function
                    else torch.autograd.profiler.record_function("foo")
                )
                with record_function_ctx_mgr:
                    if rpc_exec_mode == RPCExecMode.SYNC:
                        rpc.rpc_sync(worker_name(dst), func, args=args)
                    elif rpc_exec_mode == RPCExecMode.ASYNC:
                        fut = rpc.rpc_async(worker_name(dst), func, args=args)
                        if kineto_profile:
                            # Ensure multiple async RPCs don't cause issues.
                            # Would have raised
                            # "RuntimeError: Cannot call
                            # RemoteProfilerManager::setCurrentKey when current
                            # key is already set." error if RPC profiling was
                            # not disabled properly for kineto.
                            fut2 = rpc.rpc_async(worker_name(dst), func, args=args)
                            fut2.wait()
                        fut.wait()
                    else:
                        self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
                        rref = rpc.remote(worker_name(dst), func, args=args)
                        rref.to_here()
                        # To avoid flakiness, wait for the RRef to be profiled. This
                        # means that we received the acknowledgement of successful
                        # creation on the owner and ran the callbacks responsible
                        # for recording the profiling event.
                        rref._get_profiling_future().wait()

            events = prof.function_events if not kineto_profile else prof.events()
            if kineto_profile:
                # RPC profiling is disabled so there should be no rpc related
                # events.
                with self.assertRaises(IndexError):
                    get_function_event(events, rpc_exec_mode.value)

                return

            rpc_event = get_function_event(events, rpc_exec_mode.value)
            # verify Node ID for this rpc event.
            self.assertEqual(rpc_event.node_id, self.rank)
            # Ensure recording of remote events.
            remote_events = {event for event in events if event.node_id == dst} - {
                rpc_event
            }
            self.assertGreaterEqual(len(remote_events), 1)
            for remote_event in remote_events:
                self.assertEqual(remote_event.node_id, dst)

            if use_record_function:
                scope_event = get_function_event(events, "foo")
                # Since RPC call is within the scope, its CPU interval should be
                # contained within foo's interval.
                self.assertLessEqual(
                    scope_event.time_range.start, rpc_event.time_range.start
                )
                self.assertGreaterEqual(
                    scope_event.time_range.end, rpc_event.time_range.end
                )
            # the sender, dest worker, function run, and type of RPC should all
            # be recorded.
            self_worker_name = worker_name(self.rank)
            dst_worker_name = worker_name(dst)
            self.check_profiling_info(
                self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode
            )
            if use_record_function:
                # verify order by ensuring that the outer context comes
                # before the rpc event.
                foo_event_ix = next(
                    i for i, event in enumerate(events) if "foo" in event.name
                )
                rpc_event_idx = next(
                    i
                    for i, event in enumerate(events)
                    if rpc_exec_mode.value in event.name
                )
                self.assertLess(foo_event_ix, rpc_event_idx)
    def _run_test_profiler_with_sync_rpc_udf(self):
        # Sync RPC of a Python UDF, with and without a record_function scope.
        self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, my_sleep_func, args=(1,), use_record_function=True
        )
    @dist_init
    def test_profiler_with_sync_rpc_udf(self):
        # Default-thread variant.
        self._run_test_profiler_with_sync_rpc_udf()
    @dist_init
    def test_profiler_with_sync_rpc_udf_single_threaded(self):
        # Single-threaded variant.
        self._run_test_profiler_with_sync_rpc_udf()
    def _run_test_profiler_with_sync_rpc_builtin(self):
        # Sync RPC of a torch builtin, with and without a record_function scope.
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC,
            torch.mul,
            args=(torch.ones(1), torch.ones(1)),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_sync_rpc_builtin(self):
        # Default-thread variant.
        self._run_test_profiler_with_sync_rpc_builtin()
    @dist_init
    def test_profiler_with_sync_rpc_builtin_single_threaded(self):
        # Single-threaded variant.
        self._run_test_profiler_with_sync_rpc_builtin()
    def _run_test_profiler_with_async_rpc_udf(self):
        # Async RPC of a Python UDF, with/without record_function, plus a
        # kineto run to confirm RPC profiling is cleanly disabled there.
        self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, my_sleep_func, args=(1,), use_record_function=True
        )
        # Test to ensure that kineto profiler enabled in RPC does not enable
        # RPC profiling (it is unsupported) and does not result in issues.
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, my_sleep_func, args=(1,), kineto_profile=True
        )
    @dist_init
    def test_profiler_with_async_rpc_udf(self):
        # Default-thread variant.
        self._run_test_profiler_with_async_rpc_udf()
    @dist_init
    def test_profiler_with_async_rpc_udf_single_threaded(self):
        # Single-threaded variant.
        self._run_test_profiler_with_async_rpc_udf()
    def _run_test_profiler_with_async_rpc_builtin(self):
        # Async RPC of a torch builtin, with and without a record_function scope.
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC,
            torch.mul,
            args=(torch.ones(1), torch.ones(1)),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_async_rpc_builtin(self):
        # Default-thread variant.
        self._run_test_profiler_with_async_rpc_builtin()
    @dist_init
    def test_profiler_with_async_rpc_builtin_single_threaded(self):
        # Single-threaded variant.
        self._run_test_profiler_with_async_rpc_builtin()
    def _run_test_profiler_with_remote_udf(self):
        # rpc.remote of a Python UDF, with record_function and remote-to-self.
        self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
        )
    @dist_init
    def test_profiler_with_remote_udf(self):
        # Default-thread variant.
        self._run_test_profiler_with_remote_udf()
    @dist_init
    def test_profiler_with_remote_udf_single_threaded(self):
        # Single-threaded variant.
        self._run_test_profiler_with_remote_udf()
    def _run_test_profiler_with_remote_builtin(self):
        # rpc.remote of a torch builtin, with record_function and remote-to-self.
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE,
            torch.mul,
            args=(torch.ones(1), torch.ones(1)),
            use_record_function=True,
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE,
            torch.mul,
            args=(torch.ones(1), torch.ones(1)),
            dst=self.rank,
        )
    @dist_init
    def test_profiler_with_remote_builtin(self):
        # Default-thread variant.
        self._run_test_profiler_with_remote_builtin()
    @dist_init
    def test_profiler_with_remote_builtin_single_threaded(self):
        # Single-threaded variant.
        self._run_test_profiler_with_remote_builtin()
    def _run_test_profiler_with_script_async_rpc(self):
        # Profile an async RPC running a TorchScript function, with and without
        # a record_function scope around the call.
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_script_async_rpc(self):
        # Profile async RPC of a TorchScript function; shared body in the helper.
        self._run_test_profiler_with_script_async_rpc()
    @dist_init
    def test_profiler_with_script_async_rpc_single_threaded(self):
        # Single-threaded variant; exercises the same helper body.
        self._run_test_profiler_with_script_async_rpc()
    def _run_test_profiler_with_script_sync_rpc(self):
        # Profile a sync RPC running a TorchScript function, with and without
        # a record_function scope around the call.
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_script_sync_rpc(self):
        # Profile sync RPC of a TorchScript function; shared body in the helper.
        self._run_test_profiler_with_script_sync_rpc()
    @dist_init
    def test_profiler_with_script_sync_rpc_single_threaded(self):
        # Single-threaded variant; exercises the same helper body.
        self._run_test_profiler_with_script_sync_rpc()
    def _run_test_profiler_with_script_remote_rpc(self):
        # Profile rpc.remote of a TorchScript function: plain call,
        # record_function-wrapped call, and a remote call to this rank itself.
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
        )
    @dist_init
    def test_profiler_with_script_remote_rpc(self):
        # Profile rpc.remote of a TorchScript function; shared body in the helper.
        self._run_test_profiler_with_script_remote_rpc()
    @dist_init
    def test_profiler_with_script_remote_rpc_single_threaded(self):
        # Single-threaded variant; exercises the same helper body.
        self._run_test_profiler_with_script_remote_rpc()
    def _assert_top_level_events(
        self, process_global_events, expected_top_level_event_names
    ):
        """Assert that the top-level (non-nested) profiler event names across all
        threads match ``expected_top_level_event_names``, order-insensitively.

        An event counts as top-level when its start time is past the end of the
        previous top-level event on the same thread, i.e. it is not nested
        inside another event's time range.
        """
        top_level_event_names = []
        for thread_local_events in process_global_events:
            # Get top-level events from all events happened on a thread.
            last_end_time = 0
            for event in thread_local_events:
                event_name = event.name
                time_range = event.time_range
                if time_range.start > last_end_time:
                    top_level_event_names.append(event_name)
                    last_end_time = time_range.end
        top_level_event_names = sorted(top_level_event_names)
        expected_top_level_event_names = sorted(expected_top_level_event_names)
        self.assertEqual(
            top_level_event_names,
            expected_top_level_event_names,
            f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
        )
    @dist_init
    def test_server_process_global_profiler(self):
        # Nested server-side (process-global) profilers on the destination worker:
        # the inner scope should observe only aten::sub, the outer scope should
        # observe both aten::sub and aten::add. The profiler context managers are
        # entered/exited manually via RPC because they live on the remote worker.
        if self.rank != 0:
            return
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker_name = worker_name(dst_rank)
        x = torch.tensor(1)
        y = torch.tensor(2)
        outer_profile_rref = rpc.remote(
            dst_worker_name, rpc._server_process_global_profile
        )
        outer_profile_rref.rpc_sync().__enter__()
        rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
        inner_profile_rref = rpc.remote(
            dst_worker_name, rpc._server_process_global_profile
        )
        inner_profile_rref.rpc_sync().__enter__()
        rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
        inner_profile_rref.rpc_sync().__exit__(None, None, None)
        outer_profile_rref.rpc_sync().__exit__(None, None, None)
        inner_events = rpc.rpc_sync(
            dst_worker_name, get_events_from_profile, (inner_profile_rref,)
        )
        expected_inner_events = ["aten::sub"]
        expected_outer_events = expected_inner_events + ["aten::add"]
        self._assert_top_level_events(inner_events, expected_inner_events)
        outer_events = rpc.rpc_sync(
            dst_worker_name, get_events_from_profile, (outer_profile_rref,)
        )
        self._assert_top_level_events(outer_events, expected_outer_events)
        # Smoke-check that aggregation over the collected events works.
        inner_profile_rref.rpc_sync().key_averages()
        outer_profile_rref.rpc_sync().key_averages()
    @dist_init
    def test_async_record_function_double_end_callbacks(self):
        # Attaching end callbacks to the same record_function scope twice for one
        # future must raise; the first attachment consumes the scope.
        num_sleep_seconds = 1
        if self.rank == 1:
            # Validate that calling the function twice results in an error.
            with _profile():
                with torch.autograd.profiler.record_function("foo") as rf:
                    fut = rpc.rpc_async(
                        worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
                    )
                    rf._call_end_callbacks_on_future(fut)
                    with self.assertRaisesRegex(
                        RuntimeError, "can only be called once."
                    ):
                        rf._call_end_callbacks_on_future(fut)
            fut.wait()
    @dist_init
    def test_async_record_function_legacy(self):
        # Test the legacy _record_function ops work
        # Note: These exist for backward compatibility with TorchScript
        num_sleep_seconds = 1
        if self.rank == 1:
            with _profile():
                try:
                    handle = torch.ops.profiler._record_function_enter("foo", None)
                    fut = rpc.rpc_async(
                        worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
                    )
                    torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut)
                finally:
                    # Always close the record-function scope, even if launching
                    # the RPC or attaching callbacks failed.
                    torch.ops.profiler._record_function_exit(handle)
            fut.wait()
    @dist_init
    def test_async_record_function_cbs_jit_call(self):
        # End-callbacks attached via the JIT op must preserve the RPC future's
        # value, and the profiled event must carry the script function's name.
        if self.rank == 1:
            with _profile() as pf:
                key = _build_rpc_profiling_key(
                    RPCExecMode.ASYNC,
                    torch._jit_internal._qualified_name(my_script_func),
                    "worker1",
                    "worker0",
                )
                with torch.autograd.profiler.record_function(key) as rf:
                    fut = rpc.rpc_async(
                        worker_name(0), my_script_func, args=(torch.tensor(1),)
                    )
                    # Intentionally calling record_function internals
                    fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(
                        rf.record, fut
                    )
                result = fut.wait()
                # Validate that the profiling future returns the same value as the RPC
                # future.
                expected = torch.add(torch.tensor(1), torch.tensor(1))
                self.assertEqual(result, expected)
            events = pf.function_events
            rpc_event = get_function_event(
                events, torch._jit_internal._qualified_name(my_script_func)
            )
            self.assertTrue(
                torch._jit_internal._qualified_name(my_script_func) in rpc_event.name
            )
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
    @dist_init
    def test_py_multi_async_call(self):
        # Multiple outstanding rpc_async calls to the same WorkerInfo complete
        # independently with the expected results.
        n = self.rank + 1
        dst_rank = n % self.world_size
        dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
        fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
        fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
        self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
        self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
    @dist_init
    def test_py_tensors(self):
        # Tensors serialize round-trip through rpc_sync; remote result equals a
        # local call of the same UDF.
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            my_tensor_function,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
    @dist_init
    def test_py_tensors_multi_async_call(self):
        # Launch 100 async tensor RPCs of varying sizes (including the 0x0 edge
        # case at i == 0) and verify each result after waiting on all futures.
        futs = []
        n = self.rank + 1
        dst_rank = n % self.world_size
        for i in range(100):
            fut = rpc.rpc_async(
                worker_name(dst_rank),
                my_tensor_function,
                args=(torch.ones(i, i), torch.ones(i, i)),
            )
            futs.append(fut)
        for j, val in enumerate(torch.futures.wait_all(futs)):
            self.assertEqual(
                val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
            )
    @dist_init
    def test_py_tensors_in_container(self):
        # Tensors nested inside lists, user-defined classes, and dicts all
        # serialize correctly through RPC.
        n = self.rank + 1
        dst_rank = n % self.world_size
        a = [torch.ones(n, n), torch.ones(n, n)]
        b = TensorClass(build_complex_tensors())
        c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
        ret = rpc.rpc_sync(
            worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
        )
        self.assertEqual(ret, my_complex_tensor_function(a, b, c))
    @dist_init
    def test_py_nested_pickle(self):
        # A custom picklable class holding tensors survives nested pickling
        # through an RPC; the remote result matches the locally-computed one.
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            run_nested_pickle,
            args=(MyPickleClass(), torch.ones(2, 2)),
        )
        m = MyPickleClass()
        m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
        self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
    @dist_init
    def test_py_raise_in_user_func(self):
        # A ValueError raised in the remote UDF propagates to fut.wait() on the
        # caller, and the callee also logs the error to stderr.
        with captured_output() as (_, err):
            # This barrier prevents a race condition where the main thread has
            # not entered the context manager when the remote function runs.
            initialize_pg(self.file_init_method, self.rank, self.world_size)
            dist.barrier()
            n = self.rank + 1
            dst_rank = n % self.world_size
            fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
            with self.assertRaisesRegex(ValueError, expected_err):
                fut.wait()
            # This barrier prevents a race condition where the main thread exits
            # context manager before the remote function has ran.
            dist.barrier()
        # Validate that trainers log errors when running functions.
        stderr_lines = err.getvalue()
        self.assertTrue(expected_err in stderr_lines)
    @dist_init
    def test_py_raise_in_user_func_escaped_str(self):
        # Error messages containing escape sequences are un-escaped when the
        # remote exception is re-raised locally.
        n = self.rank + 1
        dst_rank = n % self.world_size
        fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
        try:
            fut.wait()
        except ValueError as e:
            msg = str(e)
            # Ensure newlines are unescaped to provide a better repr of error.
            self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
        else:
            self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
    @dist_init
    def test_nested_rpc(self):
        # An RPC whose handler issues another RPC; shared body in the helper.
        self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1)
    @dist_init
    def test_stress_light_rpc(self):
        # Stress test with a lightweight payload; shared body in the helper.
        self._stress_test_rpc(light_rpc)
    @dist_init
    def test_stress_heavy_rpc(self):
        # Stress test with a heavy tensor payload, repeated 20 times.
        self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
    @dist_init
    def test_stress_heavy_rpc_torchscript(self):
        # Stress test with a heavy TorchScript payload, repeated 20 times.
        self._stress_test_rpc(
            heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),)
        )
    @dist_init
    def test_builtin_remote_ret(self):
        # rpc.remote of a builtin op returns the expected value via the RRef.
        self._builtin_remote_ret(
            torch.ones(2, 2), torch.ones(2, 2), torch.ones(2, 2) * 2
        )
    @dist_init
    def test_builtin_remote_self(self):
        # rpc.remote of a builtin op targeting this worker itself.
        self._builtin_remote_self(
            torch.ones(2, 2), torch.ones(2, 2), torch.ones(2, 2) * 2
        )
@staticmethod
def _multi_args_fn(n, sparse=False):
if sparse:
return (build_sparse_tensor(), build_sparse_tensor())
else:
return (torch.ones(n, n), torch.ones(n, n))
    @dist_init
    def test_multi_builtin_remote_ret(self):
        # Many concurrent rpc.remote calls of a builtin with dense args.
        self._test_multi_remote_call(torch.add, False, args_fn=RpcTest._multi_args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@staticmethod
def _multi_kwargs_fn(n, sparse=False):
if sparse:
return {
"a": build_sparse_tensor(),
"b": build_sparse_tensor(),
"c": build_sparse_tensor(),
}
else:
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
    @dist_init
    def test_multi_py_udf_remote(self):
        # Many concurrent rpc.remote calls of a Python UDF with dense kwargs.
        self._test_multi_remote_call(
            my_function, False, kwargs_fn=RpcTest._multi_kwargs_fn
        )
    @dist_init
    def test_py_rref_args(self):
        # RRefs passed as arguments to a Python UDF; shared body in the helper.
        self._py_rref_args(
            torch.ones(2, 2), 1, torch.ones(2, 2), 2, torch.ones(2, 2) * 2 + 3
        )
    @dist_init
    def test_py_rref_args_user_share(self):
        # User-to-user RRef sharing as UDF arguments; shared body in the helper.
        self._py_rref_args_user_share(
            torch.ones(2, 2), 1, 2, torch.ones(2, 2), 3, 4, torch.ones(2, 2) * 2 + 10
        )
    @dist_init
    def test_py_rpc_rref_args(self):
        # RRef arguments passed through rpc_sync; shared body in the helper.
        self._py_rpc_rref_args(
            torch.ones(2, 2), 1, 2, torch.ones(2, 2), 3, 4, torch.ones(2, 2) * 2 + 10
        )
    @dist_init
    def test_nested_remote(self):
        # rpc.remote whose handler issues another rpc.remote.
        self._nested_remote(nested_remote, torch.ones(2, 2) + 3)
    @dist_init
    def test_nested_rref(self):
        # A remote call returning a tuple of RRefs created on a third worker.
        self._nested_rref(nested_rref, torch.ones(2, 2) + 1, torch.ones(2, 2) + 2)
    @dist_init
    def test_nested_rref_stress(self):
        # Stress variant of the nested-RRef scenario.
        self._nested_rref_stress(
            nested_rref, torch.ones(2, 2) + 1, torch.ones(2, 2) + 2
        )
    @dist_init
    def test_multi_layer_nested_async_rpc(self):
        # This test will exit right away, but there will be a chain of async
        # RPCs. The termination algorithm should detect those messages properly.
        # Otherwise, some peer could exit early, leaving others to timeout
        # errors or connection closed errors.
        ttl = 20  # depth of the async RPC chain
        n = self.rank + 1
        dst_rank = n % self.world_size
        multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
    @dist_init
    def test_remote_with_exception(self):
        # Exceptions raised by the remote function surface from to_here(), both
        # for a remote peer and for a remote call targeting this worker itself.
        n = self.rank + 1
        dst_rank = n % self.world_size
        # check ref to other workers
        rref = rpc.remote(worker_name(dst_rank), raise_func)
        with self.assertRaises(ValueError):
            rref.to_here()
        # check ref to itself
        rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
        with self.assertRaises(TypeError):
            rref.to_here()
    @dist_init
    def test_rpc_return_rref(self):
        # An rpc_sync whose remote handler returns an RRef owned by a third
        # worker; the returned RRef must be usable from this caller.
        n = self.rank + 1
        dst_rank1 = n % self.world_size
        dst_rank2 = (n + 1) % self.world_size
        rref = rpc.rpc_sync(
            worker_name(dst_rank1),
            rpc_return_rref,
            args=(worker_name(dst_rank2),),
        )
        self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
    @dist_init
    def test_rref_forward_chain(self):
        # Forward an RRef through a chain of `ttl` workers; each hop wraps it in
        # a single-element list, so unwrapping ttl times recovers the value.
        ttl = 8
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref = rpc.remote(worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1))
        ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
        for _ in range(ttl):
            self.assertEqual(len(ret_rref), 1)
            ret_rref = ret_rref[0].to_here()
        ret = ret_rref
        self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
@dist_init
def test_local_rref_no_fork(self):
local_rref = RRef(35)
self.assertEqual(local_rref.local_value(), 35)
    @dist_init
    def test_local_value_not_on_owner(self):
        # ensure that an error message is thrown if a user tries to call
        # local_value() on a non-owning node.
        next_rank = (self.rank + 1) % self.world_size
        rref = rpc.remote(
            worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        # The regex pins the exact RRef/fork IDs and both worker identities
        # reported in the error.
        with self.assertRaisesRegex(
            RuntimeError,
            (
                rf"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
                rf"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
                r"can't call localValue\(\) on user "
                rf"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
                rf"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
            ),
        ):
            rref.local_value()
    @dist_init
    def test_return_local_rrefs(self):
        # RRefs created on the callee can be returned to the caller, mutated via
        # RPCs to their owner, and then read back with the mutations applied.
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_list = rpc.rpc_sync(
            worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
        )
        for rref in rref_list:
            rpc.rpc_sync(
                rref.owner(),
                _call_method_on_rref,
                args=(MyClass.increment_value, rref, 10),
            )
        rets = [
            rpc.rpc_sync(
                rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
            )
            for rref in rref_list
        ]
        self.assertEqual(rets, [11, 12, 13])
    @dist_init
    def _test_rref_type(self, blocking):
        # Verifies RRef._get_type(): the first call launches an RPC to the
        # owner, subsequent calls are served from cache (no additional RPC),
        # in both blocking and future-returning (non-blocking) modes.
        def launched_rpc(events):
            # True if the profiler recorded the type-query RPC to the owner.
            expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
            return any(e.name.startswith(expected_name) for e in events)
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
        with _profile() as p:
            t = rref._get_type(blocking=blocking)
            if not blocking:
                t = t.wait()
        self.assertTrue(launched_rpc(p.function_events))
        expected_type = type(torch.ones(2))
        self.assertEqual(t, expected_type)
        futs = []
        def verify(fut):
            self.assertEqual(fut.value(), expected_type)
        with _profile() as p:
            for _ in range(10):
                t = rref._get_type(blocking=blocking)
                if not blocking:
                    futs.append(t)
                    t.add_done_callback(verify)
                    t = t.wait()
                self.assertEqual(t, expected_type)
        if not blocking:
            # Note that cached calls with blocking=False all return the same
            # cached original future.
            first_fut = futs[0]
            for f in futs[1:]:
                self.assertTrue(f is first_fut)
            # Ensure we never launch another RPC, other than for the very
            # first call.
            self.assertFalse(launched_rpc(p.function_events))
        self.assertEqual(t, type(torch.ones(2)))
        # Also works for a user-defined class created remotely.
        rref = rpc.remote(dst, MyClass, args=(0,))
        rref_type = rref._get_type(blocking=blocking)
        if not blocking:
            rref_type = rref_type.wait()
        self.assertEqual(rref_type, MyClass)
    def test_rref_type_blocking(self):
        # Blocking-mode variant of the RRef._get_type() checks.
        self._test_rref_type(blocking=True)
    def test_rref_type_non_blocking(self):
        # Future-returning (non-blocking) variant of the _get_type() checks.
        self._test_rref_type(blocking=False)
    @dist_init
    def _test_rref_type_with_error(self, blocking):
        # When the remote creation function raises, _get_type() surfaces the
        # error: inline in blocking mode, on wait() in non-blocking mode.
        dst = worker_name((self.rank + 1) % self.world_size)
        # 10 ms timeout
        rref = rpc.remote(dst, raise_func)
        # Blocking: error raised inline
        if blocking:
            with self.assertRaisesRegex(ValueError, "Expected error"):
                rref._get_type(blocking=blocking)
        else:
            # Non-blocking: Immediately return future, block on wait
            fut = rref._get_type(blocking=blocking)
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut.wait()
    def test_rref_type_with_error_blocking(self):
        # Blocking-mode variant of the _get_type() error propagation check.
        self._test_rref_type_with_error(blocking=True)
    def test_rref_type_with_error_non_blocking(self):
        # Non-blocking variant of the _get_type() error propagation check.
        self._test_rref_type_with_error(blocking=False)
    @dist_init
    def _test_rref_type_owner(self, blocking):
        # _get_type() on an owner-side (locally created) RRef works without any
        # RPC, for both tensor values and user-defined class instances.
        rref = RRef(torch.ones(2) + 1)
        rref_type = rref._get_type(blocking=blocking)
        if not blocking:
            rref_type = rref_type.wait()
        self.assertEqual(rref_type, type(torch.ones(2)))
        rref = RRef(MyClass(0))
        rref_type = rref._get_type(blocking=blocking)
        if not blocking:
            rref_type = rref_type.wait()
        self.assertEqual(rref_type, MyClass)
    def test_rref_type_owner_blocking(self):
        # Blocking-mode variant of the owner-side _get_type() check.
        self._test_rref_type_owner(blocking=True)
    def test_rref_type_owner_non_blocking(self):
        # Non-blocking variant of the owner-side _get_type() check.
        self._test_rref_type_owner(blocking=False)
    @staticmethod
    def _slow_add(x, y):
        # Deliberately slow addition (1s sleep), used to exercise timeouts and
        # in-flight RPC behavior in the tests below.
        time.sleep(1)
        return x + y
    @dist_init
    def test_rref_type_slow_init(self):
        # _get_type() must wait for a slow remote value creation to finish.
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
        self.assertEqual(rref._get_type(), type(torch.ones(2)))
    @dist_init
    def test_owner_equality(self):
        # WorkerInfo equality/hashing: RRefs created on the same worker share an
        # owner, owners on different workers compare unequal, and WorkerInfo is
        # usable as a dict key (same-owner entries collide onto one key).
        a = RRef(40)
        b = RRef(50)
        other_rank = (self.rank + 1) % self.world_size
        other_a = rpc.remote(
            worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
        )
        other_b = rpc.remote(
            worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
        )
        other_a.to_here()  # to ensure clean termination
        other_b.to_here()
        self.assertNotEqual(a.owner(), 23)
        self.assertEqual(other_a.owner(), other_b.owner())
        self.assertNotEqual(a.owner(), other_a.owner())
        self.assertEqual(other_a.owner(), other_a.owner())
        self.assertEqual(other_a.owner(), other_b.owner())
        self.assertEqual(a.owner(), a.owner())
        self.assertEqual(a.owner(), b.owner())
        self.assertEqual(a.owner(), rpc.get_worker_info())
        # a/b share an owner key, as do other_a/other_b, so the later insert
        # for each key wins nothing — the first stored value is retrieved.
        x = {}
        x[a.owner()] = a
        x[other_a.owner()] = other_a
        self.assertEqual(x[a.owner()], a)
        self.assertEqual(x[b.owner()], a)
        self.assertEqual(x[other_a.owner()], other_a)
        self.assertEqual(x[other_b.owner()], other_a)
        self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
    @dist_init
    def test_remote_same_worker(self):
        # Two RRefs created on the same destination worker can be passed as
        # arguments to a third remote call on that worker.
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_a = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
        )
        rref_b = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
        )
        rref_c = rpc.remote(
            worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
        )
        self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
    @dist_init(setup_rpc=True)
    def test_call_method_on_rref(self):
        """
        Tests that it is possible to call an instance method on a remote object
        by using rref.owner() as destination of the call.
        """
        vals = [10, 2, 5, 7]
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        # creates a remote object
        rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
        # modifies state of the remote object, once via each RPC API
        rpc.rpc_sync(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[1]),
        )
        rpc.rpc_async(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[2]),
        ).wait()
        rpc.remote(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[3]),
        ).to_here()
        # queries state of the remote object
        result = rpc.rpc_sync(
            dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
        )
        self.assertEqual(result, sum(vals))
    # Notice `rpc.api.shutdown()` accesses
    # `_delete_all_user_and_unforked_owner_rrefs` through
    # `torch.distributed.rpc.api`, so patching
    # `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
    # not help.
    @mock.patch.object(
        torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs"
    )
    def _test_rref_leak(
        self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak
    ):
        # With RRef cleanup mocked out, an outstanding user RRef is leaked at
        # shutdown; graceful shutdown either tolerates it (_ignore_rref_leak)
        # or raises "Leaking RRef".
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Wait for all init to complete.
        dist.barrier()
        rref = rpc.remote(  # noqa: F841
            worker_name((self.rank + 1) % self.world_size),
            torch.add,
            args=(torch.ones(2, 2), 1),
        )
        import torch.distributed.rpc.api as api
        if ignore_leak:
            api._ignore_rref_leak = True
            rpc.shutdown(graceful=True)
        else:
            api._ignore_rref_leak = False
            with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
                rpc.shutdown(graceful=True)
    @dist_init(setup_rpc=False)
    def test_rref_leak(self):
        # Leak detection must fire when leaks are not ignored.
        self._test_rref_leak(ignore_leak=False)
    @dist_init(setup_rpc=False)
    def test_ignore_rref_leak(self):
        # Leak detection must be suppressed when _ignore_rref_leak is set.
        self._test_rref_leak(ignore_leak=True)
    @dist_init
    def test_rref_str(self):
        # __str__ formats: OwnerRRef shows its RRef id; UserRRef shows both the
        # RRef id and the fork id (local ids allocated sequentially per worker).
        rref1 = RRef(self.rank)
        id_class = "GloballyUniqueId"
        self.assertEqual(
            f"OwnerRRef({id_class}(created_on={self.rank}, local_id=0))",
            rref1.__str__(),
        )
        dst_rank = (self.rank + 1) % self.world_size
        rref2 = rpc.remote(worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1))
        self.assertEqual(
            rref2.__str__(),
            f"UserRRef(RRefId = {id_class}(created_on={self.rank}, local_id=1), "
            f"ForkId = {id_class}(created_on={self.rank}, local_id=2))",
        )
    @dist_init
    def test_rref_get_future(self):
        # Tests that we can obtain the future corresponding to the creation of
        # the RRef on remote end
        if self.rank == 0:
            # Builtin
            rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
            # UDF
            rref = rpc.remote(worker_name(1), foo_add, args=())
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
            # Script
            rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1),))
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
    @dist_init
    def test_rref_context_debug_info(self):
        # This test checks local states that are modified by remote workers.
        # This means that we would need barrier before and after every check.
        # The barrier before the check makes sure that all previous states are
        # cleared globally, the barrier after ensures that no following states
        # change gets into the current check.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Check 1: local RRef does not update owners_ map or add a pending user.
        #################################################
        rref1 = RRef(self.rank)
        # don't need a barrier here as local RRef is handled by this thread
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertIn("num_pending_users", info)
        # RRef on local value is not added to context until shared across RPC
        self.assertEqual(0, int(info["num_owner_rrefs"]))
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after the check 1
        dist.barrier()
        # Check 2: Sharing RRef as an arg should update owners_ map
        ###########################################################
        dst_rank = (self.rank + 1) % self.world_size
        rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
        # barrier before check 2
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(1, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 2
        dist.barrier()
        # clear states for check 2
        rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
        # Wait for owner rref to be cleared.
        while int(info["num_owner_rrefs"]) != 0:
            info = _rref_context_get_debug_info()
            time.sleep(0.1)
        dist.barrier()
        # Check 3: rpc.remote call should update owners_ map
        ####################################################
        rref2 = rpc.remote(worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1))
        rref3 = rpc.remote(worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1))
        rref2.to_here()
        rref3.to_here()
        # barrier before check 3
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(2, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 3
        dist.barrier()
    @dist_init
    def test_disable_gil_profiling(self):
        # test that rpc.enable_gil_profiling(false) will result in
        # GIL wait time not being recorded.
        # GIL profiling should be disabled by default.
        dst_rank = (self.rank + 1) % self.world_size
        rpc.rpc_sync(
            worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
        rpc.enable_gil_profiling(True)
        rpc.rpc_sync(
            worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertIn("agent.gil_average_wait_time_us", info)
    @dist_init(setup_rpc=False)
    def test_local_shutdown(self):
        # test that we can start RPC and then immediately locally shutdown
        # without sending any messages.
        rpc.init_rpc(
            name=f"worker{self.rank:d}",
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        # pass in graceful=False to ensure that we don't wait for other workers.
        rpc.shutdown(graceful=False)
    @dist_init
    def test_debug_info(self):
        # only test keys in this test case. Values should be covered by
        # individual module debug info tests
        import torch.distributed.autograd as dist_autograd
        info = _get_debug_info()
        rref_info = _rref_context_get_debug_info()
        agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
        autograd_info = dist_autograd._get_debug_info()
        # The three sub-modules must not report overlapping keys.
        common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
        self.assertEqual(0, len(common_keys))
        expected = {}
        expected.update(rref_info)
        expected.update(agent_info)
        expected.update(autograd_info)
        # NB: Key ordering is only preserved in python 3.6+. So here, we
        # manually check keys are equal.
        for key in expected:
            self.assertIn(key, info.keys())
        for key in info:
            self.assertIn(key, expected.keys())
    @dist_init(setup_rpc=False)
    @skip_but_pass_in_sandcastle_if(
        IS_MACOS,
        "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
    )
    def test_handle_send_exceptions(self):
        # test that if a callee node has gone down, we raise an appropriate
        # exception instead of just crashing.
        rpc.init_rpc(
            name=f"worker{self.rank:d}",
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        rpc._set_rpc_timeout(10)
        # This barrier is needed to ensure that some workers do not exit before
        # others have been brought up.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        if self.rank == 1:
            dst_rank = (self.rank + 1) % self.world_size
            dst_worker = worker_name(dst_rank)
            # allow destination worker to exit without joining
            error_str = self.get_shutdown_error_regex()
            wait_until_node_failure(dst_rank, error_str)
            fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
            # Shutdown sequence is not very well defined and as a result
            # we can see any of the error messages defined in get_shutdown_error_regex.
            with self.assertRaisesRegex(RuntimeError, error_str):
                fut.wait()
        # exit all workers non-gracefully.
        rpc.shutdown(graceful=False)
    @dist_init
    def test_deadlock(self):
        # this test is copied from https://github.com/pytorch/pytorch/issues/45089
        # A slow self-chain RPC followed by process-group init must not deadlock.
        if self.rank == 1:
            dst1 = worker_name((self.rank + 1) % self.world_size)
            x = torch.ones(2)
            y = torch.ones(2)
            rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
        dist_initialized = dist.is_initialized()
        if not dist_initialized:
            dist.init_process_group(
                backend="gloo",
                init_method=self.file_init_method,
                rank=self.rank,
                world_size=self.world_size,
            )
    @dist_init(setup_rpc=False)
    def test_local_shutdown_with_rpc(self):
        # test that we can start RPC, send RPCs, and then run local shutdown.
        rpc.init_rpc(
            name=f"worker{self.rank:d}",
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        n = self.rank + 1
        dst_rank = n % self.world_size
        rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        # A barrier is needed to ensure that all RPCs are processed.
        # Otherwise, some RPCs can timeout since the receiving end
        # has terminated.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        # pass in graceful=False to ensure that we don't wait for other workers.
        rpc.shutdown(graceful=False)
    @dist_init(setup_rpc=False)
    def test_set_and_get_default_rpc_timeout(self):
        # A timeout configured via RpcBackendOptions is reflected by
        # rpc.get_rpc_timeout() after init.
        timeout = 0.5
        # A new `RpcBackendOptions` is constructed
        # when accessing `self.rpc_backend_options`.
        rpc_backend_options = self.rpc_backend_options
        rpc_backend_options.rpc_timeout = timeout
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )
        set_timeout = rpc.get_rpc_timeout()
        self.assertEqual(timeout, set_timeout)
        rpc.shutdown()
    @dist_init
    def test_default_timeout_used(self):
        """
        Tests that if no timeout is passed into rpc_async and rpc_sync, then the
        default timeout is used.
        """
        dst_rank = (self.rank + 1) % self.world_size
        rpc._set_rpc_timeout(0.001)  # 1 ms
        # futures should time out and be marked with an exception indicating it as such.
        futs = [
            rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
            for _ in range(10)
        ]
        expected_error = self.get_timeout_error_regex()
        for fut in futs:
            with self.assertRaisesRegex(RuntimeError, expected_error):
                fut.wait()
        # ensure that if a new timeout is set old futures don't time out but new ones do.
        rpc._set_rpc_timeout(200)  # 200 seconds
        # create a longstanding RPC.
        fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        # now, set a short timeout.
        rpc._set_rpc_timeout(0.001)
        # fut2 should time out, fut1 should not.
        fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut2.wait()
        fut1.wait()
        # Zero timeout means infinity, so future should run to completion.
        rpc._set_rpc_timeout(0)
        rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
        # reset to default timeout so shutdown messages can process cleanly.
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    @dist_init
    def test_rpc_timeouts(self):
        # Per-call `timeout=` overrides the agent-wide default for both
        # rpc_async and rpc_sync; timeout=0 disables the timeout entirely.
        # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        timeout = 0.1  # 100 ms
        expected_error = self.get_timeout_error_regex()
        # Test async UDF
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
        # Test sync UDF
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # If we set a default timeout for RPCs, it should be respected, though
        # still overridden if we pass in a different timeout to the APIs.
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # The RPCs should run to completion since we override the timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
        # Passing in a zero timeout should ensure that the RPC won't time out.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
        # Reset for clean shutdown
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
def test_dist_init_decorator(self):
@dist_init(setup_rpc=False)
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
@dist_init
def test_func(self):
return "expected result"
self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
    """_use_rpc_pickler must install the given pickler only for the
    duration of the context and restore the internal default on exit."""

    class CustomPickler:
        pass

    custom = CustomPickler()
    # Inside the context the custom object is the active pickler.
    with _use_rpc_pickler(custom):
        self.assertTrue(torch.distributed.rpc.api._default_pickler is custom)
    # On exit the internal default pickler is restored.
    self.assertTrue(
        torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
    )
@dist_init
def test_wait_all(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
self.assertTrue(len(_thread_local_var.future_list) == 1)
self.assertTrue(
isinstance(_thread_local_var.future_list[0], torch._C.Future)
)
self.assertTrue(fut.done())
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_multiple_call(self):
with _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
for i in range(20):
fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
self.assertEqual(res, torch.ones(i, i) + 1)
self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
self.assertTrue(len(_thread_local_var.future_list) == 20)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_timeout(self):
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error), _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
timeout = 0.1 # 100 ms
rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_user_func(self):
with self.assertRaises(ValueError), _wait_all():
self.assertTrue(_thread_local_var.future_list == [])
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_async(dst, raise_func)
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_wait_all_raise_in_body(self):
with self.assertRaises(ValueError), _wait_all():
raise_func()
self.assertFalse(hasattr(_thread_local_var, "future_list"))
@dist_init
def test_custom_exception_throw_during_reconstruction(self):
    """
    Test that we still throw info about the remote side exception even when
    we cannot recreate it on client side.
    """
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    if self.rank != 0:
        exc_caught = False
        dst = worker_name(0)
        try:
            rpc.rpc_sync(dst, custom_raise_func, args=())
        except RuntimeError as e:
            # The custom exception type cannot be reconstructed locally, so
            # the framework wraps it in a RuntimeError that must still carry
            # the original remote exception's information.
            exc_caught = True
            msg = str(e)
            print(f"Got msg {msg}")
            self.assertTrue("Original exception on remote side was" in msg)
            self.assertTrue("CustomException" in msg)
        except BaseException as e:  # noqa: B036
            # Any exception type other than RuntimeError is a test failure.
            raise RuntimeError(f"Failure - expected RuntimeError, got {e}") from e
        finally:
            # Runs whether or not an exception fired: the RPC must have failed.
            self.assertTrue(exc_caught)

    # All ranks synchronize so rank 0 stays alive until callers are done.
    dist.barrier()
# Class-level Event used by timed_out_rpc() below to block a remote call
# indefinitely. It is (re)created inside each test that uses it, because
# the Event must live in the spawned subprocess rather than be created at
# class definition time.
timed_out_rpc_event = None

@staticmethod
def timed_out_rpc():
    # Remote helper: blocks until the owning test sets the event,
    # simulating an RPC that never finishes on its own.
    RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_function_not_on_callee(self):
# test that if a function does not exist on a callee, we don't crash,
# instead we get an AttributeError indicating that the func does not exist.
this_module = sys.modules[__name__]
caller_worker = "worker0"
callee_worker = "worker1"
if self.rank == 1:
# Use delattr to remove the binding of a func on this nodes
delattr(this_module, "foo_add")
# notify remote end that we have removed it.
rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
if self.rank == 0:
# func exists on caller, but not callee.
# wait for remote end to remove the binding of foo_add func.
wait_for_value_future()
# Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
self.assertTrue(hasattr(this_module, "foo_add"))
with self.assertRaisesRegex(RuntimeError, "RPC pickler does not serialize"):
rpc.rpc_sync(callee_worker, foo_add, args=())
@dist_init
def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
a = MyClass(1)
b = MyClass(2)
# This is to make Python not garbage collect a and b.
a.other = b
b.other = a
n = self.rank
a.rref = rpc.remote(dst_worker_name, torch.add, args=(torch.ones(n, n), 2))
@dist_init(setup_rpc=False)
def test_use_rref_after_shutdown(self):
    """After a graceful shutdown deletes local UserRRefs, both calling
    to_here() on, and pickling (forking), a leftover RRef must raise."""
    rpc.init_rpc(
        # Consistency fix: use the shared worker_name() helper like the
        # rest of the suite instead of an ad-hoc f-string.
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )
    n = self.rank + 1
    dst_rank = n % self.world_size
    rref = rpc.remote(
        worker_name(dst_rank),
        torch.add,
        args=(torch.ones(n, n), torch.ones(n, n)),
    )
    # pass in graceful=True to ensure that local UserRRefs are deleted.
    rpc.shutdown(graceful=True)

    with self.assertRaisesRegex(
        RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
    ):
        rref.to_here()

    with self.assertRaisesRegex(
        RuntimeError, "Cannot call fork an UserRRef after deletion."
    ):
        import torch.distributed.rpc.internal as internal

        internal.serialize(rref)
@staticmethod
def _return_gpu_tensor():
return torch.rand(3, 3).cuda(0)
@staticmethod
def _return_gpu_tensor_list():
return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
@staticmethod
def _gpu_tensor_list_arg(tensor_list):
return torch.rand(3, 3)
def _create_rref(self):
    """Create an RRef to torch.add(zeros(2, 2), 1) owned by the worker
    two ranks ahead of this one."""
    owner = worker_name((self.rank + 2) % self.world_size)
    return rpc.remote(owner, torch.add, args=(torch.zeros(2, 2), 1))
@dist_init
def test_user_rrefs_confirmed(self):
    """Passing a user RRef over rpc_sync must leave it confirmed by its owner."""
    peer = worker_name((self.rank + 1) % self.world_size)
    rref = self._create_rref()
    confirmed = rpc.rpc_sync(peer, check_rref_confirmed, args=(rref,))
    self.assertEqual(confirmed, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
    """Same as test_user_rrefs_confirmed, but via rpc.remote + to_here()."""
    peer = worker_name((self.rank + 1) % self.world_size)
    rref = self._create_rref()
    confirmation_rref = rpc.remote(peer, check_rref_confirmed, args=(rref,))
    self.assertEqual(confirmation_rref.to_here(), True)
@dist_init
def test_rref_py_pickle_not_supported(self):
local_rref = RRef(35)
with (
TemporaryFileName() as fname,
self.assertRaisesRegex(
RuntimeError, "Can not pickle rref in python pickler"
),
):
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
raise_or_inc,
args=(torch.ones(2),),
)
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_non_cont_tensors(self):
if self.rank == 0:
# Create a non-contiguous tensor.
t = torch.rand(5, 5)
t_view = t.narrow(1, 2, 2)
self.assertFalse(t_view.is_contiguous())
t_cont = t_view.contiguous()
self.assertTrue(t_cont.is_contiguous())
self.assertEqual(t_view, t_cont)
# Send non-cont tensor over RPC.
next_rank = (self.rank + 1) % self.world_size
t_ret = rpc.rpc_sync(
worker_name(next_rank), non_cont_test, args=(t_view, t_cont)
)
# Verify the returned tensor.
self.assertEqual(t_view, t_ret)
self.assertFalse(t_ret.is_contiguous())
@dist_init
def test_callback_simple(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
def callback(fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
set_by_cb.set_result(ret.clone() + 1)
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError, "my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
@dist_init
def test_callback_wrong_arg_type(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
fut1 = fut0.then(lambda x: x + 1)
with self.assertRaisesRegex(
RuntimeError, "unsupported operand type\\(s\\) for \\+"
):
fut1.wait()
@dist_init
def test_callback_multi(self):
num_cbs = 10
n = self.rank + 1
def callback(idx, fut):
ret = fut.wait()
self.assertEqual(ret, torch.ones(n, n) * 2)
return ret + idx
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
cb_futs = [fut.then(partial(callback, idx)) for idx in range(num_cbs)]
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
for idx in range(num_cbs):
self.assertEqual(cb_futs[idx].wait(), torch.ones(n, n) * 2 + idx)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
    """Each then() callback adds 1; chaining N of them adds N in total."""
    n = self.rank + 1

    def bump(prev):
        return prev.wait() + 1

    fut = rpc.rpc_async(
        worker_name(n % self.world_size), torch.add, args=(torch.ones(n, n), 1)
    )

    chain_length = 20
    for _ in range(chain_length):
        fut = fut.then(bump)

    self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + chain_length)
@dist_init
def test_callback_in_rpc(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, add_use_future_cb, args=(dst2, torch.ones(2, 2), 1, 2))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
fut2 = rpc.rpc_async(dst, torch.add, args=(fut0.wait(), 1)).then(
lambda fut1: fut1.wait() + 1
)
return fut2.wait()
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1)).then(callback)
self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
dst = worker_name((self.rank + 1) % self.world_size)
def callback(fut0):
with self.assertRaisesRegex(ValueError, "Expected error"):
fut0.wait()
raise RuntimeError("Another expected error")
fut1 = rpc.rpc_async(dst, raise_func).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
fut1.wait()
@dist_init
def test_callback_none(self):
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(TypeError, "incompatible function arguments."):
rpc.rpc_async(dst, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
set_by_cb = False
n = self.rank + 1
def callback(fut):
nonlocal set_by_cb
fut.wait()
set_by_cb = True
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
fut.add_done_callback(callback)
fut_then = fut.then(lambda _: True)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes.
# Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback
fut_then.wait()
self.assertTrue(set_by_cb)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1),
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError, "Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName(), self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName(), self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName(), self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
    """done() must be True once the future's wait() has returned."""
    peer = worker_name((self.rank + 1) % self.world_size)
    fut = rpc.rpc_async(peer, torch.add, args=(torch.zeros(2), 1))
    fut.wait()
    self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
    """A future is also 'done' after completing with an error."""
    peer = worker_name((self.rank + 1) % self.world_size)
    fut = rpc.rpc_async(peer, raise_func)
    with self.assertRaisesRegex(ValueError, "Expected error"):
        fut.wait()
    self.assertTrue(fut.done())
def _test_future_cb(self, func):
    """Run *func* on the next worker, which chains through the worker
    after it, and verify the accumulated arithmetic result."""
    first_hop = worker_name((self.rank + 1) % self.world_size)
    second_hop = worker_name((self.rank + 2) % self.world_size)
    result = rpc.rpc_sync(
        first_hop, func, args=(second_hop, torch.ones(2, 2), 1, 2)
    )
    self.assertEqual(result, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
self._test_future_cb(add_use_future_set_result)
@dist_init
def test_future_nested_callback(self):
self._test_future_cb(add_use_future_nested_cb)
def _test_async_function_raise(self, mode):
with self.assertRaisesRegex(RuntimeError, "Expected error"):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size), async_raise_func, mode
)
@dist_init
def test_async_function_raise(self):
self._test_async_function_raise(RPCExecMode.SYNC)
@dist_init
def test_async_function_raise_async(self):
self._test_async_function_raise(RPCExecMode.ASYNC)
@dist_init
def test_async_function_raise_remote(self):
self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size), async_wrong_type, mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
@dist_init
def test_async_function_with_future_ctor(self):
self._test_async_function(async_add_with_future_ctor)
@dist_init
def test_async_function_with_future_ctor_remote(self):
self._test_async_function(async_add_with_future_ctor, RPCExecMode.REMOTE)
@dist_init
def test_async_function_chained(self):
self._test_async_function(async_add_chained)
@dist_init
def test_async_function_chained_remote(self):
self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
@dist_init
def test_async_function_nested(self):
self._test_async_function(async_add_nested)
@dist_init
def test_async_function_nested_remote(self):
self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
@dist_init
def test_async_static_method(self):
self._test_async_function(AsyncExecutionClass.static_async_add)
@dist_init
def test_async_static_method_remote(self):
self._test_async_function(
AsyncExecutionClass.static_async_add, RPCExecMode.REMOTE
)
@dist_init
def test_async_class_method(self):
self._test_async_function(AsyncExecutionClass.class_async_add)
@dist_init
def test_async_class_method_remote(self):
self._test_async_function(
AsyncExecutionClass.class_async_add, RPCExecMode.REMOTE
)
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(dst1, AsyncExecutionClass)
x = torch.ones(2, 2)
y = torch.ones(2, 2) + 1
if mode == RPCExecMode.SYNC:
ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
elif mode == RPCExecMode.ASYNC:
ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
elif mode == RPCExecMode.REMOTE:
ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
self.assertEqual(ret, 3 * 4 * x)
@dist_init
def test_async_class_rref_proxy(self):
self._test_test_async_class_rref_proxy()
@dist_init
def test_async_class_rref_proxy_async(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
@dist_init
def test_async_class_rref_proxy_remote(self):
self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
@dist_init
def test_async_function_multi_chained(self):
self._test_async_function_multi(async_add_chained_multi)
@dist_init
def test_async_function_multi_chained_async(self):
self._test_async_function_multi(async_add_chained_multi, RPCExecMode.ASYNC)
@dist_init
def test_async_function_multi_chained_remote(self):
self._test_async_function_multi(async_add_chained_multi, RPCExecMode.REMOTE)
@dist_init
def test_async_function_multi_fanout(self):
self._test_async_function_multi(async_add_multi_fanout)
@dist_init
def test_async_function_multi_fanout_async(self):
self._test_async_function_multi(async_add_multi_fanout, RPCExecMode.ASYNC)
@dist_init
def test_async_function_multi_fanout_remote(self):
self._test_async_function_multi(async_add_multi_fanout, RPCExecMode.REMOTE)
def _test_return_future(self, mode):
with self.assertRaisesRegex(
RuntimeError, "Can not pickle torch.futures.Future"
):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size), return_future, mode
)
@dist_init
def test_return_future(self):
self._test_return_future(RPCExecMode.SYNC)
@dist_init
def test_return_future_async(self):
self._test_return_future(RPCExecMode.ASYNC)
@dist_init
def test_return_future_remote(self):
self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
    """A remote() call with a tiny timeout must fail its creation future,
    and a subsequent to_here() must surface an RRef-creation error."""
    # This test is similar to ones in FaultyProcessGroupTest, but is meant to be
    # run with other backends besides ProcessGroup.
    if self.rank != 0:
        return
    dst_rank = (self.rank + 1) % self.world_size
    # Consistency fix: use the shared worker_name() helper like the rest
    # of the suite instead of an ad-hoc f-string.
    dst_worker = worker_name(dst_rank)
    # 10 ms timeout
    rref = rpc.remote(dst_worker, my_sleep_func, args=(2,), timeout=0.01)
    # Future corresponding to the remote creation should time out.
    expected_error = self.get_timeout_error_regex()
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rref._get_future().wait()
    # Call to ensure pending callbacks are run.
    wait_until_pending_futures_and_users_flushed()
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rref.to_here()

    wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@skip_but_pass_in_sandcastle_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614.",
)
def test_init_pg_then_rpc(self):
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(
worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init(setup_rpc=False)
@skip_but_pass_in_sandcastle_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614.",
)
def test_init_rpc_then_pg(self):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
dist.init_process_group(
backend="gloo",
init_method=self.init_method,
rank=self.rank,
world_size=self.world_size,
)
# Test RPC.
next_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(
worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test PG
dist.barrier()
rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
    """wait_all must re-raise when every future fails."""
    peer = worker_name((self.rank + 1) % self.world_size)
    failing = [rpc.rpc_async(peer, raise_func) for _ in range(10)]
    with self.assertRaisesRegex(ValueError, "Expected error"):
        torch.futures.wait_all(failing)
@dist_init
def test_wait_all_with_partial_exception(self):
    """wait_all must re-raise even when only one of many futures fails."""
    peer = worker_name((self.rank + 1) % self.world_size)
    futures = []
    for _ in range(10):
        futures.append(rpc.rpc_async(peer, torch.add, args=(torch.ones(2), 1)))
    # A single failing future among the successful ones is enough to raise.
    futures.append(rpc.rpc_async(peer, raise_func))
    with self.assertRaisesRegex(ValueError, "Expected error"):
        torch.futures.wait_all(futures)
@dist_init(setup_rpc=False)
@skip_but_pass_in_sandcastle_if(
os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
"Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
)
def test_init_rpc_twice(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
rpc.shutdown()
# Wait for all init to complete.
dist.barrier()
# Use a different file name for the next initialization
new_backend_options = self.rpc_backend_options
new_backend_options.init_method += "init_2"
# Ensure rpc initialization works again.
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=new_backend_options,
)
# Verify RPCs work after re-init.
dst = worker_name((self.rank + 1) % self.world_size)
rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
rpc.rpc_sync(dst, foo_add, args=())
rpc.shutdown()
def test_wrong_types(self):
with self.assertRaisesRegex(
TypeError,
"Argument backend must be a member of BackendType",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend="TENSORPIPE",
)
with self.assertRaisesRegex(
TypeError,
"Argument rpc_backend_options must be an instance of RpcBackendOptions",
):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
backend=self.rpc_backend,
rpc_backend_options={"init_method": self.init_method},
)
def test_cannot_infer_backend_from_options(self):
# An exception should be raised if the backend isn't specified but
# options are given which are not an instance of any of the known
# agents' option classes.
rpc_backend_options = FooBackendOptions(self.init_method)
with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
rpc.init_rpc(
name=worker_name(self.rank),
rank=self.rank,
world_size=self.world_size,
# Do _not_ pass backend.
rpc_backend_options=rpc_backend_options,
)
@dist_init
def test_owner_rref_backward(self):
    """Owner RRef.backward() must accumulate gradients into .grad (local
    autograd) or into a dist_autograd context, and validate its inputs."""
    dst = worker_name((self.rank + 1) % self.world_size)
    t1 = torch.rand(10, 10, requires_grad=True)
    rref = rpc.RRef(t1.sum() + t1.sum())
    # Local (non-distributed) backward: gradients land in t1.grad.
    rref.backward()
    # t1 contributes twice (sum + sum), hence a gradient of 2 everywhere.
    expected_grad = torch.ones_like(t1) * 2
    self.assertEqual(expected_grad, t1.grad)

    # Distributed backward: gradients are recorded in the autograd context.
    with dist_autograd.context() as context_id:
        t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
        rref = rpc.RRef(t2.sum())
        rref.backward(context_id)
        self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])

    # Double backward.
    with dist_autograd.context() as context_id:
        t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
        rref = rpc.RRef(t2.sum())
        rref.backward(context_id, retain_graph=True)
        rref.backward(context_id)
        # Two backward passes accumulate twice the gradient.
        self.assertEqual(
            expected_grad * 2, dist_autograd.get_gradients(context_id)[t1]
        )

    # Test errors.
    with self.assertRaisesRegex(
        RuntimeError, "tensors does not require grad and does not have a grad_fn"
    ):
        rpc.RRef(torch.rand(10)).backward()

    with self.assertRaisesRegex(
        RuntimeError, "grad can be implicitly created only for scalar outputs"
    ):
        rpc.RRef(torch.rand(10, requires_grad=True)).backward()

    with self.assertRaisesRegex(
        RuntimeError, "Could not find autograd context with id: 100"
    ):
        rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)

    with self.assertRaisesRegex(
        RuntimeError, "RRef should contain a tensor for .backward()"
    ):
        rpc.RRef("foo").backward()
@staticmethod
def _sum(x):
return x.sum()
@staticmethod
def _identity(x):
return x
@dist_init
def test_user_rref_backward(self):
dst = worker_name((self.rank + 1) % self.world_size)
t = torch.rand(10, requires_grad=True)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._sum, args=(t,))
rref.backward(context_id, retain_graph=True)
rref.backward(context_id)
self.assertEqual(
torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t]
)
with dist_autograd.context() as context_id:
rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
with self.assertRaisesRegex(
RuntimeError, "RRef should contain a tensor for .backward()"
):
rref.backward(context_id)
with self.assertRaisesRegex(
RuntimeError,
"User RRefs require 'dist_autograd_ctx_id' to be specified",
):
rref.backward()
@dist_init(setup_rpc=False)
def test_shutdown_errors(self):
    """Errors raised inside shutdown's internal _all_gather / RRef-cleanup
    steps must propagate to the caller instead of hanging the shutdown."""
    initialize_pg(self.file_init_method, self.rank, self.world_size)

    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )

    if self.rank != 0:
        og_func = rpc.api._broadcast_to_followers
        og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs

        # Monkey-patch _broadcast_to_followers to fail, which would ensure
        # _all_gather on leader raises an exception.
        def raise_error(sequence_id, objects_map):
            og_func(sequence_id, objects_map)
            raise RuntimeError("simulation")

        # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
        # which would ensure barrier is not called on followers.
        def rref_error():
            raise RuntimeError("simulation rref")

        try:
            rpc.api._broadcast_to_followers = raise_error
            rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
            with self.assertRaisesRegex(RuntimeError, "simulation rref"):
                rpc.shutdown()
        finally:
            # Restore the real implementations even if the assertion fails,
            # so later tests in this process see unpatched APIs.
            rpc.api._broadcast_to_followers = og_func
            rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
    else:
        # The leader's _all_gather cannot complete because followers error
        # out before responding, so its shutdown times out.
        with self.assertRaisesRegex(RuntimeError, "timed out in _all_gather"):
            rpc.shutdown()
    dist.barrier()
@dist_init
def test_my_parameter_server(self):
self._my_parameter_server(False)
| RpcTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/responses.py | {
"start": 18570,
"end": 19422
} | class ____(schemas.core.WorkQueue):
work_pool_name: Optional[str] = Field(
default=None,
description="The name of the work pool the work pool resides within.",
)
status: Optional[schemas.statuses.WorkQueueStatus] = Field(
default=None, description="The queue status."
)
@classmethod
def model_validate(
cls: Type[Self],
obj: Any,
*,
strict: Optional[bool] = None,
from_attributes: Optional[bool] = None,
context: Optional[dict[str, Any]] = None,
) -> Self:
response = super().model_validate(
obj, strict=strict, from_attributes=from_attributes, context=context
)
if from_attributes:
if obj.work_pool:
response.work_pool_name = obj.work_pool.name
return response
| WorkQueueResponse |
python | huggingface__transformers | src/transformers/models/seggpt/modeling_seggpt.py | {
"start": 17924,
"end": 20069
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: SegGptConfig, drop_path_rate: float) -> None:
super().__init__()
self.attention = SegGptAttention(config)
self.mlp = SegGptMlp(config)
self.drop_path = SegGptDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
ensemble_cond: int,
feature_ensemble: bool = False,
output_attentions: bool = False,
) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
self_attention_outputs = self.attention(
self.layernorm_before(hidden_states), # in SegGpt, layernorm is applied before self-attention
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if feature_ensemble and attention_output.shape[0] // 2 >= ensemble_cond:
prompt, inputs = attention_output.split(attention_output.shape[1] // 2, dim=1)
if ensemble_cond == 2:
num_prompts = attention_output.shape[0] // 2
inputs = inputs.reshape(2, num_prompts, -1)
inputs = inputs.mean(dim=1, keepdim=True).expand_as(inputs)
inputs = inputs.reshape(*prompt.shape)
else:
inputs = inputs.mean(dim=0, keepdim=True).expand_as(inputs)
attention_output = torch.cat([prompt, inputs], dim=1)
# first residual connection
hidden_states = self.drop_path(attention_output) + hidden_states
residual = hidden_states
hidden_states = self.layernorm_after(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.drop_path(hidden_states)
outputs = (hidden_states,) + outputs
return outputs
| SegGptLayer |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_relationship.py | {
"start": 56156,
"end": 62151
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
"""test for #5082"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
foos = relationship("Foo", back_populates="owner")
class Foo(Base):
__tablename__ = "foos"
__mapper_args__ = {"polymorphic_on": "type"}
id = Column(Integer, primary_key=True)
type = Column(String(10), nullable=False)
owner_id = Column(Integer, ForeignKey("users.id"))
owner = relationship("User", back_populates="foos")
bar_id = Column(ForeignKey("bars.id"))
bar = relationship("Bar")
class SubFoo(Foo):
__tablename__ = "foos_sub"
__mapper_args__ = {"polymorphic_identity": "SUB"}
id = Column(Integer, ForeignKey("foos.id"), primary_key=True)
baz = Column(Integer)
sub_bar_id = Column(Integer, ForeignKey("sub_bars.id"))
sub_bar = relationship("SubBar")
class Bar(Base):
__tablename__ = "bars"
id = Column(Integer, primary_key=True)
fred_id = Column(Integer, ForeignKey("freds.id"), nullable=False)
fred = relationship("Fred")
class SubBar(Base):
__tablename__ = "sub_bars"
id = Column(Integer, primary_key=True)
fred_id = Column(Integer, ForeignKey("freds.id"), nullable=False)
fred = relationship("Fred")
class Fred(Base):
__tablename__ = "freds"
id = Column(Integer, primary_key=True)
@classmethod
def insert_data(cls, connection):
User, Fred, SubBar, Bar, SubFoo = cls.classes(
"User", "Fred", "SubBar", "Bar", "SubFoo"
)
user = User(id=1)
fred = Fred(id=1)
bar = Bar(fred=fred)
sub_bar = SubBar(fred=fred)
rectangle = SubFoo(owner=user, baz=10, bar=bar, sub_bar=sub_bar)
s = Session(connection)
s.add_all([user, fred, bar, sub_bar, rectangle])
s.commit()
def test_joined_load_lastlink_subclass(self):
Foo, User, SubBar = self.classes("Foo", "User", "SubBar")
s = fixture_session()
foo_polymorphic = with_polymorphic(Foo, "*", aliased=True)
foo_load = joinedload(User.foos.of_type(foo_polymorphic))
query = s.query(User).options(
foo_load.joinedload(foo_polymorphic.SubFoo.sub_bar).joinedload(
SubBar.fred
)
)
self.assert_compile(
query,
"SELECT users.id AS users_id, anon_1.foos_id AS anon_1_foos_id, "
"anon_1.foos_type AS anon_1_foos_type, anon_1.foos_owner_id "
"AS anon_1_foos_owner_id, "
"anon_1.foos_bar_id AS anon_1_foos_bar_id, "
"freds_1.id AS freds_1_id, sub_bars_1.id "
"AS sub_bars_1_id, sub_bars_1.fred_id AS sub_bars_1_fred_id, "
"anon_1.foos_sub_id AS anon_1_foos_sub_id, "
"anon_1.foos_sub_baz AS anon_1_foos_sub_baz, "
"anon_1.foos_sub_sub_bar_id AS anon_1_foos_sub_sub_bar_id "
"FROM users LEFT OUTER JOIN "
"(SELECT foos.id AS foos_id, foos.type AS foos_type, "
"foos.owner_id AS foos_owner_id, foos.bar_id AS foos_bar_id, "
"foos_sub.id AS foos_sub_id, "
"foos_sub.baz AS foos_sub_baz, "
"foos_sub.sub_bar_id AS foos_sub_sub_bar_id "
"FROM foos LEFT OUTER JOIN foos_sub ON foos.id = foos_sub.id) "
"AS anon_1 ON users.id = anon_1.foos_owner_id "
"LEFT OUTER JOIN sub_bars AS sub_bars_1 "
"ON sub_bars_1.id = anon_1.foos_sub_sub_bar_id "
"LEFT OUTER JOIN freds AS freds_1 "
"ON freds_1.id = sub_bars_1.fred_id",
)
def go():
user = query.one()
user.foos[0].sub_bar
user.foos[0].sub_bar.fred
self.assert_sql_count(testing.db, go, 1)
def test_joined_load_lastlink_baseclass(self):
Foo, User, Bar = self.classes("Foo", "User", "Bar")
s = fixture_session()
foo_polymorphic = with_polymorphic(Foo, "*", aliased=True)
foo_load = joinedload(User.foos.of_type(foo_polymorphic))
query = s.query(User).options(
foo_load.joinedload(foo_polymorphic.bar).joinedload(Bar.fred)
)
self.assert_compile(
query,
"SELECT users.id AS users_id, freds_1.id AS freds_1_id, "
"bars_1.id AS bars_1_id, "
"bars_1.fred_id AS bars_1_fred_id, "
"anon_1.foos_id AS anon_1_foos_id, "
"anon_1.foos_type AS anon_1_foos_type, anon_1.foos_owner_id AS "
"anon_1_foos_owner_id, anon_1.foos_bar_id AS anon_1_foos_bar_id, "
"anon_1.foos_sub_id AS anon_1_foos_sub_id, anon_1.foos_sub_baz AS "
"anon_1_foos_sub_baz, "
"anon_1.foos_sub_sub_bar_id AS anon_1_foos_sub_sub_bar_id "
"FROM users LEFT OUTER JOIN (SELECT foos.id AS foos_id, "
"foos.type AS foos_type, "
"foos.owner_id AS foos_owner_id, foos.bar_id AS foos_bar_id, "
"foos_sub.id AS "
"foos_sub_id, foos_sub.baz AS foos_sub_baz, "
"foos_sub.sub_bar_id AS "
"foos_sub_sub_bar_id FROM foos "
"LEFT OUTER JOIN foos_sub ON foos.id = "
"foos_sub.id) AS anon_1 ON users.id = anon_1.foos_owner_id "
"LEFT OUTER JOIN bars "
"AS bars_1 ON bars_1.id = anon_1.foos_bar_id "
"LEFT OUTER JOIN freds AS freds_1 ON freds_1.id = bars_1.fred_id",
)
def go():
user = query.one()
user.foos[0].bar
user.foos[0].bar.fred
self.assert_sql_count(testing.db, go, 1)
| JoinedloadWPolyOfTypeContinued |
python | doocs__leetcode | solution/0100-0199/0108.Convert Sorted Array to Binary Search Tree/Solution.py | {
"start": 192,
"end": 526
} | class ____:
def sortedArrayToBST(self, nums: List[int]) -> Optional[TreeNode]:
def dfs(l: int, r: int) -> Optional[TreeNode]:
if l > r:
return None
mid = (l + r) >> 1
return TreeNode(nums[mid], dfs(l, mid - 1), dfs(mid + 1, r))
return dfs(0, len(nums) - 1)
| Solution |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 24642,
"end": 27616
} | class ____(Operation):
def __init__(self, num=None, axis=0, *, name=None):
super().__init__(name=name)
self.num = num
self.axis = axis
def call(self, x):
return backend.core.unstack(x, self.num, self.axis)
def compute_output_spec(self, x):
axis = self.axis
if axis < 0:
axis = len(x.shape) + axis
output_shapes = x.shape[:axis] + x.shape[axis + 1 :]
num = self.num
if num is None:
num = x.shape[axis]
if num is None:
raise ValueError(
"Cannot infer argument `num` from shape "
f"{x.shape}. Either provide a tensor with a "
"concrete shape in the `axis` dimension or "
"explicitly pass the `num` argument."
)
output = [
KerasTensor(shape=output_shapes, dtype=x.dtype) for _ in range(num)
]
return output
@keras_export("keras.ops.unstack")
def unstack(x, num=None, axis=0):
"""Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
Args:
x: The input tensor.
num: The length of the dimension axis. Automatically inferred
if `None`.
axis: The axis along which to unpack.
Returns:
A list of tensors unpacked along the given axis.
Example:
>>> x = keras.ops.array([[1, 2], [3, 4]])
>>> keras.ops.unstack(x, axis=0)
[array([1, 2]), array([3, 4])]
"""
if any_symbolic_tensors((x,)):
return Unstack(num, axis).symbolic_call(x)
return backend.core.unstack(x, num=num, axis=axis)
@keras_export("keras.ops.shape")
def shape(x):
"""Gets the shape of the tensor input.
Note: On the TensorFlow backend, when `x` is a `tf.Tensor` with dynamic
shape, dimensions which are dynamic in the context of a compiled function
will have a `tf.Tensor` value instead of a static integer value.
Args:
x: A tensor. This function will try to access the `shape` attribute of
the input tensor.
Returns:
A tuple of integers or None values, indicating the shape of the input
tensor.
Example:
>>> x = keras.ops.zeros((8, 12))
>>> keras.ops.shape(x)
(8, 12)
"""
if any_symbolic_tensors((x,)):
return x.shape
return backend.core.shape(x)
@keras_export("keras.ops.dtype")
def dtype(x):
"""Return the dtype of the tensor input as a standardized string.
Note that due to the standardization, the dtype will not compare equal
to the backend-specific version of the dtype.
Args:
x: A tensor. This function will try to access the `dtype` attribute of
the input tensor.
Returns:
A string indicating the dtype of the input tensor, e.g. `"float32"`.
Example:
>>> x = keras.ops.zeros((8, 12))
>>> keras.ops.dtype(x)
'float32'
"""
return backend.standardize_dtype(x.dtype)
| Unstack |
python | mlflow__mlflow | tests/langchain/conftest.py | {
"start": 1178,
"end": 1681
} | class ____(Embeddings, BaseModel):
size: int
def _get_embedding(self, text: str) -> list[float]:
import numpy as np
seed = abs(hash(text)) % (10**8)
np.random.seed(seed)
return list(np.random.normal(size=self.size))
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding(t) for t in texts]
def embed_query(self, text: str) -> list[float]:
return self._get_embedding(text)
| DeterministicDummyEmbeddings |
python | getsentry__sentry | src/sentry/utils/sdk_crashes/sdk_crash_detection_config.py | {
"start": 1280,
"end": 1447
} | class ____(Enum):
Cocoa = "cocoa"
ReactNative = "react-native"
Java = "java"
Native = "native"
Dart = "dart"
Dotnet = "dotnet"
@dataclass
| SdkName |
python | dagster-io__dagster | python_modules/libraries/dagster-omni/dagster_omni/workspace.py | {
"start": 357,
"end": 5893
} | class ____(dg.Resolvable, dg.Model):
"""Handles all interactions with the Omni API to fetch and manage state."""
base_url: str = Field(
description="The base URL to your Omni instance.", examples=["https://acme.omniapp.co"]
)
api_key: str = Field(
description="The API key to your Omni instance.",
examples=['"{{ env.OMNI_API_KEY }}"'],
repr=False,
)
max_retries: int = Field(
default=5, description="The maximum number of retries to make when rate-limited."
)
base_delay: float = Field(
default=4.0,
description="The base delay for exponential backoff between retries in seconds.",
)
@property
def base_api_url(self) -> str:
return f"{self.base_url.rstrip('/')}/api/v1"
def _get_session(self) -> aiohttp.ClientSession:
"""Create configured session with Bearer token authentication."""
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}",
}
return aiohttp.ClientSession(headers=headers)
def _should_retry(self, exc: BaseException) -> bool:
"""Determine if an exception should trigger a retry."""
if isinstance(exc, ClientResponseError):
return exc.status == 429 or 500 <= exc.status < 600
return isinstance(exc, aiohttp.ClientError)
def _build_url(self, endpoint: str) -> str:
return f"{self.base_url.rstrip('/')}/{endpoint.lstrip('/')}"
async def make_request(
self,
endpoint: str,
params: Optional[dict[str, Any]] = None,
headers: Optional[dict[str, str]] = None,
) -> dict[str, Any]:
"""Make a GET request to the API with retry logic."""
url = self._build_url(endpoint)
if params:
url = f"{url}?{urllib.parse.urlencode(params)}"
async def _make_request():
async with self._get_session() as session:
request_headers = headers or {}
async with session.get(url, headers=request_headers) as response:
response.raise_for_status()
return await response.json()
return await async_backoff(
_make_request,
retry_on=self._should_retry,
max_retries=self.max_retries,
delay_generator=exponential_delay_generator(base_delay=self.base_delay),
)
async def _fetch_document_queries(self, document_identifier: str) -> list[OmniQuery]:
"""Fetch all queries for a specific document."""
endpoint = f"api/v1/documents/{document_identifier}/queries"
try:
response = await self.make_request(endpoint)
return [OmniQuery.from_json(query_data) for query_data in response.get("queries", [])]
except ClientResponseError as e:
# When a document has no queries, this will return 404
if e.status == 404:
return []
raise
async def _fetch_document_with_queries(self, document_data: dict[str, Any]) -> OmniDocument:
"""Returns an OmniDocument with its queries embedded."""
queries = await self._fetch_document_queries(document_data["identifier"])
return OmniDocument.from_json(document_data, queries)
async def _fetch_documents(self) -> list[OmniDocument]:
"""Fetch all documents from the Omni API with their queries embedded."""
base_params = {"pageSize": "100", "include": "_count"}
documents = []
next_cursor = None
while True:
params = base_params.copy()
if next_cursor:
params["cursor"] = next_cursor
response = await self.make_request("api/v1/documents", params)
# Fan out the requests to fetch queries for each document in parallel
coroutines = [
self._fetch_document_with_queries(doc_data)
for doc_data in response.get("records", [])
]
documents.extend(await asyncio.gather(*coroutines))
next_cursor = response.get("pageInfo", {}).get("nextCursor")
if not next_cursor:
break
return documents
async def _fetch_users(self) -> list[OmniUser]:
"""Fetch all users from the Omni SCIM API."""
base_params = {"count": "100"}
users = []
start_index = 1
while True:
params = base_params.copy()
params["startIndex"] = str(start_index)
response = await self.make_request("api/scim/v2/users", params)
user_resources = response.get("Resources", [])
if not user_resources:
break
users.extend([OmniUser.from_json(user_data) for user_data in user_resources])
# Check if we've received fewer users than requested, indicating we're done
if len(user_resources) < int(base_params["count"]):
break
start_index += len(user_resources)
return users
async def fetch_omni_state(self) -> OmniWorkspaceData:
"""Fetch all documents and users from the Omni API.
This is the main public method for getting complete Omni state.
"""
# Fetch documents and users concurrently
documents, users = await asyncio.gather(self._fetch_documents(), self._fetch_users())
return OmniWorkspaceData(documents=documents, users=users)
| OmniWorkspace |
python | langchain-ai__langchain | libs/core/langchain_core/indexing/base.py | {
"start": 8060,
"end": 14597
} | class ____(RecordManager):
"""An in-memory record manager for testing purposes."""
def __init__(self, namespace: str) -> None:
"""Initialize the in-memory record manager.
Args:
namespace: The namespace for the record manager.
"""
super().__init__(namespace)
# Each key points to a dictionary
# of {'group_id': group_id, 'updated_at': timestamp}
self.records: dict[str, _Record] = {}
self.namespace = namespace
def create_schema(self) -> None:
"""In-memory schema creation is simply ensuring the structure is initialized."""
async def acreate_schema(self) -> None:
"""In-memory schema creation is simply ensuring the structure is initialized."""
@override
def get_time(self) -> float:
return time.time()
@override
async def aget_time(self) -> float:
return self.get_time()
def update(
self,
keys: Sequence[str],
*,
group_ids: Sequence[str | None] | None = None,
time_at_least: float | None = None,
) -> None:
"""Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: Optional timestamp. Implementation can use this
to optionally verify that the timestamp IS at least this time
in the system that stores.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp, if not
raise an error.
This is meant to help prevent time-drift issues since
time may not be monotonically increasing!
Raises:
ValueError: If the length of keys doesn't match the length of group
ids.
ValueError: If time_at_least is in the future.
"""
if group_ids and len(keys) != len(group_ids):
msg = "Length of keys must match length of group_ids"
raise ValueError(msg)
for index, key in enumerate(keys):
group_id = group_ids[index] if group_ids else None
if time_at_least and time_at_least > self.get_time():
msg = "time_at_least must be in the past"
raise ValueError(msg)
self.records[key] = {"group_id": group_id, "updated_at": self.get_time()}
async def aupdate(
self,
keys: Sequence[str],
*,
group_ids: Sequence[str | None] | None = None,
time_at_least: float | None = None,
) -> None:
"""Async upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: Optional timestamp. Implementation can use this
to optionally verify that the timestamp IS at least this time
in the system that stores.
E.g., use to validate that the time in the postgres database
is equal to or larger than the given timestamp, if not
raise an error.
This is meant to help prevent time-drift issues since
time may not be monotonically increasing!
"""
self.update(keys, group_ids=group_ids, time_at_least=time_at_least)
def exists(self, keys: Sequence[str]) -> list[bool]:
"""Check if the provided keys exist in the database.
Args:
keys: A list of keys to check.
Returns:
A list of boolean values indicating the existence of each key.
"""
return [key in self.records for key in keys]
async def aexists(self, keys: Sequence[str]) -> list[bool]:
"""Async check if the provided keys exist in the database.
Args:
keys: A list of keys to check.
Returns:
A list of boolean values indicating the existence of each key.
"""
return self.exists(keys)
def list_keys(
self,
*,
before: float | None = None,
after: float | None = None,
group_ids: Sequence[str] | None = None,
limit: int | None = None,
) -> list[str]:
"""List records in the database based on the provided filters.
Args:
before: Filter to list records updated before this time.
after: Filter to list records updated after this time.
group_ids: Filter to list records with specific group IDs.
limit: optional limit on the number of records to return.
Returns:
A list of keys for the matching records.
"""
result = []
for key, data in self.records.items():
if before and data["updated_at"] >= before:
continue
if after and data["updated_at"] <= after:
continue
if group_ids and data["group_id"] not in group_ids:
continue
result.append(key)
if limit:
return result[:limit]
return result
async def alist_keys(
self,
*,
before: float | None = None,
after: float | None = None,
group_ids: Sequence[str] | None = None,
limit: int | None = None,
) -> list[str]:
"""Async list records in the database based on the provided filters.
Args:
before: Filter to list records updated before this time.
after: Filter to list records updated after this time.
group_ids: Filter to list records with specific group IDs.
limit: optional limit on the number of records to return.
Returns:
A list of keys for the matching records.
"""
return self.list_keys(
before=before, after=after, group_ids=group_ids, limit=limit
)
def delete_keys(self, keys: Sequence[str]) -> None:
"""Delete specified records from the database.
Args:
keys: A list of keys to delete.
"""
for key in keys:
if key in self.records:
del self.records[key]
async def adelete_keys(self, keys: Sequence[str]) -> None:
"""Async delete specified records from the database.
Args:
keys: A list of keys to delete.
"""
self.delete_keys(keys)
| InMemoryRecordManager |
python | walkccc__LeetCode | solutions/2403. Minimum Time to Kill All Monsters/2403.py | {
"start": 0,
"end": 534
} | class ____:
def minimumTime(self, power: list[int]) -> int:
n = len(power)
maxMask = 1 << n
# dp[i] := the minimum number of days needed to defeat the monsters, where
# i is the bitmask of the monsters
dp = [math.inf] * maxMask
dp[0] = 0
for mask in range(1, maxMask):
currentGain = mask.bit_count()
for i in range(n):
if mask >> i & 1:
dp[mask] = min(dp[mask], dp[mask & ~(1 << i)] +
int(math.ceil(power[i] / currentGain)))
return dp[-1]
| Solution |
python | walkccc__LeetCode | solutions/758. Bold Words in String/758.py | {
"start": 0,
"end": 704
} | class ____:
def boldWords(self, words: list[str], s: str) -> str:
n = len(s)
ans = []
# bold[i] := True if s[i] should be bolded
bold = [0] * n
boldEnd = -1 # s[i:boldEnd] should be bolded
for i in range(n):
for word in words:
if s[i:].startswith(word):
boldEnd = max(boldEnd, i + len(word))
bold[i] = boldEnd > i
# Construct the string with the bold tags.
i = 0
while i < n:
if bold[i]:
j = i
while j < n and bold[j]:
j += 1
# s[i..j) should be bolded.
ans.append('<b>' + s[i:j] + '</b>')
i = j
else:
ans.append(s[i])
i += 1
return ''.join(ans)
| Solution |
python | matplotlib__matplotlib | lib/mpl_toolkits/axes_grid1/inset_locator.py | {
"start": 1310,
"end": 2177
} | class ____(AnchoredLocatorBase):
def __init__(self, bbox_to_anchor, x_size, y_size, loc,
borderpad=0.5, bbox_transform=None):
super().__init__(
bbox_to_anchor, None, loc,
borderpad=borderpad, bbox_transform=bbox_transform
)
self.x_size = Size.from_any(x_size)
self.y_size = Size.from_any(y_size)
def get_bbox(self, renderer):
bbox = self.get_bbox_to_anchor()
dpi = renderer.points_to_pixels(72.)
r, a = self.x_size.get_size(renderer)
width = bbox.width * r + a * dpi
r, a = self.y_size.get_size(renderer)
height = bbox.height * r + a * dpi
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return Bbox.from_bounds(0, 0, width, height).padded(pad)
| AnchoredSizeLocator |
python | django__django | tests/ordering/models.py | {
"start": 771,
"end": 1273
} | class ____(models.Model):
author = models.ForeignKey(Author, models.SET_NULL, null=True)
second_author = models.ForeignKey(
Author, models.SET_NULL, null=True, related_name="+"
)
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = (
"-pub_date",
models.F("headline"),
models.F("author__name").asc(),
models.OrderBy(models.F("second_author__name")),
)
| Article |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/multi_device_iterator_test.py | {
"start": 5249,
"end": 13697
} | class ____(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(MultiDeviceIteratorTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_inits=[0, 1, 42])))
def testInitOnly(self, num_inits):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
for _ in range(num_inits):
self.evaluate(multi_device_iterator.initializer)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
max_buffer_size=[0, 1, 10], prefetch_buffer_size=[0, 1, 10])))
def testBasic(self, prefetch_buffer_size, max_buffer_size):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]],
max_buffer_size=max_buffer_size,
prefetch_buffer_size=prefetch_buffer_size)
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testOneOnSameDevice(self):
dataset = dataset_ops.Dataset.range(12)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[0], self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 12, 3):
elem_on_0, elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_0))
self.assertEqual(i + 1, self.evaluate(elem_on_1))
self.assertEqual(i + 2, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_0, elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_0)
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testRepeatDevices(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[1]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2 = elements
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2 = elements
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testNotFullyDivisible(self):
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
elem_on_1 = multi_device_iterator.get_next(self._devices[1])
self.assertEqual(8, self.evaluate(elem_on_1))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testGetNextAsOptional(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
has_elem_1, get_elem_1 = self.evaluate(
[elem_on_1.has_value(), elem_on_1.get_value()])
has_elem_2, get_elem_2 = self.evaluate(
[elem_on_2.has_value(), elem_on_2.get_value()])
self.assertTrue(has_elem_1)
self.assertEqual(i, get_elem_1)
self.assertTrue(has_elem_2)
self.assertEqual(i + 1, get_elem_2)
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
has_elem_1 = elem_on_1.has_value()
has_elem_2 = elem_on_2.has_value()
self.assertFalse(self.evaluate(has_elem_1))
self.assertFalse(self.evaluate(has_elem_2))
with self.assertRaises(errors.InvalidArgumentError):
elem_1 = elem_on_1.get_value()
self.evaluate(elem_1)
with self.assertRaises(errors.InvalidArgumentError):
elem_2 = elem_on_2.get_value()
self.evaluate(elem_2)
@combinations.generate(test_base.default_test_combinations())
def testUneven(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], max_buffer_size=4)
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1 = multi_device_iterator.get_next(self._devices[1])
self.assertEqual(i, self.evaluate(elem_on_1))
for i in range(0, 10, 2):
elem_on_2 = multi_device_iterator.get_next(self._devices[2])
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.graph_only_combinations())
def testMultipleInitializationsGraph(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=4)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
for _ in range(5):
self.evaluate(multi_device_iterator.initializer)
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@combinations.generate(test_base.eager_only_combinations())
def testMultipleInitializationsEager(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
for _ in range(5):
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=4)
self.evaluate(multi_device_iterator.initializer)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@combinations.generate(test_base.default_test_combinations())
def testOptimization(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(testing.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # this should be optimized away
dataset = dataset.cache()
options = options_lib.Options()
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
| MultiDeviceIteratorTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/runs.py | {
"start": 1329,
"end": 1592
} | class ____(graphene.ObjectType):
run = graphene.Field(graphene.NonNull("dagster_graphql.schema.pipelines.pipeline.GrapheneRun"))
class Meta:
interfaces = (GrapheneLaunchPipelineRunSuccess,)
name = "LaunchRunSuccess"
| GrapheneLaunchRunSuccess |
python | numpy__numpy | numpy/random/tests/test_random.py | {
"start": 261,
"end": 1879
} | class ____:
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState,
np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
| TestSeed |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 342482,
"end": 343377
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of
UpdateEnterpriseMembersCanUpdateProtectedBranchesSetting
"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "setting_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise on which to set the members can update
protected branches setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue), graphql_name="settingValue")
"""The value for the members can update protected branches setting on
the enterprise.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateEnterpriseMembersCanUpdateProtectedBranchesSettingInput |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_settings.py | {
"start": 11362,
"end": 19994
} | class ____(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.step_count = 0
@rule()
def count_step(self):
self.step_count += 1
def teardown(self):
assert self.step_count <= settings_step_count
test_settings_decorator_applies_to_rule_based_state_machine_class = StepCounter.TestCase
def test_two_settings_decorators_applied_to_state_machine_class_raises_error():
with pytest.raises(InvalidArgument):
@settings()
@settings()
class StatefulTest(RuleBasedStateMachine):
pass
def test_settings_decorator_applied_to_non_state_machine_class_raises_error():
with pytest.raises(InvalidArgument):
@settings()
class NonStateMachine:
pass
def test_assigning_to_settings_attribute_on_state_machine_raises_error():
class StateMachine(RuleBasedStateMachine):
@rule(x=st.none())
def a_rule(self, x):
assert x is None
with pytest.raises(AttributeError):
StateMachine.settings = settings()
state_machine_instance = StateMachine()
state_machine_instance.settings = "any value"
def test_derandomise_with_explicit_database_is_invalid():
with pytest.raises(InvalidArgument):
settings(derandomize=True, database=InMemoryExampleDatabase())
@pytest.mark.parametrize(
"kwargs",
[
{"max_examples": -1},
{"max_examples": 2.5},
{"stateful_step_count": -1},
{"stateful_step_count": 2.5},
{"deadline": -1},
{"deadline": 0},
{"deadline": True},
{"deadline": False},
{"backend": "nonexistent_backend"},
{"suppress_health_check": ["nonexistent_healthcheck"]},
{"phases": ["nonexistent_phase"]},
{"phases": 0},
{"verbosity": -1},
{"verbosity": "nonexistent_verbosity"},
],
)
def test_invalid_settings_are_errors(kwargs):
with pytest.raises(InvalidArgument):
settings(**kwargs)
def test_invalid_parent():
class NotSettings:
def __repr__(self):
return "(not settings repr)"
not_settings = NotSettings()
with pytest.raises(InvalidArgument) as excinfo:
settings(not_settings)
assert "parent=(not settings repr)" in str(excinfo.value)
def test_default_settings_do_not_use_ci():
assert settings.get_profile("default").suppress_health_check == ()
def test_show_changed():
s = settings(settings.get_profile("default"), max_examples=999, database=None)
assert s.show_changed() == "database=None, max_examples=999"
def test_note_deprecation_checks_date():
with pytest.warns(HypothesisDeprecationWarning) as rec:
note_deprecation("This is bad", since="RELEASEDAY", has_codemod=False)
assert len(rec) == 1
with pytest.raises(AssertionError):
note_deprecation("This is way too old", since="1999-12-31", has_codemod=False)
def test_note_deprecation_checks_has_codemod():
with pytest.warns(
HypothesisDeprecationWarning,
match="The `hypothesis codemod` command-line tool",
):
note_deprecation("This is bad", since="2021-01-01", has_codemod=True)
def test_deprecated_settings_warn_on_set_settings():
with validate_deprecation():
settings(suppress_health_check=[HealthCheck.return_value])
with validate_deprecation():
settings(suppress_health_check=[HealthCheck.not_a_test_method])
@checks_deprecated_behaviour
def test_deprecated_settings_not_in_settings_all_list():
al = HealthCheck.all()
ls = list(HealthCheck)
assert al == ls
assert HealthCheck.return_value not in ls
assert HealthCheck.not_a_test_method not in ls
@skipif_emscripten
def test_check_defaults_to_derandomize_when_running_on_ci():
env = dict(os.environ)
env["CI"] = "true"
assert (
subprocess.check_output(
[
sys.executable,
"-c",
"from hypothesis import settings\nprint(settings().derandomize)",
],
env=env,
text=True,
encoding="utf-8",
).strip()
== "True"
)
@skipif_emscripten
def test_check_defaults_to_randomize_when_not_running_on_ci():
env = dict(os.environ)
for key in _CI_VARS:
env.pop(key, None)
assert (
subprocess.check_output(
[
sys.executable,
"-c",
"from hypothesis import settings\nprint(settings().derandomize)",
],
env=env,
text=True,
encoding="utf-8",
).strip()
== "False"
)
@skipif_threading # modifying global state (profiles) during testing
def test_reloads_the_loaded_profile_if_registered_again():
with restore_profile():
test_profile = "some nonsense profile purely for this test"
test_value = 123456
settings.register_profile(test_profile, settings(max_examples=test_value))
settings.load_profile(test_profile)
assert settings.default.max_examples == test_value
test_value_2 = 42
settings.register_profile(test_profile, settings(max_examples=test_value_2))
assert settings.default.max_examples == test_value_2
CI_TESTING_SCRIPT = """
from hypothesis import settings
if __name__ == '__main__':
settings.register_profile("ci", settings(max_examples=42))
assert settings.default.max_examples == 42
"""
@skipif_emscripten
def test_will_automatically_pick_up_changes_to_ci_profile_in_ci():
env = dict(os.environ)
env["CI"] = "true"
subprocess.check_call(
[sys.executable, "-c", CI_TESTING_SCRIPT],
env=env,
text=True,
encoding="utf-8",
)
def test_register_profile_avoids_intermediate_profiles():
parent = settings()
s = settings(parent, max_examples=10)
with temp_register_profile("for_intermediate_test", s):
assert settings.get_profile("for_intermediate_test")._fallback is parent
@checks_deprecated_behaviour
@settings(max_examples=10)
@given(st.integers())
def test_cannot_register_profile_from_inside_test(x):
settings.register_profile("problematic", settings(max_examples=20))
def test_can_set_verbosity_to_strings():
assert settings(verbosity="quiet").verbosity is Verbosity.quiet
assert settings(verbosity="normal").verbosity is Verbosity.normal
assert settings(verbosity="verbose").verbosity is Verbosity.verbose
assert settings(verbosity="debug").verbosity is Verbosity.debug
def test_can_set_phase_to_strings():
assert settings(phases=["reuse"]).phases == (Phase.reuse,)
assert settings(phases=["reuse", "explicit"]).phases == (
Phase.explicit,
Phase.reuse,
)
def test_can_set_suppressions_to_strings():
assert settings(
suppress_health_check=["filter_too_much"]
).suppress_health_check == (HealthCheck.filter_too_much,)
assert settings(
suppress_health_check=["filter_too_much", "too_slow"]
).suppress_health_check == (HealthCheck.filter_too_much, HealthCheck.too_slow)
def test_verbosity_is_comparable():
assert Verbosity.quiet < Verbosity.normal
assert Verbosity.quiet <= Verbosity.quiet
assert Verbosity.quiet == Verbosity.quiet
assert Verbosity.quiet >= Verbosity.quiet
assert Verbosity.debug > Verbosity.quiet
# make sure we're comparing by int value, not by str value
assert Verbosity.quiet < Verbosity.normal < Verbosity.verbose < Verbosity.debug
# also comparable with other ints
assert Verbosity.quiet < 1
assert Verbosity.quiet <= 1
assert Verbosity.quiet == 0
assert Verbosity.quiet >= 0
assert Verbosity.normal > 0
@checks_deprecated_behaviour
def test_can_set_verbosity_to_integers():
assert Verbosity(0) is Verbosity.quiet
assert Verbosity(1) is Verbosity.normal
assert Verbosity(2) is Verbosity.verbose
assert Verbosity(3) is Verbosity.debug
@checks_deprecated_behaviour
def test_can_set_phase_to_integers():
assert Phase(0) is Phase.explicit
assert Phase(1) is Phase.reuse
assert Phase(2) is Phase.generate
assert Phase(4) is Phase.shrink
@checks_deprecated_behaviour
def test_can_set_suppressions_to_integers():
assert HealthCheck(1) is HealthCheck.data_too_large
assert HealthCheck(2) is HealthCheck.filter_too_much
assert HealthCheck(3) is HealthCheck.too_slow
def test_invalid_integer_phase_raises():
with pytest.raises(ValueError):
Phase(99)
def test_invalid_integer_healthcheck_raises():
with pytest.raises(ValueError):
HealthCheck(99)
| StepCounter |
python | PyCQA__bandit | tests/unit/core/test_meta_ast.py | {
"start": 127,
"end": 916
} | class ____(testtools.TestCase):
def setUp(self):
super().setUp()
self.b_meta_ast = meta_ast.BanditMetaAst()
self.node = "fake_node"
self.parent_id = "fake_parent_id"
self.depth = 1
self.b_meta_ast.add_node(self.node, self.parent_id, self.depth)
self.node_id = hex(id(self.node))
def test_add_node(self):
expected = {
"raw": self.node,
"parent_id": self.parent_id,
"depth": self.depth,
}
self.assertEqual(expected, self.b_meta_ast.nodes[self.node_id])
def test_str(self):
node = self.b_meta_ast.nodes[self.node_id]
expected = f"Node: {self.node_id}\n\t{node}\nLength: 1\n"
self.assertEqual(expected, str(self.b_meta_ast))
| BanditMetaAstTests |
python | google__flatbuffers | tests/monster_test_generated.py | {
"start": 21972,
"end": 23158
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Referrable()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsReferrable(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ReferrableBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# Referrable
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Referrable
def Id(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
def ReferrableStart(builder):
builder.StartObject(1)
def ReferrableAddId(builder, id):
builder.PrependUint64Slot(0, id, 0)
def ReferrableEnd(builder):
return builder.EndObject()
| Referrable |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 26482,
"end": 26719
} | class ____(graphene.ObjectType):
"""Output indicating that asset history was deleted."""
assetPartitionRanges = non_null_list(GrapheneAssetPartitionRange)
class Meta:
name = "AssetWipeSuccess"
| GrapheneAssetWipeSuccess |
python | walkccc__LeetCode | solutions/2771. Longest Non-decreasing Subarray From Two Arrays/2771.py | {
"start": 0,
"end": 600
} | class ____:
def maxNonDecreasingLength(self, nums1: list[int], nums2: list[int]) -> int:
ans = 1
dp1 = 1 # the longest subarray that ends in nums1[i] so far
dp2 = 1 # the longest subarray that ends in nums2[i] so far
for i in range(1, len(nums1)):
dp11 = dp1 + 1 if nums1[i - 1] <= nums1[i] else 1
dp21 = dp2 + 1 if nums2[i - 1] <= nums1[i] else 1
dp12 = dp1 + 1 if nums1[i - 1] <= nums2[i] else 1
dp22 = dp2 + 1 if nums2[i - 1] <= nums2[i] else 1
dp1 = max(dp11, dp21)
dp2 = max(dp12, dp22)
ans = max(ans, dp1, dp2)
return ans
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.