language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_text.py | {
"start": 29400,
"end": 30312
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@auto_docstring(
custom_intro="""
Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
| Data2VecTextClassificationHead |
python | django__django | tests/template_tests/syntax_tests/test_extends.py | {
"start": 16844,
"end": 17141
} | class ____(SimpleTestCase):
def test_extends_node_repr(self):
extends_node = ExtendsNode(
nodelist=NodeList([]),
parent_name=Node(),
template_dirs=[],
)
self.assertEqual(repr(extends_node), "<ExtendsNode: extends None>")
| ExtendsNodeTests |
python | pandas-dev__pandas | pandas/tests/tools/test_to_datetime.py | {
"start": 126461,
"end": 142256
} | class ____:
@pytest.mark.parametrize(
"listlike,do_caching",
[
([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False),
([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True),
],
)
def test_should_cache(self, listlike, do_caching):
assert (
tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7)
== do_caching
)
@pytest.mark.parametrize(
"unique_share,check_count, err_message",
[
(0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"),
(10, 2, r"unique_share must be in next bounds: \(0; 1\)"),
],
)
def test_should_cache_errors(self, unique_share, check_count, err_message):
arg = [5] * 10
with pytest.raises(AssertionError, match=err_message):
tools.should_cache(arg, unique_share, check_count)
@pytest.mark.parametrize(
"listlike",
[
(deque([Timestamp("2010-06-02 09:30:00")] * 51)),
([Timestamp("2010-06-02 09:30:00")] * 51),
(tuple([Timestamp("2010-06-02 09:30:00")] * 51)),
],
)
def test_no_slicing_errors_in_should_cache(self, listlike):
# GH#29403
assert tools.should_cache(listlike) is True
def test_nullable_integer_to_datetime():
# Test for #30050
ser = Series([1, 2, None, 2**61, None], dtype="Int64")
ser_copy = ser.copy()
res = to_datetime(ser, unit="ns")
expected = Series(
[
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
np.datetime64("NaT"),
np.datetime64("2043-01-25 23:56:49.213693952"),
np.datetime64("NaT"),
]
)
tm.assert_series_equal(res, expected)
# Check that ser isn't mutated
tm.assert_series_equal(ser, ser_copy)
@pytest.mark.parametrize("klass", [np.array, list])
def test_na_to_datetime(nulls_fixture, klass):
if isinstance(nulls_fixture, Decimal):
with pytest.raises(TypeError, match="not convertible to datetime"):
to_datetime(klass([nulls_fixture]))
else:
result = to_datetime(klass([nulls_fixture]))
assert result[0] is NaT
@pytest.mark.parametrize("errors", ["raise", "coerce"])
@pytest.mark.parametrize(
"args, format",
[
(["03/24/2016", "03/25/2016", ""], "%m/%d/%Y"),
(["2016-03-24", "2016-03-25", ""], "%Y-%m-%d"),
],
ids=["non-ISO8601", "ISO8601"],
)
def test_empty_string_datetime(errors, args, format):
# GH13044, GH50251
td = Series(args)
# coerce empty string to pd.NaT
result = to_datetime(td, format=format, errors=errors)
expected = Series(["2016-03-24", "2016-03-25", NaT], dtype="datetime64[us]")
tm.assert_series_equal(expected, result)
def test_empty_string_datetime_coerce__unit():
# GH13044
# coerce empty string to pd.NaT
result = to_datetime([1, ""], unit="s", errors="coerce")
expected = DatetimeIndex(["1970-01-01 00:00:01", "NaT"], dtype="datetime64[ns]")
tm.assert_index_equal(expected, result)
# verify that no exception is raised even when errors='raise' is set
result = to_datetime([1, ""], unit="s", errors="raise")
tm.assert_index_equal(expected, result)
def test_to_datetime_monotonic_increasing_index(cache):
# GH28238
cstart = start_caching_at
times = date_range(Timestamp("1980"), periods=cstart, freq="YS")
times = times.to_frame(index=False, name="DT").sample(n=cstart, random_state=1)
times.index = times.index.to_series().astype(float) / 1000
result = to_datetime(times.iloc[:, 0], cache=cache)
expected = times.iloc[:, 0]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"series_length",
[40, start_caching_at, (start_caching_at + 1), (start_caching_at + 5)],
)
def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length):
# GH#45319
ser = Series(
[datetime.fromisoformat("1446-04-12 00:00:00+00:00")]
+ ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length),
dtype=object,
)
result1 = to_datetime(ser, errors="coerce", utc=True)
expected1 = Series([Timestamp(x) for x in ser])
assert expected1.dtype == "M8[us, UTC]"
tm.assert_series_equal(result1, expected1)
result3 = to_datetime(ser, errors="raise", utc=True)
tm.assert_series_equal(result3, expected1)
def test_to_datetime_format_f_parse_nanos():
# GH 48767
timestamp = "15/02/2020 02:03:04.123456789"
timestamp_format = "%d/%m/%Y %H:%M:%S.%f"
result = to_datetime(timestamp, format=timestamp_format)
expected = Timestamp(
year=2020,
month=2,
day=15,
hour=2,
minute=3,
second=4,
microsecond=123456,
nanosecond=789,
)
assert result == expected
def test_to_datetime_mixed_iso8601():
# https://github.com/pandas-dev/pandas/issues/50411
result = to_datetime(["2020-01-01", "2020-01-01 05:00:00"], format="ISO8601")
expected = DatetimeIndex(["2020-01-01 00:00:00", "2020-01-01 05:00:00"])
tm.assert_index_equal(result, expected)
def test_to_datetime_mixed_other():
# https://github.com/pandas-dev/pandas/issues/50411
result = to_datetime(["01/11/2000", "12 January 2000"], format="mixed")
expected = DatetimeIndex(["2000-01-11", "2000-01-12"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("format", ["ISO8601", "mixed"])
def test_to_datetime_mixed_or_iso_exact(exact, format):
msg = "Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'"
with pytest.raises(ValueError, match=msg):
to_datetime(["2020-01-01"], exact=exact, format=format)
def test_to_datetime_mixed_not_necessarily_iso8601_raise():
# https://github.com/pandas-dev/pandas/issues/50411
with pytest.raises(ValueError, match="Time data 01-01-2000 is not ISO8601 format"):
to_datetime(["2020-01-01", "01-01-2000"], format="ISO8601")
def test_to_datetime_mixed_not_necessarily_iso8601_coerce():
# https://github.com/pandas-dev/pandas/issues/50411
result = to_datetime(
["2020-01-01", "01-01-2000"], format="ISO8601", errors="coerce"
)
tm.assert_index_equal(result, DatetimeIndex(["2020-01-01 00:00:00", NaT]))
def test_to_datetime_iso8601_utc_single_naive():
# GH#61389
result = to_datetime("2023-10-15T14:30:00", utc=True, format="ISO8601")
expected = Timestamp("2023-10-15 14:30:00+00:00")
assert result == expected
def test_to_datetime_iso8601_utc_mixed_negative_offset():
# GH#61389
data = ["2023-10-15T10:30:00-12:00", "2023-10-15T14:30:00"]
result = to_datetime(data, utc=True, format="ISO8601")
expected = DatetimeIndex(
[Timestamp("2023-10-15 22:30:00+00:00"), Timestamp("2023-10-15 14:30:00+00:00")]
)
tm.assert_index_equal(result, expected)
def test_to_datetime_iso8601_utc_mixed_positive_offset():
# GH#61389
data = ["2023-10-15T10:30:00+08:00", "2023-10-15T14:30:00"]
result = to_datetime(data, utc=True, format="ISO8601")
expected = DatetimeIndex(
[Timestamp("2023-10-15 02:30:00+00:00"), Timestamp("2023-10-15 14:30:00+00:00")]
)
tm.assert_index_equal(result, expected)
def test_to_datetime_iso8601_utc_mixed_both_offsets():
# GH#61389
data = [
"2023-10-15T10:30:00+08:00",
"2023-10-15T12:30:00-05:00",
"2023-10-15T14:30:00",
]
result = to_datetime(data, utc=True, format="ISO8601")
expected = DatetimeIndex(
[
Timestamp("2023-10-15 02:30:00+00:00"),
Timestamp("2023-10-15 17:30:00+00:00"),
Timestamp("2023-10-15 14:30:00+00:00"),
]
)
tm.assert_index_equal(result, expected)
def test_unknown_tz_raises():
# GH#18702, GH#51476
dtstr = "2014 Jan 9 05:15 FAKE"
msg = '.*un-recognized timezone "FAKE".'
with pytest.raises(ValueError, match=msg):
Timestamp(dtstr)
with pytest.raises(ValueError, match=msg):
to_datetime(dtstr)
with pytest.raises(ValueError, match=msg):
to_datetime([dtstr])
def test_unformatted_input_raises():
valid, invalid = "2024-01-01", "N"
ser = Series([valid] * start_caching_at + [invalid])
msg = 'time data "N" doesn\'t match format "%Y-%m-%d"'
with pytest.raises(ValueError, match=msg):
to_datetime(ser, format="%Y-%m-%d", exact=True, cache=True)
def test_from_numeric_arrow_dtype(any_numeric_ea_dtype):
# GH 52425
pytest.importorskip("pyarrow")
ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")
result = to_datetime(ser)
expected = Series([1, 2], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
def test_to_datetime_with_empty_str_utc_false_format_mixed():
# GH 50887
vals = ["2020-01-01 00:00+00:00", ""]
result = to_datetime(vals, format="mixed")
expected = Index([Timestamp("2020-01-01 00:00+00:00"), "NaT"], dtype="M8[us, UTC]")
tm.assert_index_equal(result, expected)
# Check that a couple of other similar paths work the same way
alt = to_datetime(vals)
tm.assert_index_equal(alt, expected)
alt2 = DatetimeIndex(vals)
tm.assert_index_equal(alt2, expected)
def test_to_datetime_with_empty_str_utc_false_offsets_and_format_mixed():
# GH#50887, GH#57275
msg = "Mixed timezones detected. Pass utc=True in to_datetime"
with pytest.raises(ValueError, match=msg):
to_datetime(
["2020-01-01 00:00+00:00", "2020-01-01 00:00+02:00", ""], format="mixed"
)
def test_to_datetime_mixed_tzs_mixed_types():
# GH#55793, GH#55693 mismatched tzs but one is str and other is
# datetime object
ts = Timestamp("2016-01-02 03:04:05", tz="US/Pacific")
dtstr = "2023-10-30 15:06+01"
arr = [ts, dtstr]
msg = (
"Mixed timezones detected. Pass utc=True in to_datetime or tz='UTC' "
"in DatetimeIndex to convert to a common timezone"
)
with pytest.raises(ValueError, match=msg):
to_datetime(arr)
with pytest.raises(ValueError, match=msg):
to_datetime(arr, format="mixed")
with pytest.raises(ValueError, match=msg):
DatetimeIndex(arr)
def test_to_datetime_mixed_types_matching_tzs():
# GH#55793
dtstr = "2023-11-01 09:22:03-07:00"
ts = Timestamp(dtstr)
arr = [ts, dtstr]
res1 = to_datetime(arr)
res2 = to_datetime(arr[::-1])[::-1]
res3 = to_datetime(arr, format="mixed")
res4 = DatetimeIndex(arr)
expected = DatetimeIndex([ts, ts])
tm.assert_index_equal(res1, expected)
tm.assert_index_equal(res2, expected)
tm.assert_index_equal(res3, expected)
tm.assert_index_equal(res4, expected)
dtstr = "2020-01-01 00:00+00:00"
ts = Timestamp(dtstr)
@pytest.mark.filterwarnings("ignore:Could not infer format:UserWarning")
@pytest.mark.parametrize(
"aware_val",
[dtstr, Timestamp(dtstr)],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize(
"naive_val",
[dtstr[:-6], ts.tz_localize(None), ts.date(), ts.asm8, ts.value, float(ts.value)],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("naive_first", [True, False])
def test_to_datetime_mixed_awareness_mixed_types(aware_val, naive_val, naive_first):
# GH#55793, GH#55693, GH#57275
# Empty string parses to NaT
vals = [aware_val, naive_val, ""]
vec = vals
if naive_first:
# alas, the behavior is order-dependent, so we test both ways
vec = [naive_val, aware_val, ""]
# both_strs-> paths that were previously already deprecated with warning
# issued in _array_to_datetime_object
both_strs = isinstance(aware_val, str) and isinstance(naive_val, str)
has_numeric = isinstance(naive_val, (int, float))
both_datetime = isinstance(naive_val, datetime) and isinstance(aware_val, datetime)
mixed_msg = (
"Mixed timezones detected. Pass utc=True in to_datetime or tz='UTC' "
"in DatetimeIndex to convert to a common timezone"
)
first_non_null = next(x for x in vec if x != "")
# if first_non_null is a not a string, _guess_datetime_format_for_array
# doesn't guess a format so we don't go through array_strptime
if not isinstance(first_non_null, str):
# that case goes through array_strptime which has different behavior
msg = mixed_msg
if naive_first and isinstance(aware_val, Timestamp):
if isinstance(naive_val, Timestamp):
msg = "Tz-aware datetime.datetime cannot be converted to datetime64"
with pytest.raises(ValueError, match=msg):
to_datetime(vec)
else:
if not naive_first and both_datetime:
msg = "Cannot mix tz-aware with tz-naive values"
with pytest.raises(ValueError, match=msg):
to_datetime(vec)
# No warning/error with utc=True
to_datetime(vec, utc=True)
elif has_numeric and vec.index(aware_val) < vec.index(naive_val):
msg = "time data .* doesn't match format"
with pytest.raises(ValueError, match=msg):
to_datetime(vec)
with pytest.raises(ValueError, match=msg):
to_datetime(vec, utc=True)
elif both_strs and vec.index(aware_val) < vec.index(naive_val):
msg = r"time data \"2020-01-01 00:00\" doesn't match format"
with pytest.raises(ValueError, match=msg):
to_datetime(vec)
with pytest.raises(ValueError, match=msg):
to_datetime(vec, utc=True)
elif both_strs and vec.index(naive_val) < vec.index(aware_val):
msg = "unconverted data remains when parsing with format"
with pytest.raises(ValueError, match=msg):
to_datetime(vec)
with pytest.raises(ValueError, match=msg):
to_datetime(vec, utc=True)
else:
msg = mixed_msg
with pytest.raises(ValueError, match=msg):
to_datetime(vec)
# No warning/error with utc=True
to_datetime(vec, utc=True)
if both_strs:
msg = mixed_msg
with pytest.raises(ValueError, match=msg):
to_datetime(vec, format="mixed")
with pytest.raises(ValueError, match=msg):
DatetimeIndex(vec)
else:
msg = mixed_msg
if naive_first and isinstance(aware_val, Timestamp):
if isinstance(naive_val, Timestamp):
msg = "Tz-aware datetime.datetime cannot be converted to datetime64"
with pytest.raises(ValueError, match=msg):
to_datetime(vec, format="mixed")
with pytest.raises(ValueError, match=msg):
DatetimeIndex(vec)
else:
if not naive_first and both_datetime:
msg = "Cannot mix tz-aware with tz-naive values"
with pytest.raises(ValueError, match=msg):
to_datetime(vec, format="mixed")
with pytest.raises(ValueError, match=msg):
DatetimeIndex(vec)
def test_to_datetime_wrapped_datetime64_ps():
# GH#60341
result = to_datetime([np.datetime64(1901901901901, "ps")])
expected = DatetimeIndex(
["1970-01-01 00:00:01.901901901"], dtype="datetime64[ns]", freq=None
)
tm.assert_index_equal(result, expected)
def test_to_datetime_lxml_elementunicoderesult_with_format(cache):
etree = pytest.importorskip("lxml.etree")
s = "2025-02-05 16:59:57"
node = etree.XML(f"<date>{s}</date>")
val = node.xpath("/date/node()")[0] # _ElementUnicodeResult
out = to_datetime(Series([val]), format="%Y-%m-%d %H:%M:%S", cache=cache)
assert out.iloc[0] == Timestamp(s)
| TestShouldCache |
python | FactoryBoy__factory_boy | tests/djapp/models.py | {
"start": 2654,
"end": 2781
} | class ____(models.Model):
custom_objects = CustomManager()
class Meta:
abstract = True
| AbstractWithCustomManager |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/signal/dct_ops_test.py | {
"start": 5307,
"end": 9316
} | class ____(parameterized.TestCase, test.TestCase):
def _compare(self, signals, n, norm, dct_type, atol, rtol):
"""Compares (I)DCT to SciPy (if available) and a NumPy implementation."""
np_dct = NP_DCT[dct_type](signals, n=n, norm=norm)
tf_dct = dct_ops.dct(signals, n=n, type=dct_type, norm=norm)
self.assertEqual(tf_dct.dtype.as_numpy_dtype, signals.dtype)
self.assertAllClose(np_dct, tf_dct, atol=atol, rtol=rtol)
np_idct = NP_IDCT[dct_type](signals, n=None, norm=norm)
tf_idct = dct_ops.idct(signals, type=dct_type, norm=norm)
self.assertEqual(tf_idct.dtype.as_numpy_dtype, signals.dtype)
self.assertAllClose(np_idct, tf_idct, atol=atol, rtol=rtol)
if fftpack and dct_type != 4:
scipy_dct = fftpack.dct(signals, n=n, type=dct_type, norm=norm)
self.assertAllClose(scipy_dct, tf_dct, atol=atol, rtol=rtol)
scipy_idct = fftpack.idct(signals, type=dct_type, norm=norm)
self.assertAllClose(scipy_idct, tf_idct, atol=atol, rtol=rtol)
# Verify inverse(forward(s)) == s, up to a normalization factor.
# Since `n` is not implemented for IDCT operation, re-calculating tf_dct
# without n.
tf_dct = dct_ops.dct(signals, type=dct_type, norm=norm)
tf_idct_dct = dct_ops.idct(tf_dct, type=dct_type, norm=norm)
tf_dct_idct = dct_ops.dct(tf_idct, type=dct_type, norm=norm)
if norm is None:
if dct_type == 1:
tf_idct_dct *= 0.5 / (signals.shape[-1] - 1)
tf_dct_idct *= 0.5 / (signals.shape[-1] - 1)
else:
tf_idct_dct *= 0.5 / signals.shape[-1]
tf_dct_idct *= 0.5 / signals.shape[-1]
self.assertAllClose(signals, tf_idct_dct, atol=atol, rtol=rtol)
self.assertAllClose(signals, tf_dct_idct, atol=atol, rtol=rtol)
@parameterized.parameters(itertools.product(
[1, 2, 3, 4],
[None, "ortho"],
[[2], [3], [10], [2, 20], [2, 3, 25]],
[np.float32, np.float64]))
def test_random(self, dct_type, norm, shape, dtype):
"""Test randomly generated batches of data."""
# "ortho" normalization is not implemented for type I.
if dct_type == 1 and norm == "ortho":
return
with self.session():
tol = 5e-4 if dtype == np.float32 else 1e-7
signals = np.random.rand(*shape).astype(dtype)
n = np.random.randint(1, 2 * signals.shape[-1])
n = np.random.choice([None, n])
self._compare(signals, n, norm=norm, dct_type=dct_type,
rtol=tol, atol=tol)
@parameterized.parameters(itertools.product(
[1, 2, 3, 4],
[None, "ortho"],
[[2], [3], [10], [2, 20], [2, 3, 25]],
[np.float32, np.float64]))
def test_with_dynamic_dimensions(self, dct_type, norm, shape, dtype):
# "ortho" normalization is not implemented for type I.
if dct_type == 1 and norm == "ortho":
return
signals = np.random.rand(*shape).astype(dtype)
n = np.random.randint(1, 2 * shape[-1])
n = np.random.choice([None, n])
@def_function.function
def func(signals):
return dct_ops.dct(signals, n=n, type=dct_type, norm=norm)
# Trace with all undefined dimensions
signals_spec = tensor_spec.TensorSpec([None] * len(shape), dtype)
f = func.get_concrete_function(signals_spec)
# Run with actual shape
f(signals)
def test_error(self):
signals = np.random.rand(10)
# Unsupported type.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=5)
# Invalid n.
with self.assertRaises(ValueError):
dct_ops.dct(signals, n=-2)
# DCT-I normalization not implemented.
with self.assertRaises(ValueError):
dct_ops.dct(signals, type=1, norm="ortho")
# DCT-I requires at least two inputs.
with self.assertRaises(ValueError):
dct_ops.dct(np.random.rand(1), type=1)
# Unknown normalization.
with self.assertRaises(ValueError):
dct_ops.dct(signals, norm="bad")
with self.assertRaises(NotImplementedError):
dct_ops.dct(signals, axis=0)
if __name__ == "__main__":
test.main()
| DCTOpsTest |
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 23622,
"end": 24358
} | class ____(nn.Module):
def __init__(self, config: EfficientLoFTRConfig):
super().__init__()
self.layers = nn.ModuleList(
[
EfficientLoFTRLocalFeatureTransformerLayer(config, layer_idx=i)
for i in range(config.num_attention_layers)
]
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
for layer in self.layers:
hidden_states = layer(hidden_states, position_embeddings=position_embeddings, **kwargs)
return hidden_states
| EfficientLoFTRLocalFeatureTransformer |
python | apache__airflow | providers/apache/spark/src/airflow/providers/apache/spark/hooks/spark_jdbc.py | {
"start": 991,
"end": 11867
} | class ____(SparkSubmitHook):
"""
Extends the SparkSubmitHook for performing data transfers to/from JDBC-based databases with Apache Spark.
:param spark_app_name: Name of the job (default airflow-spark-jdbc)
:param spark_conn_id: The :ref:`spark connection id <howto/connection:spark-submit>`
as configured in Airflow administration
:param spark_conf: Any additional Spark configuration properties
:param spark_py_files: Additional python files used (.zip, .egg, or .py)
:param spark_files: Additional files to upload to the container running the job
:param spark_jars: Additional jars to upload and add to the driver and
executor classpath
:param num_executors: number of executor to run. This should be set so as to manage
the number of connections made with the JDBC database
:param executor_cores: Number of cores per executor
:param executor_memory: Memory per executor (e.g. 1000M, 2G)
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G)
:param verbose: Whether to pass the verbose flag to spark-submit for debugging
:param keytab: Full path to the file that contains the keytab
:param principal: The name of the kerberos principal used for keytab
:param cmd_type: Which way the data should flow. 2 possible values:
spark_to_jdbc: data written by spark from metastore to jdbc
jdbc_to_spark: data written by spark from jdbc to metastore
:param jdbc_table: The name of the JDBC table
:param jdbc_conn_id: Connection id used for connection to JDBC database
:param jdbc_driver: Name of the JDBC driver to use for the JDBC connection. This
driver (usually a jar) should be passed in the 'jars' parameter
:param metastore_table: The name of the metastore table,
:param jdbc_truncate: (spark_to_jdbc only) Whether or not Spark should truncate or
drop and recreate the JDBC table. This only takes effect if
'save_mode' is set to Overwrite. Also, if the schema is
different, Spark cannot truncate, and will drop and recreate
:param save_mode: The Spark save-mode to use (e.g. overwrite, append, etc.)
:param save_format: (jdbc_to_spark-only) The Spark save-format to use (e.g. parquet)
:param batch_size: (spark_to_jdbc only) The size of the batch to insert per round
trip to the JDBC database. Defaults to 1000
:param fetch_size: (jdbc_to_spark only) The size of the batch to fetch per round trip
from the JDBC database. Default depends on the JDBC driver
:param num_partitions: The maximum number of partitions that can be used by Spark
simultaneously, both for spark_to_jdbc and jdbc_to_spark
operations. This will also cap the number of JDBC connections
that can be opened
:param partition_column: (jdbc_to_spark-only) A numeric column to be used to
partition the metastore table by. If specified, you must
also specify:
num_partitions, lower_bound, upper_bound
:param lower_bound: (jdbc_to_spark-only) Lower bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, upper_bound
:param upper_bound: (jdbc_to_spark-only) Upper bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, lower_bound
:param create_table_column_types: (spark_to_jdbc-only) The database column data types
to use instead of the defaults, when creating the
table. Data type information should be specified in
the same format as CREATE TABLE columns syntax
(e.g: "name CHAR(64), comments VARCHAR(1024)").
The specified types should be valid spark sql data
types.
:param use_krb5ccache: if True, configure spark to use ticket cache instead of relying
on keytab for Kerberos login
"""
conn_name_attr = "spark_conn_id"
default_conn_name = "spark_default"
conn_type = "spark_jdbc"
hook_name = "Spark JDBC"
def __init__(
self,
spark_app_name: str = "airflow-spark-jdbc",
spark_conn_id: str = default_conn_name,
spark_conf: dict[str, Any] | None = None,
spark_py_files: str | None = None,
spark_files: str | None = None,
spark_jars: str | None = None,
num_executors: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
driver_memory: str | None = None,
verbose: bool = False,
principal: str | None = None,
keytab: str | None = None,
cmd_type: str = "spark_to_jdbc",
jdbc_table: str | None = None,
jdbc_conn_id: str = "jdbc-default",
jdbc_driver: str | None = None,
metastore_table: str | None = None,
jdbc_truncate: bool = False,
save_mode: str | None = None,
save_format: str | None = None,
batch_size: int | None = None,
fetch_size: int | None = None,
num_partitions: int | None = None,
partition_column: str | None = None,
lower_bound: str | None = None,
upper_bound: str | None = None,
create_table_column_types: str | None = None,
*args: Any,
use_krb5ccache: bool = False,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self._name = spark_app_name
self._conn_id = spark_conn_id
self._conf = spark_conf or {}
self._py_files = spark_py_files
self._files = spark_files
self._jars = spark_jars
self._num_executors = num_executors
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._verbose = verbose
self._keytab = keytab
self._principal = principal
self._cmd_type = cmd_type
self._jdbc_table = jdbc_table
self._jdbc_conn_id = jdbc_conn_id
self._jdbc_driver = jdbc_driver
self._metastore_table = metastore_table
self._jdbc_truncate = jdbc_truncate
self._save_mode = save_mode
self._save_format = save_format
self._batch_size = batch_size
self._fetch_size = fetch_size
self._num_partitions = num_partitions
self._partition_column = partition_column
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._create_table_column_types = create_table_column_types
self._jdbc_connection = self._resolve_jdbc_connection()
self._use_krb5ccache = use_krb5ccache
def _resolve_jdbc_connection(self) -> dict[str, Any]:
conn_data = {"url": "", "schema": "", "conn_prefix": "", "user": "", "password": ""}
try:
conn = self.get_connection(self._jdbc_conn_id)
if conn.host is not None and "/" in conn.host:
raise ValueError("The jdbc host should not contain a '/'")
if conn.schema is not None and "?" in conn.schema:
raise ValueError("The jdbc schema should not contain a '?'")
if conn.port is not None:
conn_data["url"] = f"{cast('str', conn.host)}:{conn.port}"
else:
conn_data["url"] = cast("str", conn.host)
conn_data["schema"] = cast("str", conn.schema)
conn_data["user"] = cast("str", conn.login)
conn_data["password"] = cast("str", conn.password)
extra = conn.extra_dejson
conn_data["conn_prefix"] = extra.get("conn_prefix", "")
except AirflowException:
self.log.debug(
"Could not load jdbc connection string %s, defaulting to %s", self._jdbc_conn_id, ""
)
return conn_data
def _build_jdbc_application_arguments(self, jdbc_conn: dict[str, Any]) -> Any:
arguments = []
arguments += ["-cmdType", self._cmd_type]
if self._jdbc_connection["url"]:
if "?" in jdbc_conn["conn_prefix"]:
raise ValueError("The jdbc extra conn_prefix should not contain a '?'")
arguments += [
"-url",
f"{jdbc_conn['conn_prefix']}{jdbc_conn['url']}/{jdbc_conn['schema']}",
]
if self._jdbc_connection["user"]:
arguments += ["-user", self._jdbc_connection["user"]]
if self._jdbc_connection["password"]:
arguments += ["-password", self._jdbc_connection["password"]]
if self._metastore_table:
arguments += ["-metastoreTable", self._metastore_table]
if self._jdbc_table:
arguments += ["-jdbcTable", self._jdbc_table]
if self._jdbc_truncate:
arguments += ["-jdbcTruncate", str(self._jdbc_truncate)]
if self._jdbc_driver:
arguments += ["-jdbcDriver", self._jdbc_driver]
if self._batch_size:
arguments += ["-batchsize", str(self._batch_size)]
if self._fetch_size:
arguments += ["-fetchsize", str(self._fetch_size)]
if self._num_partitions:
arguments += ["-numPartitions", str(self._num_partitions)]
if self._partition_column and self._lower_bound and self._upper_bound and self._num_partitions:
# these 3 parameters need to be used all together to take effect.
arguments += [
"-partitionColumn",
self._partition_column,
"-lowerBound",
self._lower_bound,
"-upperBound",
self._upper_bound,
]
if self._save_mode:
arguments += ["-saveMode", self._save_mode]
if self._save_format:
arguments += ["-saveFormat", self._save_format]
if self._create_table_column_types:
arguments += ["-createTableColumnTypes", self._create_table_column_types]
return arguments
def submit_jdbc_job(self) -> None:
"""Submit Spark JDBC job."""
self._application_args = self._build_jdbc_application_arguments(self._jdbc_connection)
self.submit(application=f"{os.path.dirname(os.path.abspath(__file__))}/spark_jdbc_script.py")
def get_conn(self) -> Any:
pass
| SparkJDBCHook |
python | scrapy__scrapy | tests/test_signals.py | {
"start": 983,
"end": 1780
} | class ____:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self.items = []
async def _on_item_scraped(self, item):
item = await get_from_asyncio_queue(item)
self.items.append(item)
@pytest.mark.only_asyncio
@inlineCallbacks
def test_simple_pipeline(self):
crawler = get_crawler(ItemSpider)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
assert len(self.items) == 10
for index in range(10):
assert {"index": index} in self.items
| TestMockServer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/pg8000.py | {
"start": 5089,
"end": 5237
} | class ____(sqltypes.JSON.JSONIndexType):
def get_dbapi_type(self, dbapi):
raise NotImplementedError("should not be here")
| _PGJSONIndexType |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 719944,
"end": 727479
} | class ____(
ColorDef, MarkPropDefGradientstringnull
):
"""
FieldOrDatumDefWithConditionDatumDefGradientstringnull schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefGradientstringnullExprRef`, :class:`ConditionalParameterValueDefGradientstringnullExprRef`, :class:`ConditionalPredicateValueDefGradientstringnullExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {
"$ref": "#/definitions/FieldOrDatumDefWithCondition<DatumDef,(Gradient|string|null)>"
}
def __init__(
self,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
bandPosition=bandPosition,
condition=condition,
datum=datum,
title=title,
type=type,
**kwds,
)
| FieldOrDatumDefWithConditionDatumDefGradientstringnull |
python | readthedocs__readthedocs.org | readthedocs/search/migrations/0004_make_total_results_not_null.py | {
"start": 149,
"end": 528
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("search", "0003_migrate_total_results_null_values"),
]
operations = [
migrations.AlterField(
model_name="searchquery",
name="total_results",
field=models.IntegerField(default=0, verbose_name="Total results"),
),
]
| Migration |
python | huggingface__transformers | src/transformers/models/ibert/modeling_ibert.py | {
"start": 12165,
"end": 14050
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.quant_mode = config.quant_mode
self.act_bit = 8
self.weight_bit = 8
self.bias_bit = 32
self.ln_input_bit = 22
self.ln_output_bit = 32
self.dense = QuantLinear(
config.hidden_size,
config.hidden_size,
bias=True,
weight_bit=self.weight_bit,
bias_bit=self.bias_bit,
quant_mode=self.quant_mode,
per_channel=True,
)
self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode)
self.LayerNorm = IntLayerNorm(
config.hidden_size,
eps=config.layer_norm_eps,
output_bit=self.ln_output_bit,
quant_mode=self.quant_mode,
force_dequant=config.force_dequant,
)
self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
hidden_states = self.dropout(hidden_states)
hidden_states, hidden_states_scaling_factor = self.ln_input_act(
hidden_states,
hidden_states_scaling_factor,
identity=input_tensor,
identity_scaling_factor=input_tensor_scaling_factor,
)
hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor)
hidden_states, hidden_states_scaling_factor = self.output_activation(
hidden_states, hidden_states_scaling_factor
)
return hidden_states, hidden_states_scaling_factor
| IBertSelfOutput |
python | neetcode-gh__leetcode | python/0211-design-add-and-search-words-data-structure.py | {
"start": 0,
"end": 111
} | class ____:
def __init__(self):
self.children = {} # a : TrieNode
self.word = False
| TrieNode |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 45395,
"end": 49054
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
drop_path = config.fusion_droppath
# pre layer norm
self.layer_norm_vision = nn.LayerNorm(config.d_model, config.layer_norm_eps)
self.layer_norm_text = nn.LayerNorm(config.d_model, config.layer_norm_eps)
self.attn = GroundingDinoBiMultiHeadAttention(config)
# add layer scale for training stability
self.drop_path = GroundingDinoDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
init_values = 1e-4
self.vision_param = nn.Parameter(init_values * torch.ones(config.d_model), requires_grad=True)
self.text_param = nn.Parameter(init_values * torch.ones(config.d_model), requires_grad=True)
def forward(
self,
vision_features: torch.FloatTensor,
text_features: torch.FloatTensor,
attention_mask_vision: Optional[torch.BoolTensor] = None,
attention_mask_text: Optional[torch.BoolTensor] = None,
) -> tuple[tuple[torch.FloatTensor, torch.FloatTensor], tuple[torch.FloatTensor, torch.FloatTensor]]:
"""Image and text features fusion
Args:
vision_features (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`):
Projected flattened image features generated by the vision backbone.
text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`):
Projected text features generated by the text encoder.
attention_mask_vision (`torch.BoolTensor`, **optional**):
Attention mask for image-to-text cross-attention. False for real tokens and True for padding tokens.
attention_mask_text (`torch.BoolTensor`, **optional**):
Attention mask for text-to-image cross-attention. False for real tokens and True for padding tokens.
Returns:
`tuple(tuple(torch.FloatTensor), tuple(torch.FloatTensor))` where each inner tuple comprises an enhanced
feature and attention output and weights:
- **vision_features** (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, vision_dim)`) --
Updated vision features with attention output from image-to-text cross-attention layer.
- **vision_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, vision_sequence_length,
vision_sequence_length)`) --
Attention weights of the image-to-text cross-attention layer.
- **text_features** (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, text_dim)`) --
Updated text features with attention output from text-to-image cross-attention layer.
- **text_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, text_sequence_length,
text_sequence_length)`) --
Attention weights of the text-to-image cross-attention layer.
"""
vision_features = self.layer_norm_vision(vision_features)
text_features = self.layer_norm_text(text_features)
(delta_v, vision_attn), (delta_t, text_attn) = self.attn(
vision_features,
text_features,
vision_attention_mask=attention_mask_vision,
text_attention_mask=attention_mask_text,
)
vision_features = vision_features + self.drop_path(self.vision_param * delta_v)
text_features = text_features + self.drop_path(self.text_param * delta_t)
return (vision_features, vision_attn), (text_features, text_attn)
| GroundingDinoFusionLayer |
python | PrefectHQ__prefect | tests/test_flow_engine.py | {
"start": 45610,
"end": 53217
} | class ____:
async def test_suspended_flow_runs_do_not_block_execution(
self, prefect_client, deployment, session
):
flow_run_id = None
@flow()
async def suspending_flow():
nonlocal flow_run_id
context = get_run_context()
assert context.flow_run
flow_run_id = context.flow_run.id
from prefect.server.models.flow_runs import update_flow_run
await update_flow_run(
session,
flow_run_id,
ServerFlowRun.model_construct(deployment_id=deployment.id),
)
await session.commit()
await suspend_flow_run()
await asyncio.sleep(20)
start = time.time()
with pytest.raises(Pause):
await suspending_flow()
end = time.time()
assert end - start < 20
async def test_suspended_flow_run_has_correct_state(
self, prefect_client, deployment, session
):
flow_run_id = None
@flow()
async def suspending_flow():
nonlocal flow_run_id
context = get_run_context()
assert context.flow_run
flow_run_id = context.flow_run.id
from prefect.server.models.flow_runs import update_flow_run
await update_flow_run(
session,
flow_run_id,
ServerFlowRun.model_construct(deployment_id=deployment.id),
)
await session.commit()
await suspend_flow_run()
with pytest.raises(Pause):
await suspending_flow()
flow_run = await prefect_client.read_flow_run(flow_run_id)
state = flow_run.state
assert state.is_paused()
assert state.name == "Suspended"
async def test_suspending_flow_run_without_deployment_fails(self):
@flow()
async def suspending_flow():
await suspend_flow_run()
with pytest.raises(
RuntimeError, match="Cannot suspend flows without a deployment."
):
await suspending_flow()
async def test_suspending_sub_flow_run_fails(self):
@flow()
async def suspending_flow():
await suspend_flow_run()
@flow
async def main_flow():
await suspending_flow()
with pytest.raises(RuntimeError, match="Cannot suspend subflows."):
await main_flow()
@pytest.mark.xfail(reason="Brittle caused by 5xx from API")
async def test_suspend_flow_run_by_id(self, prefect_client, deployment, session):
flow_run_id = None
task_completions = 0
@task
async def increment_completions():
nonlocal task_completions
task_completions += 1
await asyncio.sleep(1)
@flow
async def suspendable_flow():
nonlocal flow_run_id
context = get_run_context()
assert context.flow_run
from prefect.server.models.flow_runs import update_flow_run
await update_flow_run(
session,
context.flow_run.id,
ServerFlowRun.model_construct(deployment_id=deployment.id),
)
await session.commit()
flow_run_id = context.flow_run.id
for i in range(20):
await increment_completions()
async def suspending_func():
nonlocal flow_run_id
while flow_run_id is None:
await asyncio.sleep(0.1)
# Sleep for a bit to let some of `suspendable_flow`s tasks complete
await asyncio.sleep(2)
await suspend_flow_run(flow_run_id=flow_run_id)
with pytest.raises(Pause):
await asyncio.gather(suspendable_flow(), suspending_func())
# When suspending a flow run by id, that flow run must use tasks for
# the suspension to take place. This setup allows for `suspendable_flow`
# to complete some tasks before `suspending_flow` suspends the flow run.
# Here then we check to ensure that some tasks completed but not _all_
# of the tasks.
assert task_completions > 0 and task_completions < 20
flow_run = await prefect_client.read_flow_run(flow_run_id)
state = flow_run.state
assert state.is_paused(), state
assert state.name == "Suspended"
async def test_suspend_can_receive_input(self, deployment, session, prefect_client):
flow_run_id = None
class FlowInput(RunInput):
x: int
@flow(persist_result=False)
async def suspending_flow():
nonlocal flow_run_id
context = get_run_context()
assert context.flow_run
if not context.flow_run.deployment_id:
# Ensure that the flow run has a deployment id so it's
# suspendable.
from prefect.server.models.flow_runs import update_flow_run
await update_flow_run(
session,
context.flow_run.id,
ServerFlowRun.model_construct(deployment_id=deployment.id),
)
await session.commit()
flow_run_id = context.flow_run.id
flow_input = await suspend_flow_run(wait_for_input=FlowInput)
return flow_input
with pytest.raises(Pause):
await suspending_flow()
assert flow_run_id
flow_run = await prefect_client.read_flow_run(flow_run_id)
keyset = flow_run.state.state_details.run_input_keyset
schema = await read_flow_run_input(
key=keyset["schema"], flow_run_id=flow_run_id
)
assert schema is not None
await resume_flow_run(flow_run_id, run_input={"x": 42})
flow_input = await run_flow_async(
flow=suspending_flow,
flow_run=flow_run,
parameters={},
)
assert flow_input
assert flow_input.x == 42
async def test_suspend_can_receive_automatic_input(
self, deployment, session, prefect_client
):
flow_run_id = None
@flow()
async def suspending_flow():
nonlocal flow_run_id
context = get_run_context()
assert context.flow_run
if not context.flow_run.deployment_id:
# Ensure that the flow run has a deployment id so it's
# suspendable.
from prefect.server.models.flow_runs import update_flow_run
assert await update_flow_run(
session,
context.flow_run.id,
ServerFlowRun.model_construct(deployment_id=deployment.id),
)
await session.commit()
flow_run_id = context.flow_run.id
age = await suspend_flow_run(int)
return age
with pytest.raises(Pause):
await suspending_flow()
assert flow_run_id
flow_run = await prefect_client.read_flow_run(flow_run_id)
keyset = flow_run.state.state_details.run_input_keyset
schema = await read_flow_run_input(
key=keyset["schema"], flow_run_id=flow_run_id
)
assert schema is not None
await resume_flow_run(flow_run_id, run_input={"value": 42})
age = await run_flow_async(
flow=suspending_flow,
flow_run=flow_run,
parameters={},
)
assert age == 42
| TestSuspendFlowRun |
python | astropy__astropy | astropy/stats/sigma_clipping.py | {
"start": 901,
"end": 33766
} | class ____:
"""
Class to perform sigma clipping.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < center - (sigma_lower * std)
data > center + (sigma_upper * std)
where::
center = cenfunc(data [, axis=])
std = stdfunc(data [, axis=])
Invalid data values (i.e., NaN or inf) are automatically clipped.
For a functional interface to sigma clipping, see
:func:`sigma_clip`.
.. note::
`scipy.stats.sigmaclip` provides a subset of the functionality
in this class. Also, its input data cannot be a masked array
and it does not handle data that contains invalid values (i.e.,
NaN or inf). Also note that it uses the mean as the centering
function. The equivalent settings to `scipy.stats.sigmaclip`
are::
sigclip = SigmaClip(sigma=4., cenfunc='mean', maxiters=None)
sigclip(data, axis=None, masked=False, return_bounds=True)
Parameters
----------
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
See Also
--------
sigma_clip, sigma_clipped_stats, SigmaClippedStats
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
To preserve accuracy, bottleneck is only used for float64 computations.
.. _bottleneck: https://github.com/pydata/bottleneck
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=2, maxiters=5)
>>> filtered_data = sigclip(randvar)
This example clips all points that are more than 3 sigma relative
to the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and modifies the data in-place::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=3, maxiters=None, cenfunc='mean')
>>> filtered_data = sigclip(randvar, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> sigclip = SigmaClip(sigma=2.3)
>>> filtered_data = sigclip(data, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
def __init__(
self,
sigma: float = 3.0,
sigma_lower: float | None = None,
sigma_upper: float | None = None,
maxiters: int | None = 5,
cenfunc: Literal["median", "mean"] | Callable = "median",
stdfunc: Literal["std", "mad_std"] | Callable = "std",
grow: float | Literal[False] | None = False,
) -> None:
self.sigma = sigma
self.sigma_lower = sigma_lower or sigma
self.sigma_upper = sigma_upper or sigma
self.maxiters = maxiters or np.inf
self.cenfunc = cenfunc
self.stdfunc = stdfunc
self._cenfunc_parsed = self._parse_cenfunc(cenfunc)
self._stdfunc_parsed = self._parse_stdfunc(stdfunc)
self._min_value = np.nan
self._max_value = np.nan
self._niterations = 0
self.grow = grow
# This just checks that SciPy is available, to avoid failing
# later than necessary if __call__ needs it:
if self.grow:
from scipy.ndimage import binary_dilation
self._binary_dilation = binary_dilation
def __repr__(self) -> str:
return (
f"SigmaClip(sigma={self.sigma}, sigma_lower={self.sigma_lower},"
f" sigma_upper={self.sigma_upper}, maxiters={self.maxiters},"
f" cenfunc={self.cenfunc!r}, stdfunc={self.stdfunc!r}, grow={self.grow})"
)
def __str__(self) -> str:
lines = ["<" + self.__class__.__name__ + ">"]
attrs = [
"sigma",
"sigma_lower",
"sigma_upper",
"maxiters",
"cenfunc",
"stdfunc",
"grow",
]
for attr in attrs:
lines.append(f" {attr}: {repr(getattr(self, attr))}")
return "\n".join(lines)
@staticmethod
def _parse_cenfunc(
cenfunc: Literal["median", "mean"] | Callable | None,
) -> Callable | None:
if isinstance(cenfunc, str):
if cenfunc == "median":
cenfunc = nanmedian
elif cenfunc == "mean":
cenfunc = nanmean
else:
raise ValueError(f"{cenfunc} is an invalid cenfunc.")
return cenfunc
@staticmethod
def _parse_stdfunc(
stdfunc: Literal["std", "mad_std"] | Callable | None,
) -> Callable | None:
if isinstance(stdfunc, str):
if stdfunc == "std":
stdfunc = nanstd
elif stdfunc == "mad_std":
stdfunc = nanmadstd
else:
raise ValueError(f"{stdfunc} is an invalid stdfunc.")
return stdfunc
def _compute_bounds(
self,
data: ArrayLike,
axis: int | tuple[int, ...] | None = None,
) -> None:
# ignore RuntimeWarning if the array (or along an axis) has only
# NaNs
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
cen = self._cenfunc_parsed(data, axis=axis)
std = self._stdfunc_parsed(data, axis=axis)
self._min_value = cen - (std * self.sigma_lower)
self._max_value = cen + (std * self.sigma_upper)
def _sigmaclip_fast(
self,
data: ArrayLike,
axis: int | tuple[int, ...] | None = None,
masked: bool | None = True,
return_bounds: bool | None = False,
copy: bool | None = True,
) -> (
NDArray
| np.ma.MaskedArray
| tuple[NDArray | np.ma.MaskedArray, float, float]
| tuple[NDArray | np.ma.MaskedArray, NDArray, NDArray]
):
"""
Fast C implementation for simple use cases.
"""
if isinstance(data, Quantity):
data, unit = data.value, data.unit
else:
unit = None
if copy is False and masked is False and data.dtype.kind != "f":
raise Exception(
"cannot mask non-floating-point array with NaN "
"values, set copy=True or masked=True to avoid "
"this."
)
if axis is None:
axis = -1 if data.ndim == 1 else tuple(range(data.ndim))
if not np.iterable(axis):
axis = normalize_axis_index(axis, data.ndim)
data_reshaped = data
transposed_shape = None
else:
# The gufunc implementation does not handle non-scalar axis
# so we combine the dimensions together as the last
# dimension and set axis=-1
axis = tuple(normalize_axis_index(ax, data.ndim) for ax in axis)
transposed_axes = (
tuple(ax for ax in range(data.ndim) if ax not in axis) + axis
)
data_transposed = data.transpose(transposed_axes)
transposed_shape = data_transposed.shape
data_reshaped = data_transposed.reshape(
transposed_shape[: data.ndim - len(axis)] + (-1,)
)
axis = -1
if data_reshaped.dtype.kind != "f" or data_reshaped.dtype.itemsize > 8:
data_reshaped = data_reshaped.astype(float)
mask = ~np.isfinite(data_reshaped)
if np.any(mask):
warnings.warn(
"Input data contains invalid values (NaNs or "
"infs), which were automatically clipped.",
AstropyUserWarning,
)
if isinstance(data_reshaped, np.ma.MaskedArray):
mask |= data_reshaped.mask
data = data.view(np.ndarray)
data_reshaped = data_reshaped.view(np.ndarray)
mask = np.broadcast_to(mask, data_reshaped.shape).copy()
bound_lo, bound_hi = _sigma_clip_fast(
data_reshaped,
mask,
self.cenfunc == "median",
self.stdfunc == "mad_std",
-1 if np.isinf(self.maxiters) else self.maxiters,
self.sigma_lower,
self.sigma_upper,
axis=axis,
)
with np.errstate(invalid="ignore"):
mask |= data_reshaped < np.expand_dims(bound_lo, axis)
mask |= data_reshaped > np.expand_dims(bound_hi, axis)
if transposed_shape is not None:
# Get mask in shape of data.
mask = mask.reshape(transposed_shape)
mask = mask.transpose(
tuple(transposed_axes.index(ax) for ax in range(data.ndim))
)
if masked:
result = np.ma.array(data, mask=mask, copy=copy)
else:
if data.dtype.kind != "f":
# float array type is needed to insert nans into the array
result = data.astype(np.float32) # also makes a copy
else:
if copy:
result = data.copy()
else:
result = data
result[mask] = np.nan
if unit is not None:
result = result << unit
bound_lo = bound_lo << unit
bound_hi = bound_hi << unit
if return_bounds:
return result, bound_lo, bound_hi
else:
return result
def _sigmaclip_noaxis(
self,
data: ArrayLike,
masked: bool | None = True,
return_bounds: bool | None = False,
copy: bool | None = True,
) -> NDArray | np.ma.MaskedArray | tuple[NDArray | np.ma.MaskedArray, float, float]:
"""
Sigma clip when ``axis`` is None and ``grow`` is not >0.
In this simple case, we remove clipped elements from the
flattened array during each iteration.
"""
filtered_data = data.ravel()
# remove masked values and convert to ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = filtered_data._data[~filtered_data.mask]
# remove invalid values
good_mask = np.isfinite(filtered_data)
if np.any(~good_mask):
filtered_data = filtered_data[good_mask]
warnings.warn(
"Input data contains invalid values (NaNs or "
"infs), which were automatically clipped.",
AstropyUserWarning,
)
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
size = filtered_data.size
self._compute_bounds(filtered_data, axis=None)
filtered_data = filtered_data[
(filtered_data >= self._min_value) & (filtered_data <= self._max_value)
]
nchanged = size - filtered_data.size
self._niterations = iteration
if masked:
# return a masked array and optional bounds
filtered_data = np.ma.masked_invalid(data, copy=copy)
# update the mask in place, ignoring RuntimeWarnings for
# comparisons with NaN data values
with np.errstate(invalid="ignore"):
filtered_data.mask |= np.logical_or(
data < self._min_value, data > self._max_value
)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def _sigmaclip_withaxis(
self,
data: ArrayLike,
axis: int | tuple[int, ...] | None = None,
masked: bool | None = True,
return_bounds: bool | None = False,
copy: bool | None = True,
) -> (
NDArray
| np.ma.MaskedArray
| tuple[NDArray | np.ma.MaskedArray, float, float]
| tuple[NDArray | np.ma.MaskedArray, NDArray, NDArray]
):
"""
Sigma clip the data when ``axis`` or ``grow`` is specified.
In this case, we replace clipped values with NaNs as placeholder
values.
"""
if data.dtype.kind != "f":
# float array type is needed to insert nans into the array
filtered_data = data.astype(np.float32) # also makes a copy
else:
filtered_data = data.copy()
# remove invalid values
bad_mask = ~np.isfinite(filtered_data)
if np.any(bad_mask):
filtered_data[bad_mask] = np.nan
warnings.warn(
"Input data contains invalid values (NaNs or "
"infs), which were automatically clipped.",
AstropyUserWarning,
)
# remove masked values and convert to plain ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = np.ma.masked_invalid(filtered_data).astype(float)
filtered_data = filtered_data.filled(np.nan)
if axis is not None:
# convert negative axis/axes
if not np.iterable(axis):
axis = (axis,)
axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis)
# define the shape of min/max arrays so that they can be broadcast
# with the data
mshape = tuple(
1 if dim in axis else size
for dim, size in enumerate(filtered_data.shape)
)
if self.grow:
# Construct a growth kernel from the specified radius in
# pixels (consider caching this for reuse by subsequent
# calls?):
cenidx = int(self.grow)
size = 2 * cenidx + 1
indices = np.mgrid[(slice(0, size),) * data.ndim]
if axis is not None:
for n, dim in enumerate(indices):
# For any axes that we're not clipping over, set
# their indices outside the growth radius, so masked
# points won't "grow" in that dimension:
if n not in axis:
dim[dim != cenidx] = size
kernel = sum((idx - cenidx) ** 2 for idx in indices) <= self.grow**2
del indices
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
self._compute_bounds(filtered_data, axis=axis)
if not np.isscalar(self._min_value):
self._min_value = self._min_value.reshape(mshape)
self._max_value = self._max_value.reshape(mshape)
with np.errstate(invalid="ignore"):
# Since these comparisons are always False for NaNs, the
# resulting mask contains only newly-rejected pixels and
# we can dilate it without growing masked pixels more
# than once.
new_mask = (filtered_data < self._min_value) | (
filtered_data > self._max_value
)
if self.grow:
new_mask = self._binary_dilation(new_mask, kernel)
filtered_data[new_mask] = np.nan
nchanged = np.count_nonzero(new_mask)
del new_mask
self._niterations = iteration
if masked:
# create an output masked array
if copy:
filtered_data = np.ma.MaskedArray(
data, ~np.isfinite(filtered_data), copy=True
)
else:
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
out = np.ma.masked_invalid(data, copy=False)
filtered_data = np.ma.masked_where(
np.logical_or(out < self._min_value, out > self._max_value),
out,
copy=False,
)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
    def __call__(
        self,
        data: ArrayLike,
        axis: int | tuple[int, ...] | None = None,
        masked: bool | None = True,
        return_bounds: bool | None = False,
        copy: bool | None = True,
    ) -> (
        NDArray
        | np.ma.MaskedArray
        | tuple[NDArray | np.ma.MaskedArray, float, float]
        | tuple[NDArray | np.ma.MaskedArray, NDArray, NDArray]
    ):
        """
        Perform sigma clipping on the provided data.

        Parameters
        ----------
        data : array-like or `~numpy.ma.MaskedArray`
            The data to be sigma clipped.

        axis : None or int or tuple of int, optional
            The axis or axes along which to sigma clip the data. If
            `None`, then the flattened data will be used. ``axis`` is
            passed to the ``cenfunc`` and ``stdfunc``. The default is
            `None`.

        masked : bool, optional
            If `True`, then a `~numpy.ma.MaskedArray` is returned, where
            the mask is `True` for clipped values. If `False`, then a
            `~numpy.ndarray` is returned. The default is `True`.

        return_bounds : bool, optional
            If `True`, then the minimum and maximum clipping bounds are
            also returned.

        copy : bool, optional
            If `True`, then the ``data`` array will be copied. If
            `False` and ``masked=True``, then the returned masked array
            data will contain the same array as the input ``data`` (if
            ``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`).
            If `False` and ``masked=False``, the input data is modified
            in-place. The default is `True`.

        Returns
        -------
        result : array-like
            If ``masked=True``, then a `~numpy.ma.MaskedArray` is
            returned, where the mask is `True` for clipped values and
            where the input mask was `True`.

            If ``masked=False``, then a `~numpy.ndarray` is returned.

            If ``return_bounds=True``, then in addition to the masked
            array or array above, the minimum and maximum clipping
            bounds are returned.

            If ``masked=False`` and ``axis=None``, then the output
            array is a flattened 1D `~numpy.ndarray` where the clipped
            values have been removed. If ``return_bounds=True`` then the
            returned minimum and maximum thresholds are scalars.

            If ``masked=False`` and ``axis`` is specified, then the
            output `~numpy.ndarray` will have the same shape as the
            input ``data`` and contain ``np.nan`` where values were
            clipped. In this case, integer-type ``data`` arrays will
            be converted to `~numpy.float32`. If the input ``data``
            was a masked array, then the output `~numpy.ndarray` will
            also contain ``np.nan`` where the input mask was `True`. If
            ``return_bounds=True`` then the returned minimum and maximum
            clipping thresholds will be `~numpy.ndarray`\\s.
        """
        data = np.asanyarray(data)

        # Trivial case: nothing to clip. NOTE(review): the bounds
        # returned here are whatever ``self._min_value``/``_max_value``
        # currently hold — presumably their initial values if this is
        # the first call; confirm against __init__ (not visible here).
        if data.size == 0:
            if masked:
                result = np.ma.MaskedArray(data)
            else:
                result = data

            if return_bounds:
                return result, self._min_value, self._max_value
            else:
                return result

        # Fully-masked input: there are no valid values to compute
        # center/std from, so return the input unchanged (masked) or an
        # all-NaN array of the same shape (unmasked).
        if isinstance(data, np.ma.MaskedArray) and data.mask.all():
            if masked:
                result = data
            else:
                result = np.full(data.shape, np.nan)

            if return_bounds:
                return result, self._min_value, self._max_value
            else:
                return result

        # Shortcut for common cases where a fast C implementation can be
        # used. Only taken when both statistics are given as the
        # built-in string names, an axis is specified, and no mask
        # growing is requested.
        if (
            self.cenfunc in ("mean", "median")
            and self.stdfunc in ("std", "mad_std")
            and axis is not None
            and not self.grow
        ):
            return self._sigmaclip_fast(
                data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy
            )

        # These two cases are treated separately because when
        # ``axis=None`` we can simply remove clipped values from the
        # array. This is not possible when ``axis`` or ``grow`` is
        # specified.
        if axis is None and not self.grow:
            return self._sigmaclip_noaxis(
                data, masked=masked, return_bounds=return_bounds, copy=copy
            )
        else:
            return self._sigmaclip_withaxis(
                data, axis=axis, masked=masked, return_bounds=return_bounds, copy=copy
            )
def sigma_clip(
    data: ArrayLike,
    sigma: float = 3.0,
    sigma_lower: float | None = None,
    sigma_upper: float | None = None,
    maxiters: int | None = 5,
    cenfunc: Literal["median", "mean"] | Callable = "median",
    stdfunc: Literal["std", "mad_std"] | Callable = "std",
    axis: int | tuple[int, ...] | None = None,
    masked: bool | None = True,
    return_bounds: bool | None = False,
    copy: bool | None = True,
    grow: float | Literal[False] | None = False,
) -> ArrayLike | tuple[ArrayLike, float, float] | tuple[ArrayLike, ...]:
    """
    Perform sigma-clipping on the provided data.

    The data are iterated over, each time rejecting values that lie
    more than a given number of standard deviations from a center
    value. A value is clipped (rejected) when::

        data < center - (sigma_lower * std)
        data > center + (sigma_upper * std)

    where::

        center = cenfunc(data [, axis=])
        std = stdfunc(data [, axis=])

    Invalid data values (i.e., NaN or inf) are always clipped.

    This is a functional convenience wrapper: it builds a
    :class:`SigmaClip` instance from the clipping parameters and
    applies it to ``data``. Use :class:`SigmaClip` directly for an
    object-oriented interface.

    .. note::
        `scipy.stats.sigmaclip` provides a subset of this
        functionality: it cannot accept masked arrays, does not handle
        invalid values (NaN or inf), and always centers on the mean.
        Its behavior corresponds to::

            sigma_clip(sigma=4., cenfunc='mean', maxiters=None, axis=None,
            ... masked=False, return_bounds=True)

    Parameters
    ----------
    data : array-like or `~numpy.ma.MaskedArray`
        The data to be sigma clipped.

    sigma : float, optional
        The number of standard deviations used for both the lower and
        upper clipping limits. Overridden by ``sigma_lower`` and
        ``sigma_upper`` when those are given. The default is 3.

    sigma_lower : float or None, optional
        The number of standard deviations for the lower clipping
        limit. When `None`, ``sigma`` is used. The default is `None`.

    sigma_upper : float or None, optional
        The number of standard deviations for the upper clipping
        limit. When `None`, ``sigma`` is used. The default is `None`.

    maxiters : int or None, optional
        The maximum number of clipping iterations, or `None` to
        iterate until convergence (i.e., until an iteration clips
        nothing). Iteration stops early once convergence is reached.
        The default is 5.

    cenfunc : {'median', 'mean'} or callable, optional
        The statistic or callable used to compute the center value for
        clipping. A callable used together with the ``axis`` keyword
        must ignore NaNs (e.g., `numpy.nanmean`) and must accept an
        ``axis`` keyword, returning an array with the axis
        dimension(s) removed. The default is ``'median'``.

    stdfunc : {'std', 'mad_std'} or callable, optional
        The statistic or callable used to compute the standard
        deviation about the center value. A callable used together
        with the ``axis`` keyword must ignore NaNs (e.g.,
        `numpy.nanstd`) and must accept an ``axis`` keyword, returning
        an array with the axis dimension(s) removed. The default is
        ``'std'``.

    axis : None or int or tuple of int, optional
        The axis or axes along which to sigma clip. When `None`, the
        flattened data are used. ``axis`` is forwarded to ``cenfunc``
        and ``stdfunc``. The default is `None`.

    masked : bool, optional
        When `True`, a `~numpy.ma.MaskedArray` is returned with
        clipped values masked. When `False`, a `~numpy.ndarray` is
        returned. The default is `True`.

    return_bounds : bool, optional
        When `True`, the minimum and maximum clipping bounds are
        returned in addition to the clipped data.

    copy : bool, optional
        When `True`, ``data`` is copied. When `False` and
        ``masked=True``, the returned masked array shares its data
        with the input ``data`` (if ``data`` is a `~numpy.ndarray` or
        `~numpy.ma.MaskedArray`). When `False` and ``masked=False``,
        the input data are modified in place. The default is `True`.

    grow : float or `False`, optional
        Radius (in pixels) within which to also mask the neighbours of
        each value that falls outside the clipping limits (applied
        only along ``axis``, if given). For a 2D image, 1 masks the
        nearest pixels in a cross pattern around each deviant pixel,
        while 1.5 additionally rejects the nearest diagonal
        neighbours, and so on.

    Returns
    -------
    result : array-like
        When ``masked=True``, a `~numpy.ma.MaskedArray` whose mask is
        `True` for clipped values and wherever the input mask was
        `True`.

        When ``masked=False``, a `~numpy.ndarray`.

        When ``return_bounds=True``, the minimum and maximum clipping
        bounds are returned in addition to the array above.

        When ``masked=False`` and ``axis=None``, the output is a
        flattened 1D `~numpy.ndarray` with the clipped values removed;
        with ``return_bounds=True`` the returned thresholds are
        scalars.

        When ``masked=False`` and ``axis`` is given, the output
        `~numpy.ndarray` has the same shape as the input ``data`` with
        ``np.nan`` where values were clipped (integer-type inputs are
        converted to `~numpy.float32`). If the input was a masked
        array, the output also contains ``np.nan`` wherever the input
        mask was `True`. With ``return_bounds=True`` the returned
        thresholds are `~numpy.ndarray`\\s.

    See Also
    --------
    SigmaClip, sigma_clipped_stats, SigmaClippedStats

    Notes
    -----
    The best performance is typically obtained by passing ``cenfunc``
    and ``stdfunc`` as one of the built-in string names. If one option
    is a string while the other is a custom callable, installing the
    `bottleneck`_ package may improve performance in some cases. To
    preserve accuracy, bottleneck is only used for float64
    computations.

    .. _bottleneck:  https://github.com/pydata/bottleneck

    Examples
    --------
    This example uses a data array of random variates from a Gaussian
    distribution. We clip all points that are more than 2 sample
    standard deviations from the median. The result is a masked array,
    where the mask is `True` for clipped data::

        >>> from astropy.stats import sigma_clip
        >>> from numpy.random import randn
        >>> randvar = randn(10000)
        >>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)

    This example clips all points that are more than 3 sigma relative
    to the sample *mean*, clips until convergence, returns an unmasked
    `~numpy.ndarray`, and does not copy the data::

        >>> from astropy.stats import sigma_clip
        >>> from numpy.random import randn
        >>> from numpy import mean
        >>> randvar = randn(10000)
        >>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
        ...                            cenfunc=mean, masked=False, copy=False)

    This example sigma clips along one axis::

        >>> from astropy.stats import sigma_clip
        >>> from numpy.random import normal
        >>> from numpy import arange, diag, ones
        >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
        >>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)

    Note that along the other axis, no points would be clipped, as the
    standard deviation is higher.
    """
    # Delegate all of the work to the object-oriented interface: the
    # clipping parameters configure the SigmaClip instance, while the
    # per-call options are forwarded to its __call__.
    clipper = SigmaClip(
        sigma=sigma,
        sigma_lower=sigma_lower,
        sigma_upper=sigma_upper,
        maxiters=maxiters,
        cenfunc=cenfunc,
        stdfunc=stdfunc,
        grow=grow,
    )
    return clipper(
        data,
        axis=axis,
        masked=masked,
        return_bounds=return_bounds,
        copy=copy,
    )
| SigmaClip |
python | PyCQA__pylint | pylint/testutils/_primer/primer.py | {
"start": 669,
"end": 4685
} | class ____:
"""Main class to handle priming of packages."""
def __init__(self, primer_directory: Path, json_path: Path) -> None:
# Preparing arguments
self.primer_directory = primer_directory
self._argument_parser = argparse.ArgumentParser(prog="Pylint Primer")
self._subparsers = self._argument_parser.add_subparsers(
dest="command", required=True
)
# All arguments for the prepare parser
prepare_parser = self._subparsers.add_parser("prepare")
prepare_parser.add_argument(
"--clone", help="Clone all packages.", action="store_true", default=False
)
prepare_parser.add_argument(
"--check",
help="Check consistencies and commits of all packages.",
action="store_true",
default=False,
)
prepare_parser.add_argument(
"--make-commit-string",
help="Get latest commit string.",
action="store_true",
default=False,
)
prepare_parser.add_argument(
"--read-commit-string",
help="Print latest commit string.",
action="store_true",
default=False,
)
# All arguments for the run parser
run_parser = self._subparsers.add_parser("run")
run_parser.add_argument(
"--type", choices=["main", "pr"], required=True, help="Type of primer run."
)
run_parser.add_argument(
"--batches",
required=False,
type=int,
help="Number of batches",
)
run_parser.add_argument(
"--batchIdx",
required=False,
type=int,
help="Portion of primer packages to run.",
)
# All arguments for the compare parser
compare_parser = self._subparsers.add_parser("compare")
compare_parser.add_argument(
"--base-file",
required=True,
help="Location of output file of the base run.",
)
compare_parser.add_argument(
"--new-file",
required=True,
help="Location of output file of the new run.",
)
compare_parser.add_argument(
"--commit",
required=True,
help="Commit hash of the PR commit being checked.",
)
compare_parser.add_argument(
"--batches",
required=False,
type=int,
help="Number of batches (filepaths with the placeholder BATCHIDX will be numbered)",
)
# Storing arguments
self.config = self._argument_parser.parse_args()
self.packages = self._get_packages_to_lint_from_json(json_path)
"""All packages to prime."""
if self.config.command == "prepare":
command_class: type[PrimerCommand] = PrepareCommand
elif self.config.command == "run":
command_class = RunCommand
elif self.config.command == "compare":
command_class = CompareCommand
# pylint: disable-next=possibly-used-before-assignment
self.command = command_class(self.primer_directory, self.packages, self.config)
def run(self) -> None:
self.command.run()
@staticmethod
def _minimum_python_supported(package_data: dict[str, str]) -> bool:
min_python_str = package_data.get("minimum_python", None)
if not min_python_str:
return True
min_python_tuple = tuple(int(n) for n in min_python_str.split("."))
return min_python_tuple <= sys.version_info[:2]
@staticmethod
def _get_packages_to_lint_from_json(json_path: Path) -> dict[str, PackageToLint]:
with open(json_path, encoding="utf8") as f:
return {
name: PackageToLint(**package_data)
for name, package_data in json.load(f).items()
if Primer._minimum_python_supported(package_data)
}
| Primer |
python | walkccc__LeetCode | solutions/2032. Two Out of Three/2032.py | {
"start": 0,
"end": 291
} | class ____:
def twoOutOfThree(
self,
nums1: list[int],
nums2: list[int],
nums3: list[int],
) -> list[int]:
count = collections.Counter()
for nums in nums1, nums2, nums3:
count.update(set(nums))
return [i for i, c in count.items() if c >= 2]
| Solution |
python | pandas-dev__pandas | pandas/core/indexers/objects.py | {
"start": 530,
"end": 3509
} | class ____:
"""
Base class for window bounds calculations.
Parameters
----------
index_array : np.ndarray, default None
Array-like structure representing the indices for the data points.
If None, the default indices are assumed. This can be useful for
handling non-uniform indices in data, such as in time series
with irregular timestamps.
window_size : int, default 0
Size of the moving window. This is the number of observations used
for calculating the statistic. The default is to consider all
observations within the window.
**kwargs
Additional keyword arguments passed to the subclass's methods.
See Also
--------
DataFrame.rolling : Provides rolling window calculations on dataframe.
Series.rolling : Provides rolling window calculations on series.
Examples
--------
>>> from pandas.api.indexers import BaseIndexer
>>> class CustomIndexer(BaseIndexer):
... def get_window_bounds(self, num_values, min_periods, center, closed, step):
... start = np.arange(num_values, dtype=np.int64)
... end = np.arange(num_values, dtype=np.int64) + self.window_size
... return start, end
>>> df = pd.DataFrame({"values": range(5)})
>>> indexer = CustomIndexer(window_size=2)
>>> df.rolling(indexer).sum()
values
0 1.0
1 3.0
2 5.0
3 7.0
4 4.0
"""
def __init__(
self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs
) -> None:
self.index_array = index_array
self.window_size = window_size
# Set user defined kwargs as attributes that can be used in get_window_bounds
for key, value in kwargs.items():
setattr(self, key, value)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
"""
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
step : int, default None
step passed from the top level rolling API
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
raise NotImplementedError
| BaseIndexer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hide02.py | {
"start": 315,
"end": 887
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hide02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet()
worksheet2.very_hidden()
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | celery__celery | t/unit/tasks/test_stamping.py | {
"start": 779,
"end": 1579
} | class ____(StampingVisitor):
def clean_stamps(self, actual_sig: Signature) -> None:
if "stamped_headers" in actual_sig.options and actual_sig.options["stamped_headers"]:
for stamp in actual_sig.options["stamped_headers"]:
if stamp in actual_sig.options:
actual_sig.options.pop(stamp)
def clean_links(self, actual_sig: Signature) -> None:
if "link" in actual_sig.options:
actual_sig.options.pop("link")
if "link_error" in actual_sig.options:
actual_sig.options.pop("link_error")
def on_signature(self, actual_sig: Signature, **headers) -> dict:
self.clean_stamps(actual_sig)
self.clean_links(actual_sig)
return super().on_signature(actual_sig, **headers)
| CleanupVisitor |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py | {
"start": 453,
"end": 530
} | class ____(ABC):
@abstractmethod
def method(self):
foo()
| Base_2 |
python | wandb__wandb | wandb/vendor/pygments/lexers/parsers.py | {
"start": 1277,
"end": 5102
} | class ____(RegexLexer):
"""
A pure `Ragel <http://www.complang.org/ragel/>`_ lexer. Use this for
fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
(or one of the language-specific subclasses).
.. versionadded:: 1.1
"""
name = 'Ragel'
aliases = ['ragel']
filenames = []
tokens = {
'whitespace': [
(r'\s+', Whitespace)
],
'comments': [
(r'\#.*$', Comment),
],
'keywords': [
(r'(access|action|alphtype)\b', Keyword),
(r'(getkey|write|machine|include)\b', Keyword),
(r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
(r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
],
'numbers': [
(r'0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
'literals': [
(r'"(\\\\|\\"|[^"])*"', String), # double quote string
(r"'(\\\\|\\'|[^'])*'", String), # single quote string
(r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals
(r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions
],
'identifiers': [
(r'[a-zA-Z_]\w*', Name.Variable),
],
'operators': [
(r',', Operator), # Join
(r'\||&|--?', Operator), # Union, Intersection and Subtraction
(r'\.|<:|:>>?', Operator), # Concatention
(r':', Operator), # Label
(r'->', Operator), # Epsilon Transition
(r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
(r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
(r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
(r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
(r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
(r'>|@|\$|%', Operator), # Transition Actions and Priorities
(r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator), # Repetition
(r'!|\^', Operator), # Negation
(r'\(|\)', Operator), # Grouping
],
'root': [
include('literals'),
include('whitespace'),
include('comments'),
include('keywords'),
include('numbers'),
include('identifiers'),
include('operators'),
(r'\{', Punctuation, 'host'),
(r'=', Operator),
(r';', Punctuation),
],
'host': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^{}\'"/#]+', # exclude unsafe characters
r'[^\\]\\[{}]', # allow escaped { or }
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'\#.*$\n?', # ruby comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
}
| RagelLexer |
python | jina-ai__jina | jina/resources/base-gateway/gateway.py | {
"start": 62,
"end": 110
} | class ____(BaseGateway):
pass
| PlaceHolderGateway |
python | scipy__scipy | scipy/sparse/linalg/_isolve/tests/test_lsmr.py | {
"start": 4159,
"end": 6362
} | class ____:
def setup_method(self):
self.n = 10
self.A = lowerBidiagonalMatrix(20, self.n)
self.xtrue = transpose(arange(self.n, 0, -1))
self.Afun = aslinearoperator(self.A)
self.b = self.Afun.matvec(self.xtrue)
self.x0 = ones(self.n)
self.x00 = self.x0.copy()
self.returnValues = lsmr(self.A, self.b)
self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)
def test_unchanged_x0(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValuesX0
assert_allclose(self.x00, self.x0)
def testNormr(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert norm(self.b - self.Afun.matvec(x)) == pytest.approx(normr)
def testNormar(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert (norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x)))
== pytest.approx(normar))
def testNormx(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert norm(x) == pytest.approx(normx)
def lowerBidiagonalMatrix(m, n):
# This is a simple example for testing LSMR.
# It uses the leading m*n submatrix from
# A = [ 1
# 1 2
# 2 3
# 3 4
# ...
# n ]
# suitably padded by zeros.
#
# 04 Jun 2010: First version for distribution with lsmr.py
if m <= n:
row = hstack((arange(m, dtype=int),
arange(1, m, dtype=int)))
col = hstack((arange(m, dtype=int),
arange(m-1, dtype=int)))
data = hstack((arange(1, m+1, dtype=float),
arange(1,m, dtype=float)))
return coo_array((data, (row, col)), shape=(m,n))
else:
row = hstack((arange(n, dtype=int),
arange(1, n+1, dtype=int)))
col = hstack((arange(n, dtype=int),
arange(n, dtype=int)))
data = hstack((arange(1, n+1, dtype=float),
arange(1,n+1, dtype=float)))
return coo_array((data,(row, col)), shape=(m,n))
| TestLSMRReturns |
python | huggingface__transformers | src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py | {
"start": 3633,
"end": 3810
} | class ____(BambaMixer):
def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int):
super().__init__(BambaConfig(config), layer_idx)
| GraniteMoeHybridMambaLayer |
python | Textualize__textual | docs/examples/guide/widgets/hello02.py | {
"start": 223,
"end": 403
} | class ____(App):
CSS_PATH = "hello02.tcss"
def compose(self) -> ComposeResult:
yield Hello()
if __name__ == "__main__":
app = CustomApp()
app.run()
| CustomApp |
python | getsentry__sentry | tests/apidocs/endpoints/teams/test_by_slug.py | {
"start": 136,
"end": 932
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
team = self.create_team(organization=self.organization)
self.url = reverse(
"sentry-api-0-team-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"team_id_or_slug": team.slug,
},
)
self.login_as(user=self.user)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_put(self) -> None:
data = {"name": "foo"}
response = self.client.put(self.url, data)
request = RequestFactory().put(self.url, data)
self.validate_schema(request, response)
| TeamsBySlugDocs |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/testdata/gen_tf_readvariableop_model.py | {
"start": 1255,
"end": 2272
} | class ____(module.Module):
"""Simple model with two variables."""
def __init__(self):
self.var1 = variables.Variable(
np.array([[[13.]]], dtype=np.float32), name="var1")
self.var2 = variables.Variable(
np.array([[[37.]]], dtype=np.float32), name="var2")
@def_function.function
def __call__(self, input1, input2):
mul1 = input1 * self.var1
mul2 = input2 * self.var2
add = mul1 + mul2
sub = add - 45.
return array_ops.identity(sub, name="output")
def GenerateModelWithReadVariableOp(tf_saved_model_dir):
"""Generate a model with ReadVariableOp nodes."""
my_model = MyModel()
cfunc = my_model.__call__.get_concrete_function(
tensor_spec.TensorSpec([None, 1, 1], dtypes.float32),
tensor_spec.TensorSpec([None, 1, 1], dtypes.float32))
# pylint: disable=not-callable
save(my_model, tf_saved_model_dir, signatures=cfunc)
if __name__ == "__main__":
GenerateModelWithReadVariableOp(
tf_saved_model_dir="tf_readvariableop_saved_model")
| MyModel |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 437,
"end": 809
} | class ____:
collection: Collection
knn: Segment
metadata: Segment
record: Segment
@property
def version(self) -> RequestVersionContext:
return RequestVersionContext(
collection_version=self.collection.version,
log_position=self.collection.log_position,
)
# Where expression types for filtering
@dataclass
| Scan |
python | getsentry__sentry | src/sentry/integrations/api/serializers/rest_framework/doc_integration.py | {
"start": 1128,
"end": 4076
} | class ____(Serializer):
name = serializers.CharField(max_length=255)
author = serializers.CharField(max_length=255)
description = serializers.CharField()
url = URLField()
popularity = serializers.IntegerField(min_value=0, max_value=32767, allow_null=True)
is_draft = serializers.BooleanField(default=True)
metadata = MetadataField(allow_null=True, required=False)
features = serializers.MultipleChoiceField(
choices=Feature.as_choices(), allow_blank=True, allow_null=True, required=False
)
def validate_name(self, value: str) -> str:
slug = sentry_slugify(value)
max_slug_length = DocIntegration._meta.get_field("slug").max_length or -1
if len(slug) > max_slug_length:
raise ValidationError(
f"Generated slug '{slug}' is too long, please use a shorter name."
)
# Only check this for validating creation, not updates
if self.instance is None:
queryset = DocIntegration.objects.filter(slug=slug)
if queryset.exists():
raise ValidationError(f"Name '{value}' is already taken, please use another.")
return value
def create(self, validated_data: MutableMapping[str, Any]) -> DocIntegration:
# sentry_slugify ensures the slug is not entirely numeric
slug = sentry_slugify(validated_data["name"])
features = validated_data.pop("features") if validated_data.get("features") else []
with transaction.atomic(router.db_for_write(DocIntegration)):
doc_integration = DocIntegration.objects.create(slug=slug, **validated_data)
IntegrationFeature.objects.bulk_create(
[
IntegrationFeature(
target_id=doc_integration.id,
target_type=IntegrationTypes.DOC_INTEGRATION.value,
feature=feature,
)
for feature in features
]
)
return doc_integration
def update(
self, doc_integration: DocIntegration, validated_data: MutableMapping[str, Any]
) -> DocIntegration:
if validated_data.get("features"):
features = validated_data.pop("features")
IntegrationFeature.objects.clean_update(
incoming_features=features,
target=doc_integration,
target_type=IntegrationTypes.DOC_INTEGRATION,
)
# If we're publishing...
if not validated_data.get("is_draft", True):
if not doc_integration.avatar.exists():
raise serializers.ValidationError({"avatar": "A logo is required for publishing."})
# Update the DocIntegration
for key, value in validated_data.items():
setattr(doc_integration, key, value)
doc_integration.save()
return doc_integration
| DocIntegrationSerializer |
python | great-expectations__great_expectations | docs/sphinx_api_docs_source/public_api_report.py | {
"start": 28813,
"end": 35972
} | class ____:
"""Generate a report from entity definitions (class, method and function)."""
def __init__(self, definitions: Set[Definition], repo_root: pathlib.Path) -> None:
"""Create a PublicAPIReport object.
Args:
definitions: Entity definitions to include in the report. Generally,
these are filtered before inclusion.
repo_root: Path to the repo root for stripping filenames relative
to the repository root.
"""
self.definitions = definitions
self.repo_root = repo_root
def write_printable_definitions_to_file(
self,
filepath: pathlib.Path,
) -> None:
"""Generate then write the printable version of definitions to a file.
Args:
filepath: Output filepath.
"""
printable_definitions = self.generate_printable_definitions()
with open(filepath, "w") as f:
f.write("\n".join([str(d) for d in printable_definitions]))
def generate_printable_definitions(
self,
) -> List[PrintableDefinition]:
"""Generate a printable (human readable) definition.
Returns:
List of strings representing each Definition.
"""
sorted_definitions_list = sorted(
list(self.definitions), key=operator.attrgetter("filepath", "name")
)
sorted_printable_definitions: List[PrintableDefinition] = []
for definition in sorted_definitions_list:
if definition.filepath.is_absolute():
filepath = definition.filepath.relative_to(self.repo_root)
else:
filepath = definition.filepath
printable_definition = PrintableDefinition(
file=filepath, name=definition.name
)
sorted_printable_definitions.append(printable_definition)
return self._deduplicate_definitions(sorted_printable_definitions)
def _deduplicate_definitions(
self, printable_definitions: List[PrintableDefinition]
) -> List[PrintableDefinition]:
"""Deduplicate a list of strings, keeping order intact."""
seen = set()
no_duplicates = []
for definition in printable_definitions:
if definition not in seen:
no_duplicates.append(definition)
seen.add(definition)
return no_duplicates
def _repo_root() -> pathlib.Path:
repo_root_path = pathlib.Path(__file__).parents[2]
return repo_root_path
def _default_doc_example_absolute_paths() -> Set[pathlib.Path]:
"""Get all paths of doc examples (docs examples)."""
base_directory = _repo_root() / "docs" / "docusaurus" / "docs"
paths = base_directory.rglob("*.py")
return set(paths)
def _default_code_absolute_paths() -> Set[pathlib.Path]:
"""All Great Expectations modules related to the main library."""
base_directory = _repo_root() / "great_expectations"
paths = base_directory.rglob("**/*.py")
return set(paths)
def _default_docs_absolute_paths() -> Set[pathlib.Path]:
"""All Great Expectations modules related to the main library."""
base_directory = _repo_root() / "docs"
paths: list[pathlib.Path] = []
for extension in ("md", "mdx", "yml", "yaml"):
paths.extend(base_directory.rglob(f"**/*.{extension}"))
return set(paths)
def _parse_file_to_ast_tree(filepath: pathlib.Path) -> ast.AST:
with open(filepath) as f:
file_contents: str = f.read()
tree = ast.parse(file_contents)
return tree
def generate_public_api_report(write_to_file: bool = False) -> None:
    """Audit public-API coverage of documented definitions and report discrepancies.

    Parses the doc examples and library code, computes which definitions are
    referenced in documentation but not decorated with @public_api, and compares
    that set against the allow-list in
    public_api_missing_threshold.ITEMS_IGNORED_FROM_PUBLIC_API. Exits the
    process with status 1 when either:

    * an item is referenced in docs but neither decorated nor ignored, or
    * an ignored item no longer needs ignoring (so it should be removed
      from the allow-list).

    Args:
        write_to_file: If True, also write the printable definitions to
            ``public_api_report.txt`` at the repository root.
    """
    docs_example_file_contents = FileContents.create_from_local_files(
        _default_doc_example_absolute_paths()
    )
    code_file_contents = FileContents.create_from_local_files(
        _default_code_absolute_paths()
    )
    references_from_docs_content = parse_docs_contents_for_class_names(
        FileContents.create_from_local_files(_default_docs_absolute_paths())
    )

    docs_example_parser = DocsExampleParser(file_contents=docs_example_file_contents)
    code_parser = CodeParser(file_contents=code_file_contents)
    public_api_checker = PublicAPIChecker(code_parser=code_parser)
    code_reference_filter = CodeReferenceFilter(
        repo_root=_repo_root(),
        docs_example_parser=docs_example_parser,
        code_parser=code_parser,
        public_api_checker=public_api_checker,
        references_from_docs_content=references_from_docs_content,
    )
    filtered_definitions = code_reference_filter.filter_definitions()
    public_api_report = PublicAPIReport(
        definitions=filtered_definitions, repo_root=_repo_root()
    )
    missing_from_the_public_api = public_api_report.generate_printable_definitions()

    # The missing_threshold should be reduced and kept at 0. Please do
    # not increase this threshold, but instead add to the public API by decorating
    # any methods or classes you are adding to documentation with the @public_api
    # decorator and any relevant "new" or "deprecated" public api decorators.
    # If the actual is lower than the threshold, please reduce the threshold.
    # (Previously this value was computed twice with identical expressions;
    # it is now computed once.)
    missing_threshold = len(
        public_api_missing_threshold.ITEMS_IGNORED_FROM_PUBLIC_API
    )  # TODO: reduce this number again once this works for the Fluent DS dynamic methods
    logger.info(
        f"Number of items referenced in docs not decorated with @public_api: {len(missing_from_the_public_api)}"
    )
    logger.info(
        f"Number of items we allow to be missing the decorator: {missing_threshold}."
    )

    has_errors = False
    undocumented_and_unignored = set(missing_from_the_public_api) - set(
        public_api_missing_threshold.ITEMS_IGNORED_FROM_PUBLIC_API
    )
    documented_and_ignored = set(
        public_api_missing_threshold.ITEMS_IGNORED_FROM_PUBLIC_API
    ) - set(missing_from_the_public_api)
    if undocumented_and_unignored:
        logger.error(
            f"Items are missing from the public API: {len(undocumented_and_unignored)}"
        )
        for item in sorted(undocumented_and_unignored):
            logger.error(" - " + str(item))
        has_errors = True
    if documented_and_ignored:
        logger.error(
            f"Items that should be removed from public_api_missing_threshold.ITEMS_IGNORED_FROM_PUBLIC_API: {len(documented_and_ignored)}"
        )
        for item in sorted(documented_and_ignored):
            logger.error(" - " + str(item))
        has_errors = True
    if has_errors:
        sys.exit(1)

    logger.info(
        "All of the missing items are accounted for in the missing threshold, but this threshold should be reduced to 0 over time."
    )
    if write_to_file:
        public_api_report.write_printable_definitions_to_file(
            filepath=_repo_root() / "public_api_report.txt",
        )
if __name__ == "__main__":
generate_public_api_report()
| PublicAPIReport |
python | weaviate__weaviate-python-client | weaviate/collections/queries/fetch_objects_by_ids/generate/sync.py | {
"start": 332,
"end": 495
class ____(
    Generic[Properties, References],
    _FetchObjectsByIDsGenerateExecutor[ConnectionSync, Properties, References],
):
    # Synchronous variant: all behavior is inherited from
    # _FetchObjectsByIDsGenerateExecutor, specialized to ConnectionSync.
    pass
| _FetchObjectsByIDsGenerate |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 99530,
"end": 100987
class ____(system_info):
    # Config-file section name consumed by the system_info machinery.
    section = 'numerix'

    def calc_info(self):
        """Resolve which numeric backend ('numpy', 'numeric' or 'numarray')
        to use, export the choice via the NUMERIX environment variable, and
        register that backend's info via set_info()."""
        which = None, None
        if os.getenv("NUMERIX"):
            which = os.getenv("NUMERIX"), "environment var"
        # If all the above fail, default to numpy.
        if which[0] is None:
            which = "numpy", "defaulted"
            try:
                import numpy  # noqa: F401
                which = "numpy", "defaulted"
            except ImportError as e:
                msg1 = str(e)
                try:
                    import Numeric  # noqa: F401
                    which = "numeric", "defaulted"
                except ImportError as e:
                    msg2 = str(e)
                    try:
                        import numarray  # noqa: F401
                        which = "numarray", "defaulted"
                    except ImportError as e:
                        msg3 = str(e)
                        # All three import attempts failed: log each failure.
                        log.info(msg1)
                        log.info(msg2)
                        log.info(msg3)
        # Normalize the selector before validating it.
        which = which[0].strip().lower(), which[1]
        if which[0] not in ["numeric", "numarray", "numpy"]:
            raise ValueError("numerix selector must be either 'Numeric' "
                             "or 'numarray' or 'numpy' but the value obtained"
                             " from the %s was '%s'." % (which[1], which[0]))
        os.environ['NUMERIX'] = which[0]
        self.set_info(**get_info(which[0]))
| numerix_info |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 122715,
"end": 125128
class ____:
    """Tests for scipy.stats.unitary_group (Haar-distributed unitary matrices)."""

    def test_reproducibility(self):
        # The same seed, given either as a RandomState or as an int,
        # must produce the same matrix.
        rng = np.random.RandomState(514)
        x = unitary_group.rvs(3, random_state=rng)
        x2 = unitary_group.rvs(3, random_state=514)
        expected = np.array(
            [[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j],
             [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j],
             [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]]
        )
        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)

    def test_invalid_dim(self):
        # Dimension must be a positive integer scalar.
        assert_raises(ValueError, unitary_group.rvs, None)
        assert_raises(ValueError, unitary_group.rvs, (2, 2))
        assert_raises(ValueError, unitary_group.rvs, -1)
        assert_raises(ValueError, unitary_group.rvs, 2.5)

    def test_frozen_matrix(self):
        # Frozen distributions must match the unfrozen form for the same seed.
        dim = 7
        frozen = unitary_group(dim)
        frozen_seed = unitary_group(dim, seed=514)
        rvs1 = frozen.rvs(random_state=514)
        rvs2 = unitary_group.rvs(dim, random_state=514)
        rvs3 = frozen_seed.rvs(size=1)
        assert_equal(rvs1, rvs2)
        assert_equal(rvs1, rvs3)

    def test_unitarity(self):
        # Three samples for each dimension from 2 through 11.
        xs = [unitary_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]
        # Test that these are unitary matrices
        for x in xs:
            assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15)

    def test_haar(self):
        # Test that the eigenvalues, which lie on the unit circle in
        # the complex plane, are uncorrelated.

        # Generate samples
        for dim in (1, 5):
            samples = 1000  # Not too many, or the test takes too long
            # Note that the test is sensitive to seed too
            xs = unitary_group.rvs(
                dim, size=samples, random_state=np.random.default_rng(514)
            )

            # The angles "x" of the eigenvalues should be uniformly distributed
            # Overall this seems to be a necessary but weak test of the distribution.
            eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
            x = np.arctan2(eigs.imag, eigs.real)
            res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
            assert_(res.pvalue > 0.05)

    def test_zero_by_zero(self):
        # dim=0 is allowed and yields empty 0x0 matrices of the requested size.
        assert_equal(unitary_group.rvs(0, size=4).shape, (4, 0, 0))
python | pola-rs__polars | py-polars/src/polars/_dependencies.py | {
"start": 675,
"end": 11448
class ____(ModuleType):
    """
    Module that can act both as a lazy-loader and as a proxy.

    Notes
    -----
    We do NOT register this module with `sys.modules` so as not to cause
    confusion in the global environment. This way we have a valid proxy
    module for our own use, but it lives *exclusively* within polars.
    """

    # Marker so callers can detect a lazy proxy module.
    __lazy__ = True

    # Conventional aliases used to prefix attribute names in error messages
    # (e.g. "np.ndarray requires 'numpy' module to be installed").
    _mod_pfx: ClassVar[dict[str, str]] = {
        "numpy": "np.",
        "pandas": "pd.",
        "pyarrow": "pa.",
        "polars_cloud": "pc.",
    }

    def __init__(
        self,
        module_name: str,
        *,
        module_available: bool,
    ) -> None:
        """
        Initialise lazy-loading proxy module.

        Parameters
        ----------
        module_name : str
            the name of the module to lazy-load (if available).
        module_available : bool
            indicate if the referenced module is actually available (we will proxy it
            in both cases, but raise a helpful error when invoked if it doesn't exist).
        """
        self._module_available = module_available
        self._module_name = module_name
        # Captured so _import() can swap the proxy for the real module later.
        self._globals = globals()
        super().__init__(module_name)

    def _import(self) -> ModuleType:
        # import the referenced module, replacing the proxy in this module's globals
        module = import_module(self.__name__)
        self._globals[self._module_name] = module
        self.__dict__.update(module.__dict__)
        return module

    def __getattr__(self, name: str) -> Any:
        # have "hasattr('__wrapped__')" return False without triggering import
        # (it's for decorators, not modules, but keeps "make doctest" happy)
        if name == "__wrapped__":
            msg = f"{self._module_name!r} object has no attribute {name!r}"
            raise AttributeError(msg)

        # accessing the proxy module's attributes triggers import of the real thing
        if self._module_available:
            # import the module and return the requested attribute
            module = self._import()
            return getattr(module, name)

        # user has not installed the proxied/lazy module
        elif name == "__name__":
            return self._module_name
        elif re.match(r"^__\w+__$", name) and name != "__version__":
            # allow some minimal introspection on private module
            # attrs to avoid unnecessary error-handling elsewhere
            return None
        else:
            # all other attribute access raises a helpful exception
            pfx = self._mod_pfx.get(self._module_name, "")
            msg = f"{pfx}{name} requires {self._module_name!r} module to be installed"
            raise ModuleNotFoundError(msg) from None
def _lazy_import(module_name: str) -> tuple[ModuleType, bool]:
    """
    Lazy import the given module; avoids up-front import costs.

    Parameters
    ----------
    module_name : str
        name of the module to import, eg: "pyarrow".

    Notes
    -----
    If the requested module is not available (eg: has not been installed), a proxy
    module is created in its place, which raises an exception on any attribute
    access. This allows for import and use as normal, without requiring explicit
    guard conditions - if the module is never used, no exception occurs; if it
    is, then a helpful exception is raised.

    Returns
    -------
    tuple of (Module, bool)
        A lazy-loading module and a boolean indicating if the requested/underlying
        module exists (if not, the returned module is a proxy).
    """
    # check if module is LOADED
    if module_name in sys.modules:
        return sys.modules[module_name], True

    # check if module is AVAILABLE
    try:
        module_spec = find_spec(module_name)
        module_available = not (module_spec is None or module_spec.loader is None)
    except ModuleNotFoundError:
        # find_spec itself raises when a parent package is missing
        module_available = False

    # create lazy/proxy module that imports the real one on first use
    # (or raises an explanatory ModuleNotFoundError if not available)
    return (
        _LazyModule(
            module_name=module_name,
            module_available=module_available,
        ),
        module_available,
    )
if TYPE_CHECKING:
import dataclasses
import html
import json
import pickle
import subprocess
import altair
import boto3
import deltalake
import fsspec
import gevent
import great_tables
import hypothesis
import numpy
import pandas
import polars_cloud
import pyarrow
import pydantic
import pyiceberg
import pyiceberg.schema
import pytz
import torch
else:
# infrequently-used builtins
dataclasses, _ = _lazy_import("dataclasses")
html, _ = _lazy_import("html")
json, _ = _lazy_import("json")
pickle, _ = _lazy_import("pickle")
subprocess, _ = _lazy_import("subprocess")
# heavy/optional third party libs
altair, _ALTAIR_AVAILABLE = _lazy_import("altair")
boto3, _BOTO3_AVAILABLE = _lazy_import("boto3")
deltalake, _DELTALAKE_AVAILABLE = _lazy_import("deltalake")
fsspec, _FSSPEC_AVAILABLE = _lazy_import("fsspec")
gevent, _GEVENT_AVAILABLE = _lazy_import("gevent")
great_tables, _GREAT_TABLES_AVAILABLE = _lazy_import("great_tables")
hypothesis, _HYPOTHESIS_AVAILABLE = _lazy_import("hypothesis")
numpy, _NUMPY_AVAILABLE = _lazy_import("numpy")
pandas, _PANDAS_AVAILABLE = _lazy_import("pandas")
polars_cloud, _POLARS_CLOUD_AVAILABLE = _lazy_import("polars_cloud")
pyarrow, _PYARROW_AVAILABLE = _lazy_import("pyarrow")
pydantic, _PYDANTIC_AVAILABLE = _lazy_import("pydantic")
pyiceberg, _PYICEBERG_AVAILABLE = _lazy_import("pyiceberg")
torch, _TORCH_AVAILABLE = _lazy_import("torch")
pytz, _PYTZ_AVAILABLE = _lazy_import("pytz")
@cache
def _might_be(cls: type, type_: str) -> bool:
# infer whether the given class "might" be associated with the given
# module (in which case it's reasonable to do a real isinstance check;
# we defer that so as not to unnecessarily trigger module import)
try:
return any(f"{type_}." in str(o) for o in cls.mro())
except TypeError:
return False
# Cheap guards: each returns True only when the relevant package is installed
# AND the object (or its type, when check_type=True) plausibly originates
# from it — without triggering an import of that package.
def _check_for_numpy(obj: Any, *, check_type: bool = True) -> bool:
    return _NUMPY_AVAILABLE and _might_be(
        cast("Hashable", type(obj) if check_type else obj), "numpy"
    )


def _check_for_pandas(obj: Any, *, check_type: bool = True) -> bool:
    return _PANDAS_AVAILABLE and _might_be(
        cast("Hashable", type(obj) if check_type else obj), "pandas"
    )


def _check_for_pyarrow(obj: Any, *, check_type: bool = True) -> bool:
    return _PYARROW_AVAILABLE and _might_be(
        cast("Hashable", type(obj) if check_type else obj), "pyarrow"
    )


def _check_for_pydantic(obj: Any, *, check_type: bool = True) -> bool:
    return _PYDANTIC_AVAILABLE and _might_be(
        cast("Hashable", type(obj) if check_type else obj), "pydantic"
    )


def _check_for_torch(obj: Any, *, check_type: bool = True) -> bool:
    return _TORCH_AVAILABLE and _might_be(
        cast("Hashable", type(obj) if check_type else obj), "torch"
    )


def _check_for_pytz(obj: Any, *, check_type: bool = True) -> bool:
    return _PYTZ_AVAILABLE and _might_be(
        cast("Hashable", type(obj) if check_type else obj), "pytz"
    )
def import_optional(
    module_name: str,
    err_prefix: str = "required package",
    err_suffix: str = "not found",
    min_version: str | tuple[int, ...] | None = None,
    min_err_prefix: str = "requires",
    install_message: str | None = None,
) -> Any:
    """
    Import an optional dependency, returning the module.

    Parameters
    ----------
    module_name : str
        Name of the dependency to import.
    err_prefix : str, optional
        Error prefix to use in the raised exception (appears before the module name).
    err_suffix: str, optional
        Error suffix to use in the raised exception (follows the module name).
    min_version : {str, tuple[int]}, optional
        If a minimum module version is required, specify it here.
    min_err_prefix : str, optional
        Override the standard "requires" prefix for the minimum version error message.
    install_message : str, optional
        Override the standard "Please install it using..." exception message fragment.

    Returns
    -------
    ModuleType
        The imported module.

    Raises
    ------
    ModuleNotFoundError
        If the module cannot be imported.
    ModuleUpgradeRequiredError
        If the module is installed but older than `min_version`.

    Examples
    --------
    >>> from polars._dependencies import import_optional
    >>> import_optional(
    ...     "definitely_a_real_module",
    ...     err_prefix="super-important package",
    ... )  # doctest: +SKIP
    ImportError: super-important package 'definitely_a_real_module' not installed.
    Please install it using the command `pip install definitely_a_real_module`.
    """
    from polars._utils.various import parse_version
    from polars.exceptions import ModuleUpgradeRequiredError

    # Install hints and version checks are keyed on the top-level package name.
    module_root = module_name.split(".", 1)[0]
    try:
        module = import_module(module_name)
    except ImportError:
        prefix = f"{err_prefix.strip(' ')} " if err_prefix else ""
        suffix = f" {err_suffix.strip(' ')}" if err_suffix else ""
        err_message = f"{prefix}'{module_name}'{suffix}.\n" + (
            install_message
            or f"Please install using the command `pip install {module_root}`."
        )
        raise ModuleNotFoundError(err_message) from None

    if min_version:
        min_version = parse_version(min_version)
        mod_version = parse_version(module.__version__)
        if mod_version < min_version:
            msg = (
                f"{min_err_prefix} {module_root} "
                f"{'.'.join(str(v) for v in min_version)} or higher"
                f" (found {'.'.join(str(v) for v in mod_version)})"
            )
            raise ModuleUpgradeRequiredError(msg)

    return module
__all__ = [
# lazy-load rarely-used/heavy builtins (for fast startup)
"dataclasses",
"html",
"json",
"pickle",
"subprocess",
# lazy-load third party libs
"altair",
"boto3",
"deltalake",
"fsspec",
"gevent",
"great_tables",
"numpy",
"pandas",
"polars_cloud",
"pydantic",
"pyiceberg",
"pyarrow",
"torch",
"pytz",
# lazy utilities
"_check_for_numpy",
"_check_for_pandas",
"_check_for_pyarrow",
"_check_for_pydantic",
"_check_for_torch",
"_check_for_pytz",
# exported flags/guards
"_ALTAIR_AVAILABLE",
"_DELTALAKE_AVAILABLE",
"_FSSPEC_AVAILABLE",
"_GEVENT_AVAILABLE",
"_GREAT_TABLES_AVAILABLE",
"_HYPOTHESIS_AVAILABLE",
"_NUMPY_AVAILABLE",
"_PANDAS_AVAILABLE",
"_POLARS_CLOUD_AVAILABLE",
"_PYARROW_AVAILABLE",
"_PYDANTIC_AVAILABLE",
"_PYICEBERG_AVAILABLE",
"_TORCH_AVAILABLE",
]
| _LazyModule |
python | PrefectHQ__prefect | tests/_experimental/plugins/test_plugins.py | {
"start": 20407,
"end": 20922
class ____:
    """Tests for SetupSummary data structure."""

    def test_setup_summary_creation(self):
        """Test creating a SetupSummary."""
        # Construct with every field populated and verify each is stored unchanged.
        summary = SetupSummary(
            plugin="test-plugin",
            env_preview={"KEY": "value"},
            note="Test note",
            error=None,
        )
        assert summary.plugin == "test-plugin"
        assert summary.env_preview == {"KEY": "value"}
        assert summary.note == "Test note"
        assert summary.error is None
| TestSetupSummary |
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 16239,
"end": 16388
class ____(
    BaseGroupingComponent[HostnameGroupingComponent | SaltGroupingComponent]
):
    # Grouping component for Expect-Staple reports; accepts hostname or
    # salt components as children.
    id: str = "expect_staple"
| ExpectStapleGroupingComponent |
python | ansible__ansible | lib/ansible/plugins/inventory/constructed.py | {
"start": 4143,
"end": 7353
class ____(BaseInventoryPlugin, Constructable):
    """ constructs groups and vars using Jinja2 template expressions """

    NAME = 'constructed'

    # implicit trust behavior is already added by the YAML parser invoked by the loader

    def verify_file(self, path):
        """Accept extensionless files, '.config', or any configured YAML extension."""
        valid = False
        if super(InventoryModule, self).verify_file(path):
            file_name, ext = os.path.splitext(path)
            if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
                valid = True
        return valid

    def get_all_host_vars(self, host, loader, sources):
        """ requires host object """
        # Group vars first so host vars take precedence in the merge.
        return combine_vars(self.host_groupvars(host, loader, sources), self.host_vars(host, loader, sources))

    def host_groupvars(self, host, loader, sources):
        """ requires host object """
        gvars = get_group_vars(host.get_groups())

        if self.get_option('use_vars_plugins'):
            gvars = combine_vars(gvars, get_vars_from_inventory_sources(loader, sources, host.get_groups(), 'all'))

        return gvars

    def host_vars(self, host, loader, sources):
        """ requires host object """
        hvars = host.get_vars()

        if self.get_option('use_vars_plugins'):
            hvars = combine_vars(hvars, get_vars_from_inventory_sources(loader, sources, [host], 'all'))

        return hvars

    def parse(self, inventory, loader, path, cache=False):
        """ parses the inventory file """

        super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
        self._read_config_data(path)

        sources = []
        try:
            sources = inventory.processed_sources
        except AttributeError:
            # older inventory objects lack processed_sources; only fatal
            # when vars plugins were explicitly requested
            if self.get_option('use_vars_plugins'):
                raise

        strict = self.get_option('strict')
        # NOTE: rebinds the `cache` parameter to the configured cache plugin.
        cache = cache_loader.get(C.CACHE_PLUGIN)
        try:
            # Go over hosts (less var copies)
            for host in inventory.hosts:
                # get available variables to templar
                hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
                if cache.contains(host):  # adds facts if cache is active
                    hostvars = combine_vars(hostvars, cache.get(host))

                # create composite vars
                self._set_composite_vars(self.get_option('compose'), hostvars, host, strict=strict)

                # refetch host vars in case new ones have been created above
                hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
                if cache.contains(host):  # adds facts if cache is active
                    hostvars = combine_vars(hostvars, cache.get(host))

                # constructed groups based on conditionals
                self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict, fetch_hostvars=False)

                # constructed groups based variable values
                self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=False)

        except Exception as ex:
            raise AnsibleParserError(f"Failed to parse {path!r}.") from ex
python | dask__distributed | distributed/diagnostics/tests/test_worker_plugin.py | {
"start": 15817,
"end": 16869
class ____(WorkerPlugin):
    # Worker plugin whose teardown always fails; used to exercise error paths.
    def teardown(self, worker):
        raise RuntimeError("test error")
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_unregister_worker_plugin_with_broken_teardown_raises(c, s, a):
    # Unregistering a plugin whose teardown raises must surface the error
    # to the caller and log it on the worker.
    await c.register_plugin(BrokenTeardownPlugin(), name="TestPlugin1")
    with pytest.raises(RuntimeError, match="test error"):
        with captured_logger("distributed.worker", level=logging.ERROR) as caplog:
            await c.unregister_worker_plugin("TestPlugin1")
    logs = caplog.getvalue()
    assert "TestPlugin1 failed to teardown" in logs
    assert "test error" in logs
@gen_cluster(client=True, nthreads=[])
async def test_plugin_with_broken_teardown_logs_on_close(c, s):
    # A failing plugin teardown during worker close should be logged,
    # not raised, so shutdown still completes.
    await c.register_plugin(BrokenTeardownPlugin(), name="TestPlugin1")
    with captured_logger("distributed.worker", level=logging.ERROR) as caplog:
        async with Worker(s.address):
            pass
    logs = caplog.getvalue()
    assert "TestPlugin1 failed to teardown" in logs
    assert "test error" in logs
| BrokenTeardownPlugin |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 328557,
"end": 329722
class ____(torch.fx.Interpreter):
    def run_node(self, n: torch.fx.Node) -> Result:
        """
        Run an FX node, propagating unbacked Symbol bindings to the new fake tensor
        """
        from torch._guards import detect_fake_mode

        result = super().run_node(n)
        # A fake mode must be active while interpreting; its shape_env holds
        # the unbacked symbol bindings that get rebound onto the fresh result.
        fake_mode = detect_fake_mode()
        assert fake_mode is not None
        rebind_unbacked(fake_mode.shape_env, n, result)
        return result
def _find_user_code_frame() -> Optional[types.FrameType]:
frame = inspect.currentframe()
while frame is not None:
if not frame.f_code.co_filename.startswith(
os.path.dirname(inspect.getfile(torch)) + os.path.sep
):
break
frame = frame.f_back
return frame
def _blame_user_code(e: Exception, frame: types.FrameType) -> None:
frame_summary = traceback.FrameSummary(
frame.f_code.co_filename,
frame.f_lineno,
frame.f_code.co_name,
)
msg = e.args[0]
msg += "\n\nThe following call raised this error:\n" + "".join(
traceback.StackSummary.from_list([frame_summary]).format()
)
e.args = (msg,)
| PropagateUnbackedSymInts |
python | scrapy__scrapy | tests/spiders.py | {
"start": 15309,
"end": 16107
class ____(MetaSpider):
    # Size (in bytes) of the POST body sent to the mock server.
    full_response_length = 2**18

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Connect the bytes_received handler so the spider can stop the
        # download after the first chunk arrives.
        spider = super().from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.bytes_received, signals.bytes_received)
        return spider

    async def start(self):
        body = b"a" * self.full_response_length
        url = self.mockserver.url("/alpayload")
        yield Request(url, method="POST", body=body, errback=self.errback)

    def parse(self, response):
        # Record the (possibly truncated) response for the test to inspect.
        self.meta["response"] = response

    def errback(self, failure):
        self.meta["failure"] = failure

    def bytes_received(self, data, request, spider):
        # Record the received chunk, then abort the download without failing
        # (fail=False routes the partial response to the callback).
        self.meta["bytes_received"] = data
        raise StopDownload(fail=False)
| BytesReceivedCallbackSpider |
python | sympy__sympy | sympy/physics/biomechanics/musculotendon.py | {
"start": 42867,
"end": 58289
} | class ____(MusculotendonBase):
r"""Musculotendon model using the curves of De Groote et al., 2016 [1]_.
Examples
========
This class models the musculotendon actuator parametrized by the
characteristic curves described in De Groote et al., 2016 [1]_. Like all
musculotendon models in SymPy's biomechanics module, it requires a pathway
to define its line of action. We'll begin by creating a simple
``LinearPathway`` between two points that our musculotendon will follow.
We'll create a point ``O`` to represent the musculotendon's origin and
another ``I`` to represent its insertion.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import (LinearPathway, Point,
... ReferenceFrame, dynamicsymbols)
>>> N = ReferenceFrame('N')
>>> O, I = O, P = symbols('O, I', cls=Point)
>>> q, u = dynamicsymbols('q, u', real=True)
>>> I.set_pos(O, q*N.x)
>>> O.set_vel(N, 0)
>>> I.set_vel(N, u*N.x)
>>> pathway = LinearPathway(O, I)
>>> pathway.attachments
(O, I)
>>> pathway.length
Abs(q(t))
>>> pathway.extension_velocity
sign(q(t))*Derivative(q(t), t)
A musculotendon also takes an instance of an activation dynamics model as
this will be used to provide symbols for the activation in the formulation
of the musculotendon dynamics. We'll use an instance of
``FirstOrderActivationDeGroote2016`` to represent first-order activation
dynamics. Note that a single name argument needs to be provided as SymPy
will use this as a suffix.
>>> from sympy.physics.biomechanics import FirstOrderActivationDeGroote2016
>>> activation = FirstOrderActivationDeGroote2016('muscle')
>>> activation.x
Matrix([[a_muscle(t)]])
>>> activation.r
Matrix([[e_muscle(t)]])
>>> activation.p
Matrix([
[tau_a_muscle],
[tau_d_muscle],
[ b_muscle]])
>>> activation.rhs()
Matrix([[((1/2 - tanh(b_muscle*(-a_muscle(t) + e_muscle(t)))/2)*(3*...]])
The musculotendon class requires symbols or values to be passed to represent
the constants in the musculotendon dynamics. We'll use SymPy's ``symbols``
function to create symbols for the maximum isometric force ``F_M_max``,
optimal fiber length ``l_M_opt``, tendon slack length ``l_T_slack``, maximum
fiber velocity ``v_M_max``, optimal pennation angle ``alpha_opt, and fiber
damping coefficient ``beta``.
>>> F_M_max = symbols('F_M_max', real=True)
>>> l_M_opt = symbols('l_M_opt', real=True)
>>> l_T_slack = symbols('l_T_slack', real=True)
>>> v_M_max = symbols('v_M_max', real=True)
>>> alpha_opt = symbols('alpha_opt', real=True)
>>> beta = symbols('beta', real=True)
We can then import the class ``MusculotendonDeGroote2016`` from the
biomechanics module and create an instance by passing in the various objects
we have previously instantiated. By default, a musculotendon model with
rigid tendon musculotendon dynamics will be created.
>>> from sympy.physics.biomechanics import MusculotendonDeGroote2016
>>> rigid_tendon_muscle = MusculotendonDeGroote2016(
... 'muscle',
... pathway,
... activation,
... tendon_slack_length=l_T_slack,
... peak_isometric_force=F_M_max,
... optimal_fiber_length=l_M_opt,
... maximal_fiber_velocity=v_M_max,
... optimal_pennation_angle=alpha_opt,
... fiber_damping_coefficient=beta,
... )
We can inspect the various properties of the musculotendon, including
getting the symbolic expression describing the force it produces using its
``force`` attribute.
>>> rigid_tendon_muscle.force
-F_M_max*(beta*(-l_T_slack + Abs(q(t)))*sign(q(t))*Derivative(q(t), t)...
When we created the musculotendon object, we passed in an instance of an
activation dynamics object that governs the activation within the
musculotendon. SymPy makes a design choice here that the activation dynamics
instance will be treated as a child object of the musculotendon dynamics.
Therefore, if we want to inspect the state and input variables associated
with the musculotendon model, we will also be returned the state and input
variables associated with the child object, or the activation dynamics in
this case. As the musculotendon model that we created here uses rigid tendon
dynamics, no additional states or inputs relating to the musculotendon are
introduces. Consequently, the model has a single state associated with it,
the activation, and a single input associated with it, the excitation. The
states and inputs can be inspected using the ``x`` and ``r`` attributes
respectively. Note that both ``x`` and ``r`` have the alias attributes of
``state_vars`` and ``input_vars``.
>>> rigid_tendon_muscle.x
Matrix([[a_muscle(t)]])
>>> rigid_tendon_muscle.r
Matrix([[e_muscle(t)]])
To see which constants are symbolic in the musculotendon model, we can use
the ``p`` or ``constants`` attribute. This returns a ``Matrix`` populated
by the constants that are represented by a ``Symbol`` rather than a numeric
value.
>>> rigid_tendon_muscle.p
Matrix([
[ l_T_slack],
[ F_M_max],
[ l_M_opt],
[ v_M_max],
[ alpha_opt],
[ beta],
[ tau_a_muscle],
[ tau_d_muscle],
[ b_muscle],
[ c_0_fl_T_muscle],
[ c_1_fl_T_muscle],
[ c_2_fl_T_muscle],
[ c_3_fl_T_muscle],
[ c_0_fl_M_pas_muscle],
[ c_1_fl_M_pas_muscle],
[ c_0_fl_M_act_muscle],
[ c_1_fl_M_act_muscle],
[ c_2_fl_M_act_muscle],
[ c_3_fl_M_act_muscle],
[ c_4_fl_M_act_muscle],
[ c_5_fl_M_act_muscle],
[ c_6_fl_M_act_muscle],
[ c_7_fl_M_act_muscle],
[ c_8_fl_M_act_muscle],
[ c_9_fl_M_act_muscle],
[c_10_fl_M_act_muscle],
[c_11_fl_M_act_muscle],
[ c_0_fv_M_muscle],
[ c_1_fv_M_muscle],
[ c_2_fv_M_muscle],
[ c_3_fv_M_muscle]])
Finally, we can call the ``rhs`` method to return a ``Matrix`` that
contains as its elements the righthand side of the ordinary differential
equations corresponding to each of the musculotendon's states. Like the
method with the same name on the ``Method`` classes in SymPy's mechanics
module, this returns a column vector where the number of rows corresponds to
the number of states. For our example here, we have a single state, the
dynamic symbol ``a_muscle(t)``, so the returned value is a 1-by-1
``Matrix``.
>>> rigid_tendon_muscle.rhs()
Matrix([[((1/2 - tanh(b_muscle*(-a_muscle(t) + e_muscle(t)))/2)*(3*...]])
The musculotendon class supports elastic tendon musculotendon models in
addition to rigid tendon ones. You can choose to either use the fiber length
or tendon force as an additional state. You can also specify whether an
explicit or implicit formulation should be used. To select a formulation,
pass a member of the ``MusculotendonFormulation`` enumeration to the
``musculotendon_dynamics`` parameter when calling the constructor. This
enumeration is an ``IntEnum``, so you can also pass an integer, however it
is recommended to use the enumeration as it is clearer which formulation you
are actually selecting. Below, we'll use the ``FIBER_LENGTH_EXPLICIT``
member to create a musculotendon with an elastic tendon that will use the
(normalized) muscle fiber length as an additional state and will produce
the governing ordinary differential equation in explicit form.
>>> from sympy.physics.biomechanics import MusculotendonFormulation
>>> elastic_tendon_muscle = MusculotendonDeGroote2016(
... 'muscle',
... pathway,
... activation,
... musculotendon_dynamics=MusculotendonFormulation.FIBER_LENGTH_EXPLICIT,
... tendon_slack_length=l_T_slack,
... peak_isometric_force=F_M_max,
... optimal_fiber_length=l_M_opt,
... maximal_fiber_velocity=v_M_max,
... optimal_pennation_angle=alpha_opt,
... fiber_damping_coefficient=beta,
... )
>>> elastic_tendon_muscle.force
-F_M_max*TendonForceLengthDeGroote2016((-sqrt(l_M_opt**2*...
>>> elastic_tendon_muscle.x
Matrix([
[l_M_tilde_muscle(t)],
[ a_muscle(t)]])
>>> elastic_tendon_muscle.r
Matrix([[e_muscle(t)]])
>>> elastic_tendon_muscle.p
Matrix([
[ l_T_slack],
[ F_M_max],
[ l_M_opt],
[ v_M_max],
[ alpha_opt],
[ beta],
[ tau_a_muscle],
[ tau_d_muscle],
[ b_muscle],
[ c_0_fl_T_muscle],
[ c_1_fl_T_muscle],
[ c_2_fl_T_muscle],
[ c_3_fl_T_muscle],
[ c_0_fl_M_pas_muscle],
[ c_1_fl_M_pas_muscle],
[ c_0_fl_M_act_muscle],
[ c_1_fl_M_act_muscle],
[ c_2_fl_M_act_muscle],
[ c_3_fl_M_act_muscle],
[ c_4_fl_M_act_muscle],
[ c_5_fl_M_act_muscle],
[ c_6_fl_M_act_muscle],
[ c_7_fl_M_act_muscle],
[ c_8_fl_M_act_muscle],
[ c_9_fl_M_act_muscle],
[c_10_fl_M_act_muscle],
[c_11_fl_M_act_muscle],
[ c_0_fv_M_muscle],
[ c_1_fv_M_muscle],
[ c_2_fv_M_muscle],
[ c_3_fv_M_muscle]])
>>> elastic_tendon_muscle.rhs()
Matrix([
[v_M_max*FiberForceVelocityInverseDeGroote2016((l_M_opt*...],
[ ((1/2 - tanh(b_muscle*(-a_muscle(t) + e_muscle(t)))/2)*(3*...]])
It is strongly recommended to use the alternate ``with_defaults``
constructor when creating an instance because this will ensure that the
published constants are used in the musculotendon characteristic curves.
>>> elastic_tendon_muscle = MusculotendonDeGroote2016.with_defaults(
... 'muscle',
... pathway,
... activation,
... musculotendon_dynamics=MusculotendonFormulation.FIBER_LENGTH_EXPLICIT,
... tendon_slack_length=l_T_slack,
... peak_isometric_force=F_M_max,
... optimal_fiber_length=l_M_opt,
... )
>>> elastic_tendon_muscle.x
Matrix([
[l_M_tilde_muscle(t)],
[ a_muscle(t)]])
>>> elastic_tendon_muscle.r
Matrix([[e_muscle(t)]])
>>> elastic_tendon_muscle.p
Matrix([
[ l_T_slack],
[ F_M_max],
[ l_M_opt],
[tau_a_muscle],
[tau_d_muscle],
[ b_muscle]])
Parameters
==========
name : str
The name identifier associated with the musculotendon. This name is used
as a suffix when automatically generated symbols are instantiated. It
must be a string of nonzero length.
pathway : PathwayBase
The pathway that the actuator follows. This must be an instance of a
concrete subclass of ``PathwayBase``, e.g. ``LinearPathway``.
activation_dynamics : ActivationBase
The activation dynamics that will be modeled within the musculotendon.
This must be an instance of a concrete subclass of ``ActivationBase``,
e.g. ``FirstOrderActivationDeGroote2016``.
musculotendon_dynamics : MusculotendonFormulation | int
The formulation of musculotendon dynamics that should be used
internally, i.e. rigid or elastic tendon model, the choice of
musculotendon state etc. This must be a member of the integer
enumeration ``MusculotendonFormulation`` or an integer that can be cast
to a member. To use a rigid tendon formulation, set this to
``MusculotendonFormulation.RIGID_TENDON`` (or the integer value ``0``,
which will be cast to the enumeration member). There are four possible
formulations for an elastic tendon model. To use an explicit formulation
with the fiber length as the state, set this to
``MusculotendonFormulation.FIBER_LENGTH_EXPLICIT`` (or the integer value
``1``). To use an explicit formulation with the tendon force as the
state, set this to ``MusculotendonFormulation.TENDON_FORCE_EXPLICIT``
(or the integer value ``2``). To use an implicit formulation with the
fiber length as the state, set this to
``MusculotendonFormulation.FIBER_LENGTH_IMPLICIT`` (or the integer value
``3``). To use an implicit formulation with the tendon force as the
state, set this to ``MusculotendonFormulation.TENDON_FORCE_IMPLICIT``
(or the integer value ``4``). The default is
``MusculotendonFormulation.RIGID_TENDON``, which corresponds to a rigid
tendon formulation.
tendon_slack_length : Expr | None
The length of the tendon when the musculotendon is in its unloaded
state. In a rigid tendon model the tendon length is the tendon slack
length. In all musculotendon models, tendon slack length is used to
normalize tendon length to give
:math:`\tilde{l}^T = \frac{l^T}{l^T_{slack}}`.
peak_isometric_force : Expr | None
The maximum force that the muscle fiber can produce when it is
undergoing an isometric contraction (no lengthening velocity). In all
musculotendon models, peak isometric force is used to normalized tendon
and muscle fiber force to give
:math:`\tilde{F}^T = \frac{F^T}{F^M_{max}}`.
optimal_fiber_length : Expr | None
The muscle fiber length at which the muscle fibers produce no passive
force and their maximum active force. In all musculotendon models,
optimal fiber length is used to normalize muscle fiber length to give
:math:`\tilde{l}^M = \frac{l^M}{l^M_{opt}}`.
maximal_fiber_velocity : Expr | None
The fiber velocity at which, during muscle fiber shortening, the muscle
fibers are unable to produce any active force. In all musculotendon
models, maximal fiber velocity is used to normalize muscle fiber
extension velocity to give :math:`\tilde{v}^M = \frac{v^M}{v^M_{max}}`.
optimal_pennation_angle : Expr | None
The pennation angle when muscle fiber length equals the optimal fiber
length.
fiber_damping_coefficient : Expr | None
The coefficient of damping to be used in the damping element in the
muscle fiber model.
with_defaults : bool
Whether ``with_defaults`` alternate constructors should be used when
automatically constructing child classes. Default is ``False``.
References
==========
.. [1] De Groote, F., Kinney, A. L., Rao, A. V., & Fregly, B. J., Evaluation
of direct collocation optimal control problem formulations for
solving the muscle redundancy problem, Annals of biomedical
engineering, 44(10), (2016) pp. 2922-2936
"""
curves = CharacteristicCurveCollection( # type: ignore
tendon_force_length=TendonForceLengthDeGroote2016,
tendon_force_length_inverse=TendonForceLengthInverseDeGroote2016,
fiber_force_length_passive=FiberForceLengthPassiveDeGroote2016,
fiber_force_length_passive_inverse=FiberForceLengthPassiveInverseDeGroote2016,
fiber_force_length_active=FiberForceLengthActiveDeGroote2016,
fiber_force_velocity=FiberForceVelocityDeGroote2016,
fiber_force_velocity_inverse=FiberForceVelocityInverseDeGroote2016,
)
| MusculotendonDeGroote2016 |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 35962,
"end": 36633
} | class ____(ServeActorDetails, frozen=True):
"""Detailed info about a single deployment replica."""
replica_id: str = Field(description="Unique ID for the replica.")
state: ReplicaState = Field(description="Current state of the replica.")
pid: Optional[int] = Field(description="PID of the replica actor process.")
start_time_s: float = Field(
description=(
"The time at which the replica actor was started. If the controller dies, "
"this is the time at which the controller recovers and retrieves replica "
"state from the running replica actor."
)
)
@PublicAPI(stability="alpha")
| ReplicaDetails |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/entities/snippets.py | {
"start": 6218,
"end": 6760
} | class ____(ndb.Model):
name = ndb.StringProperty()
def _pre_put_hook(self):
_notify("Gee wiz I have a new friend!")
@classmethod
def _post_delete_hook(cls, key, future):
_notify("I have found occasion to rethink our friendship.")
def demonstrate_model_put_and_delete_hooks():
f = Friend()
f.name = "Carole King"
f.put() # _pre_put_hook is called
yield f
fut = f.key.delete_async() # _post_delete_hook not yet called
fut.get_result() # _post_delete_hook is called
yield f
| Friend |
python | wandb__wandb | wandb/sdk/launch/runner/vertex_runner.py | {
"start": 674,
"end": 2214
} | class ____(AbstractRun):
def __init__(self, job: Any) -> None:
self._job = job
@property
def id(self) -> str:
# numeric ID of the custom training job
return self._job.name # type: ignore
async def get_logs(self) -> Optional[str]:
# TODO: implement
return None
@property
def name(self) -> str:
return self._job.display_name # type: ignore
@property
def gcp_region(self) -> str:
return self._job.location # type: ignore
@property
def gcp_project(self) -> str:
return self._job.project # type: ignore
def get_page_link(self) -> str:
return f"{GCP_CONSOLE_URI}/vertex-ai/locations/{self.gcp_region}/training/{self.id}?project={self.gcp_project}"
async def wait(self) -> bool:
# TODO: run this in a separate thread.
await self._job.wait()
return (await self.get_status()).state == "finished"
async def get_status(self) -> Status:
job_state = str(self._job.state) # extract from type PipelineState
if job_state == "JobState.JOB_STATE_SUCCEEDED":
return Status("finished")
if job_state == "JobState.JOB_STATE_FAILED":
return Status("failed")
if job_state == "JobState.JOB_STATE_RUNNING":
return Status("running")
if job_state == "JobState.JOB_STATE_PENDING":
return Status("starting")
return Status("unknown")
async def cancel(self) -> None:
self._job.cancel()
| VertexSubmittedRun |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 21600,
"end": 24653
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = HubertPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[HubertEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=hidden_states,
attention_mask=attention_mask,
)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
| HubertEncoderStableLayerNorm |
python | huggingface__transformers | src/transformers/models/falcon/modeling_falcon.py | {
"start": 31521,
"end": 32608
} | class ____(PreTrainedModel):
config: FalconConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_no_split_modules = ["FalconDecoderLayer"]
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
@torch.no_grad()
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
super()._init_weights(module)
if isinstance(module, FalconLinear):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
# Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa
@classmethod
def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False):
_is_bettertransformer = getattr(cls, "use_bettertransformer", False)
if _is_bettertransformer:
return config
if not hard_check_only:
config._attn_implementation = "sdpa"
return config
@auto_docstring
| FalconPreTrainedModel |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/builtin_way.py | {
"start": 153,
"end": 520
} | class ____(Creator, Describe, ABC):
"""A creator that does operations itself without delegation, if we can create it we can also describe it."""
def __init__(self, options, interpreter) -> None:
Creator.__init__(self, options, interpreter)
Describe.__init__(self, self.dest, interpreter)
__all__ = [
"VirtualenvBuiltin",
]
| VirtualenvBuiltin |
python | ray-project__ray | python/ray/_common/tests/test_signature.py | {
"start": 3799,
"end": 6485
} | class ____:
"""Tests for the extract_signature utility function."""
def test_function_without_ignore_first(self):
"""Test extracting signature from function without ignoring first parameter."""
def test_func(a, b=10, c=None):
return a + b
params = extract_signature(test_func, ignore_first=False)
assert len(params) == 3
assert params[0].name == "a"
assert params[1].name == "b"
assert params[1].default == 10
assert params[2].name == "c"
assert params[2].default is None
def test_method_with_ignore_first(self):
"""Test extracting signature from method ignoring 'self' parameter."""
class TestClass:
def test_method(self, a, b=20):
return a + b
params = extract_signature(TestClass.test_method, ignore_first=True)
assert len(params) == 2
assert params[0].name == "a"
assert params[1].name == "b"
assert params[1].default == 20
def test_function_with_ignore_first(self):
"""Test extracting signature from regular function with ignore_first=True."""
def test_func(x, y, z=30):
return x + y + z
params = extract_signature(test_func, ignore_first=True)
assert len(params) == 2
assert params[0].name == "y"
assert params[1].name == "z"
assert params[1].default == 30
def test_empty_parameters_with_ignore_first(self):
"""Test error handling when method has no parameters but ignore_first=True."""
def test_func():
return "hello"
with pytest.raises(ValueError, match="Methods must take a 'self' argument"):
extract_signature(test_func, ignore_first=True)
def test_single_parameter_with_ignore_first(self):
"""Test extracting signature from method with only 'self' parameter."""
class TestClass:
def test_method(self):
return "hello"
params = extract_signature(TestClass.test_method, ignore_first=True)
assert len(params) == 0
def test_varargs_and_kwargs(self):
"""Test extracting signature with *args and **kwargs."""
def test_func(a, b=10, *args, **kwargs):
return a + b
params = extract_signature(test_func, ignore_first=False)
assert len(params) == 4
assert params[0].name == "a"
assert params[1].name == "b"
assert params[2].name == "args"
assert params[2].kind == inspect.Parameter.VAR_POSITIONAL
assert params[3].name == "kwargs"
assert params[3].kind == inspect.Parameter.VAR_KEYWORD
| TestExtractSignature |
python | huggingface__transformers | tests/models/dpr/test_tokenization_dpr.py | {
"start": 1321,
"end": 1637
} | class ____(test_tokenization_bert.BertTokenizationTest):
tokenizer_class = DPRQuestionEncoderTokenizer
rust_tokenizer_class = DPRQuestionEncoderTokenizerFast
test_rust_tokenizer = True
from_pretrained_id = "facebook/dpr-ctx_encoder-single-nq-base"
@require_tokenizers
| DPRQuestionEncoderTokenizationTest |
python | pypa__pip | tests/unit/test_wheel.py | {
"start": 5757,
"end": 6055
} | class ____:
def test_unpack_wheel_no_flatten(self, tmpdir: Path) -> None:
filepath = os.path.join(DATA_DIR, "packages", "meta-1.0-py2.py3-none-any.whl")
unpack_file(filepath, os.fspath(tmpdir))
assert os.path.isdir(os.path.join(tmpdir, "meta-1.0.dist-info"))
| TestWheelFile |
python | doocs__leetcode | solution/0600-0699/0648.Replace Words/Solution.py | {
"start": 683,
"end": 1041
} | class ____:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
trie = Trie()
for i, w in enumerate(dictionary):
trie.insert(w, i)
ans = []
for w in sentence.split():
idx = trie.search(w)
ans.append(dictionary[idx] if idx != -1 else w)
return " ".join(ans)
| Solution |
python | apache__airflow | providers/microsoft/psrp/src/airflow/providers/microsoft/psrp/operators/psrp.py | {
"start": 1381,
"end": 7358
} | class ____(BaseOperator):
"""
PowerShell Remoting Protocol operator.
Use one of the 'command', 'cmdlet', or 'powershell' arguments.
The 'securestring' template filter can be used to tag a value for
serialization into a `System.Security.SecureString` (applicable only
for DAGs which have `render_template_as_native_obj=True`).
When using the `cmdlet` or `powershell` arguments and when `do_xcom_push`
is enabled, the command output is converted to JSON by PowerShell using
the `ConvertTo-Json
<https://docs.microsoft.com/en-us/powershell/
module/microsoft.powershell.utility/convertto-json>`__ cmdlet such
that the operator return value is serializable to an XCom value.
:param psrp_conn_id: connection id
:param command: command to execute on remote host. (templated)
:param powershell: powershell to execute on remote host. (templated)
:param cmdlet:
cmdlet to execute on remote host (templated). Also used as the default
value for `task_id`.
:param arguments:
When using the `cmdlet` or `powershell` option, use `arguments` to
provide arguments (templated).
:param parameters:
When using the `cmdlet` or `powershell` option, use `parameters` to
provide parameters (templated). Note that a parameter with a value of `None`
becomes an *argument* (i.e., switch).
:param logging_level:
Logging level for message streams which are received during remote execution.
The default is to include all messages in the task log.
:param runspace_options:
optional dictionary which is passed when creating the runspace pool. See
:py:class:`~pypsrp.powershell.RunspacePool` for a description of the
available options.
:param wsman_options:
optional dictionary which is passed when creating the `WSMan` client. See
:py:class:`~pypsrp.wsman.WSMan` for a description of the available options.
:param psrp_session_init:
Optional command which will be added to the pipeline when a new PowerShell
session has been established, prior to invoking the action specified using
the `cmdlet`, `command`, or `powershell` parameters.
"""
template_fields: Sequence[str] = (
"cmdlet",
"command",
"arguments",
"parameters",
"powershell",
)
template_fields_renderers = {"command": "powershell", "powershell": "powershell"}
ui_color = "#c2e2ff"
def __init__(
self,
*,
psrp_conn_id: str,
command: str | None = None,
powershell: str | None = None,
cmdlet: str | None = None,
arguments: list[str] | None = None,
parameters: dict[str, str] | None = None,
logging_level: int = DEBUG,
runspace_options: dict[str, Any] | None = None,
wsman_options: dict[str, Any] | None = None,
psrp_session_init: Command | None = None,
**kwargs,
) -> None:
args = {command, powershell, cmdlet}
if not exactly_one(*args):
raise ValueError("Must provide exactly one of 'command', 'powershell', or 'cmdlet'")
if arguments and not (powershell or cmdlet):
raise ValueError("Arguments only allowed with 'powershell' or 'cmdlet'")
if parameters and not (powershell or cmdlet):
raise ValueError("Parameters only allowed with 'powershell' or 'cmdlet'")
if cmdlet:
kwargs.setdefault("task_id", cmdlet)
super().__init__(**kwargs)
self.conn_id = psrp_conn_id
self.command = command
self.powershell = powershell
self.cmdlet = cmdlet
self.arguments = arguments
self.parameters = parameters
self.logging_level = logging_level
self.runspace_options = runspace_options
self.wsman_options = wsman_options
self.psrp_session_init = psrp_session_init
def execute(self, context: Context) -> list[Any] | None:
with (
PsrpHook(
self.conn_id,
logging_level=self.logging_level,
runspace_options=self.runspace_options,
wsman_options=self.wsman_options,
on_output_callback=self.log.info if not self.do_xcom_push else None,
) as hook,
hook.invoke() as ps,
):
if self.psrp_session_init is not None:
ps.add_command(self.psrp_session_init)
if self.command:
ps.add_script(f"cmd.exe /c @'\n{self.command}\n'@")
else:
if self.cmdlet:
ps.add_cmdlet(self.cmdlet)
else:
ps.add_script(self.powershell)
for argument in self.arguments or ():
ps.add_argument(argument)
if self.parameters:
ps.add_parameters(self.parameters)
if self.do_xcom_push:
ps.add_cmdlet("ConvertTo-Json")
if ps.had_errors:
raise AirflowException("Process failed")
rc = ps.runspace_pool.host.rc
if rc:
raise AirflowException(f"Process exited with non-zero status code: {rc}")
if not self.do_xcom_push:
return None
return [json.loads(output) for output in ps.output]
def get_template_env(self):
# Create a template environment overlay in order to leave the underlying
# environment unchanged.
env = super().get_template_env().overlay()
native = isinstance(env, NativeEnvironment)
def securestring(value: str):
if not native:
raise AirflowException(
"Filter 'securestring' not applicable to non-native templating environment"
)
return TaggedValue("SS", value)
env.filters["securestring"] = securestring
return env
| PsrpOperator |
python | graphql-python__graphene | graphene/tests/issues/test_425.py | {
"start": 276,
"end": 341
} | class ____(ObjectTypeOptions):
other_attr = None
| SpecialOptions |
python | google__jax | docs/autodidax2_part1.py | {
"start": 17331,
"end": 19695
} | class ____(Interpreter):
def __init__(self):
self.equations = [] # A mutable list of all the ops we've seen so far
self.name_counter = 0 # Counter for generating unique names
def fresh_var(self):
self.name_counter += 1
return "v_" + str(self.name_counter)
def interpret_op(self, op, args):
binder = self.fresh_var()
self.equations.append(Equation(binder, op, args))
return binder
def build_jaxpr(f, num_args):
interpreter = StagingInterpreter()
parameters = tuple(interpreter.fresh_var() for _ in range(num_args))
with set_interpreter(interpreter):
result = f(*parameters)
return Jaxpr(parameters, interpreter.equations, result)
# -
# Now we can construct an IR for a Python program and print it out:
print(build_jaxpr(foo, 1))
# We can also evaluate our IR by writing an explicit interpreter that traverses
# the operations one by one:
# +
def eval_jaxpr(jaxpr, args):
# An environment mapping variables to values
env = dict(zip(jaxpr.parameters, args))
def eval_atom(x): return env[x] if isinstance(x, Var) else x
for eqn in jaxpr.equations:
args = tuple(eval_atom(x) for x in eqn.args)
env[eqn.var] = current_interpreter.interpret_op(eqn.op, args)
return eval_atom(jaxpr.return_val)
print(eval_jaxpr(build_jaxpr(foo, 1), (2.0,)))
# -
# We've written this interpreter in terms of `current_interpreter.interpret_op`
# which means we've done a full round-trip: interpretable Python program to IR
# to interpretable Python program. Since the result is "interpretable" we can
# differentiate it again, or stage it out or anything we like:
print(jvp(lambda x: eval_jaxpr(build_jaxpr(foo, 1), (x,)), 2.0, 1.0))
# ## Up next...
# That's it for part one of this tutorial. We've done two primitives, three
# interpreters and the tracing mechanism that weaves them together. In the next
# part we'll add types other than floats, error handling, compilation,
# reverse-mode AD and higher-order primitives. Note that the second part is
# structured differently. Rather than trying to have a top-to-bottom order that
# obeys both code dependencies (e.g. data structures need to be defined before
# they're used) and pedagogical dependencies (concepts need to be introduced
# before they're implemented) we're going with a single file that can be approached
# in any order.
| StagingInterpreter |
python | kamyu104__LeetCode-Solutions | Python/intersection-of-multiple-arrays.py | {
"start": 1059,
"end": 1363
} | class ____(object):
def intersection(self, nums):
"""
:type nums: List[List[int]]
:rtype: List[int]
"""
result = set(nums[0])
for i in xrange(1, len(nums)):
result = set(x for x in nums[i] if x in result)
return sorted(result)
| Solution3 |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 8591,
"end": 8728
} | class ____(AutomationAction):
"""Resumes a Work Queue"""
type: Literal["resume-automation"] = "resume-automation"
| ResumeAutomation |
python | huggingface__transformers | src/transformers/models/reformer/modeling_reformer.py | {
"start": 73785,
"end": 75860
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)])
# Reformer is using Rev Nets, thus last layer outputs are concatenated and
# Layer Norm is done over 2 * hidden_size
self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_hidden_states=False,
output_attentions=False,
):
# hidden_states and attention lists to be filled if wished
all_hidden_states = []
all_attentions = []
# init cached hidden states if necessary
if use_cache and past_buckets_states is None:
past_buckets_states = ReformerDynamicCache()
# concat same tensor for reversible ResNet
hidden_states = torch.cat([hidden_states, hidden_states], dim=-1)
hidden_states = _ReversibleFunction.apply(
hidden_states,
self.layers,
attention_mask,
num_hashes,
all_hidden_states,
all_attentions,
past_buckets_states,
use_cache,
orig_sequence_length,
output_hidden_states,
output_attentions,
)
# Apply layer norm to concatenated hidden states
hidden_states = self.layer_norm(hidden_states)
# Apply dropout
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
next_cache = past_buckets_states if use_cache else None
return ReformerEncoderOutput(
hidden_states=hidden_states,
all_hidden_states=all_hidden_states,
all_attentions=all_attentions,
past_buckets_states=next_cache,
)
| ReformerEncoder |
python | pandas-dev__pandas | pandas/io/pytables.py | {
"start": 108929,
"end": 110416
} | class ____(GenericFixed):
pandas_kind = "series"
attributes = ["name"]
name: Hashable
@property
def shape(self) -> tuple[int] | None:
try:
return (len(self.group.values),)
except (TypeError, AttributeError):
return None
def read(
self,
where=None,
columns=None,
start: int | None = None,
stop: int | None = None,
) -> Series:
self.validate_read(columns, where)
index = self.read_index("index", start=start, stop=stop)
values = self.read_array("values", start=start, stop=stop)
try:
result = Series(values, index=index, name=self.name, copy=False)
except UnicodeEncodeError as err:
if (
self.errors == "surrogatepass"
and using_string_dtype()
and str(err).endswith("surrogates not allowed")
and HAS_PYARROW
):
result = Series(
values,
index=index,
name=self.name,
copy=False,
dtype=StringDtype(storage="python", na_value=np.nan),
)
else:
raise
return result
def write(self, obj, **kwargs) -> None:
super().write(obj, **kwargs)
self.write_index("index", obj.index)
self.write_array("values", obj)
self.attrs.name = obj.name
| SeriesFixed |
python | django__django | tests/serializers/test_natural.py | {
"start": 289,
"end": 9431
} | class ____(TestCase):
pass
def natural_key_serializer_test(self, format):
# Create all the objects defined in the test data
with connection.constraint_checks_disabled():
objects = [
NaturalKeyAnchor.objects.create(id=1100, data="Natural Key Anghor"),
FKDataNaturalKey.objects.create(id=1101, data_id=1100),
FKDataNaturalKey.objects.create(id=1102, data_id=None),
]
# Serialize the test database
serialized_data = serializers.serialize(
format, objects, indent=2, use_natural_foreign_keys=True
)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for obj in objects:
instance = obj.__class__.objects.get(id=obj.pk)
self.assertEqual(
obj.data,
instance.data,
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)"
% (
obj.pk,
obj.data,
type(obj.data),
instance,
type(instance.data),
),
)
def natural_key_test(self, format):
book1 = {
"data": "978-1590597255",
"title": "The Definitive Guide to Django: Web Development Done Right",
}
book2 = {"data": "978-1590599969", "title": "Practical Django Projects"}
# Create the books.
adrian = NaturalKeyAnchor.objects.create(**book1)
james = NaturalKeyAnchor.objects.create(**book2)
# Serialize the books.
string_data = serializers.serialize(
format,
NaturalKeyAnchor.objects.all(),
indent=2,
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
)
# Delete one book (to prove that the natural key generation will only
# restore the primary keys of books found in the database via the
# get_natural_key manager method).
james.delete()
# Deserialize and test.
books = list(serializers.deserialize(format, string_data))
self.assertCountEqual(
[(book.object.title, book.object.pk) for book in books],
[
(book1["title"], adrian.pk),
(book2["title"], None),
],
)
def natural_pk_mti_test(self, format):
"""
If serializing objects in a multi-table inheritance relationship using
natural primary keys, the natural foreign key for the parent is output in
the fields of the child so it's possible to relate the child to the parent
when deserializing.
"""
child_1 = Child.objects.create(parent_data="1", child_data="1")
child_2 = Child.objects.create(parent_data="2", child_data="2")
string_data = serializers.serialize(
format,
[child_1.parent_ptr, child_2.parent_ptr, child_2, child_1],
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
)
child_1.delete()
child_2.delete()
for obj in serializers.deserialize(format, string_data):
obj.save()
children = Child.objects.all()
self.assertEqual(len(children), 2)
for child in children:
# If it's possible to find the superclass from the subclass and it's
# the correct superclass, it's working.
self.assertEqual(child.child_data, child.parent_data)
def forward_ref_fk_test(self, format):
t1 = NaturalKeyThing.objects.create(key="t1")
t2 = NaturalKeyThing.objects.create(key="t2", other_thing=t1)
t1.other_thing = t2
t1.save()
string_data = serializers.serialize(
format,
[t1, t2],
use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(
format, string_data, handle_forward_references=True
):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
for obj in objs_with_deferred_fields:
obj.save_deferred_fields()
t1 = NaturalKeyThing.objects.get(key="t1")
t2 = NaturalKeyThing.objects.get(key="t2")
self.assertEqual(t1.other_thing, t2)
self.assertEqual(t2.other_thing, t1)
def forward_ref_fk_with_error_test(self, format):
t1 = NaturalKeyThing.objects.create(key="t1")
t2 = NaturalKeyThing.objects.create(key="t2", other_thing=t1)
t1.other_thing = t2
t1.save()
string_data = serializers.serialize(
format,
[t1],
use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(
format, string_data, handle_forward_references=True
):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
obj = objs_with_deferred_fields[0]
msg = "NaturalKeyThing matching query does not exist"
with self.assertRaisesMessage(serializers.base.DeserializationError, msg):
obj.save_deferred_fields()
def forward_ref_m2m_test(self, format):
t1 = NaturalKeyThing.objects.create(key="t1")
t2 = NaturalKeyThing.objects.create(key="t2")
t3 = NaturalKeyThing.objects.create(key="t3")
t1.other_things.set([t2, t3])
string_data = serializers.serialize(
format,
[t1, t2, t3],
use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(
format, string_data, handle_forward_references=True
):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
for obj in objs_with_deferred_fields:
obj.save_deferred_fields()
t1 = NaturalKeyThing.objects.get(key="t1")
t2 = NaturalKeyThing.objects.get(key="t2")
t3 = NaturalKeyThing.objects.get(key="t3")
self.assertCountEqual(t1.other_things.all(), [t2, t3])
def forward_ref_m2m_with_error_test(self, format):
t1 = NaturalKeyThing.objects.create(key="t1")
t2 = NaturalKeyThing.objects.create(key="t2")
t3 = NaturalKeyThing.objects.create(key="t3")
t1.other_things.set([t2, t3])
t1.save()
string_data = serializers.serialize(
format,
[t1, t2],
use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(
format, string_data, handle_forward_references=True
):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
obj = objs_with_deferred_fields[0]
msg = "NaturalKeyThing matching query does not exist"
with self.assertRaisesMessage(serializers.base.DeserializationError, msg):
obj.save_deferred_fields()
def pk_with_default(self, format):
"""
The deserializer works with natural keys when the primary key has a default
value.
"""
obj = NaturalPKWithDefault.objects.create(name="name")
string_data = serializers.serialize(
format,
NaturalPKWithDefault.objects.all(),
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
)
objs = list(serializers.deserialize(format, string_data))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].object.pk, obj.pk)
def fk_as_pk_natural_key_not_called(self, format):
"""
The deserializer doesn't rely on natural keys when a model has a custom
primary key that is a ForeignKey.
"""
o1 = NaturalKeyAnchor.objects.create(data="978-1590599969")
o2 = FKAsPKNoNaturalKey.objects.create(pk_fk=o1)
serialized_data = serializers.serialize(format, [o1, o2])
deserialized_objects = list(serializers.deserialize(format, serialized_data))
self.assertEqual(len(deserialized_objects), 2)
for obj in deserialized_objects:
self.assertEqual(obj.object.pk, o1.pk)
# Dynamically register tests for each serializer
register_tests(
NaturalKeySerializerTests,
"test_%s_natural_key_serializer",
natural_key_serializer_test,
)
register_tests(
NaturalKeySerializerTests, "test_%s_serializer_natural_keys", natural_key_test
)
register_tests(
NaturalKeySerializerTests, "test_%s_serializer_natural_pks_mti", natural_pk_mti_test
)
register_tests(
NaturalKeySerializerTests, "test_%s_forward_references_fks", forward_ref_fk_test
)
register_tests(
NaturalKeySerializerTests,
"test_%s_forward_references_fk_errors",
forward_ref_fk_with_error_test,
)
register_tests(
NaturalKeySerializerTests, "test_%s_forward_references_m2ms", forward_ref_m2m_test
)
register_tests(
NaturalKeySerializerTests,
"test_%s_forward_references_m2m_errors",
forward_ref_m2m_with_error_test,
)
register_tests(NaturalKeySerializerTests, "test_%s_pk_with_default", pk_with_default)
register_tests(
NaturalKeySerializerTests,
"test_%s_fk_as_pk_natural_key_not_called",
fk_as_pk_natural_key_not_called,
)
| NaturalKeySerializerTests |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/sharding/sharding_policies.py | {
"start": 2625,
"end": 14993
} | class ____(sharding_util.ShardingCallback):
"""Policy that splits tensors into shards with a max shard size.
Shards may exceed the max shard size if they contain 1. a single scalar/string
tensor that could not be sliced and exceeds the max shard size or 2. the
checkpoint object graph, whose size cannot be calculated when saving.
"""
class MaxShardSizePartitioner():
"""Partition tensors into shards with a max shard size."""
max_shard_size: int
_large_scalars: MutableSequence[sharding_util.Shard]
_tensors_by_shard: MutableSequence[sharding_util.Shard]
_shard_size_remaining: int
_checkpoint_key: str
_dtype: dtypes.DType
_device: device_lib.DeviceSpec
_root_tensor: tensor_lib.Tensor
_slice_spec: variables.Variable.SaveSliceInfo
_full_shape: tensor_shape.TensorShape
_root_shape: tensor_shape.TensorShape
_root_offset: Sequence[int]
_dtype_size: int
_working_tensor_offset: MutableSequence[float]
_working_tensor_shape: tensor_shape.TensorShape
def _get_next_partition(self) -> tuple[int, float]:
"""Gets tensor partition with size closest to shard_size_remaining.
Returns:
A tuple containing the axis and size of the next partition.
"""
rank = self._working_tensor_shape.rank
if rank is None or rank == 0:
return 0, math.inf
num_elems = self._working_tensor_shape.num_elements()
def num_partitions(axis: int) -> float:
axis_len = self._working_tensor_shape.dims[axis].value
slice_elems = num_elems // axis_len
bytes_per_slice = slice_elems * self._dtype_size
slices_per_shard = self._shard_size_remaining // bytes_per_slice
if slices_per_shard == 0:
return math.inf
return math.ceil(axis_len / slices_per_shard)
# Find axis with minimum partitions. (axis with maximum partition size)
# (max partition size is as close as possible to the shard_size_remaining)
min_parts = num_partitions(0)
min_axis = 0
for axis in range(1, rank):
parts_along_axis = num_partitions(axis)
part_size = num_elems * self._dtype_size / parts_along_axis
if (parts_along_axis < min_parts and
part_size <= self._shard_size_remaining):
min_axis, min_parts = axis, int(parts_along_axis)
return (min_axis,
math.ceil(int(self._working_tensor_shape[min_axis]) / min_parts))
def _add_partition(self, part_axis: int, part_size: float):
"""Adds the tensor partition to the shard, if possible.
Args:
part_axis: The axis of the partition.
part_size: The size of the partition.
Raises:
RuntimeError: When the slice size is larger than the remaining shard
size.
"""
# Add what we can to the current shard.
relative_offset = list(
map(operator.sub, self._working_tensor_offset, self._root_offset))
slice_shape = list(map(operator.sub, self._root_shape, relative_offset))
slice_shape[part_axis] = part_size
slice_size_in_bytes = int(math.prod(slice_shape)) * self._dtype_size
with ops.device(self._device):
tensor_slice = array_ops.slice(
self._root_tensor, begin=relative_offset, size=slice_shape)
slice_spec = variables.Variable.SaveSliceInfo(
full_name=self._checkpoint_key,
full_shape=self._full_shape,
var_offset=self._working_tensor_offset,
var_shape=slice_shape).spec.strip()
if slice_size_in_bytes > self.max_shard_size:
logging.warning("Tensor %s's minimum slice %s has size %s bytes and "
"cannot be partitioned into a shard of max shard size "
"%s bytes. It will be added as an individual shard "
"that exceeds the max shard size.",
self._checkpoint_key, slice_spec, slice_size_in_bytes,
self.max_shard_size)
self._large_scalars.append(
{self._checkpoint_key: {slice_spec: tensor_slice}})
elif slice_size_in_bytes > self._shard_size_remaining:
raise RuntimeError(
f"Slice size ({slice_size_in_bytes} bytes) is larger than the "
f"remaining shard size ({self._shard_size_remaining} bytes). This "
"should have been caught in MaxShardSizePolicy._add_partition().")
else:
(self._tensors_by_shard[-1]
.setdefault(self._checkpoint_key, {})[slice_spec]) = tensor_slice
self._shard_size_remaining -= slice_size_in_bytes
if self._shard_size_remaining == 0:
self._tensors_by_shard.append({})
self._shard_size_remaining = self.max_shard_size
# Get remaining portion of tensor to add to the next shard(s).
self._working_tensor_offset[part_axis] += part_size
relative_offset[part_axis] += part_size
self._working_tensor_shape = tensor_shape.TensorShape(list(
map(operator.sub, self._root_shape, relative_offset)))
def get_shards(
self,
max_shard_size: int,
shardable_tensors: Sequence[sharding_util.ShardableTensor]
) -> Sequence[sharding_util.Shard]:
"""Callback to split tensors into shards with a max shard size.
Args:
max_shard_size: The maximum size of a shard file in bytes.
shardable_tensors: A list of ShardableTensors.
Returns:
List of shard dicts containing tensors.
[ {checkpoint key: {slice_spec: tensor} } ]
"""
self.max_shard_size = max_shard_size
self._tensors_by_shard = [{}]
self._large_scalars = []
string_size_warning_printed = False
self._shard_size_remaining = self.max_shard_size
for shardable_tensor in shardable_tensors:
self._checkpoint_key = shardable_tensor.checkpoint_key
self._dtype = shardable_tensor.dtype
self._device = shardable_tensor.device
self._root_tensor = shardable_tensor.tensor
self._slice_spec = shardable_tensor.slice_spec
# If the tensor has already been sliced, make sure to keep track of its
# parent tensor's shape & offset. These will be used when creating slice
# specs later.
if self._slice_spec:
save_slice_info = variables.Variable.SaveSliceInfo.from_spec(
self._slice_spec)
self._full_shape = tensor_shape.TensorShape(
save_slice_info.full_shape)
self._root_shape = tensor_shape.TensorShape(save_slice_info.var_shape)
self._root_offset = save_slice_info.var_offset
else:
self._full_shape = self._root_shape = shardable_tensor.shape
self._root_offset = [0] * self._root_shape.rank
self._dtype_size = dtypes.as_dtype(self._dtype).size
total_size = self._root_shape.num_elements() * self._dtype_size # bytes
# Calculate string tensor sizes.
if self._checkpoint_key == base.OBJECT_GRAPH_PROTO_KEY:
# In graph mode, the object graph is populated using feed_additions
# when the session is run. So, we can't calculate the size here.
# Fortunately, the serialized object graph string will never be that
# big, so we just place it in the current shard without worrying about
# its size.
total_size = self._dtype_size = 0
elif self._dtype == dtypes.variant:
# Can't determine a variant's type, so can't calculate its size.
total_size = self._dtype_size = 0
elif (self._dtype == dtypes.string
and not context.executing_eagerly()
and ops.get_default_session() is None):
# TODO(b/326287351): Get string tensor size in tf.function.
total_size = self._dtype_size = 0
if not string_size_warning_printed:
logging.warning("The checkpoint sharding policy is being executed "
"in a tf.function. The size of the string/variant "
"constant cannot be obtained.")
string_size_warning_printed = True
elif self._dtype == dtypes.string:
with ops.device(self._device):
if not context.executing_eagerly():
self._root_tensor = ops.get_default_session().run(
self._root_tensor)
if self._root_shape.rank is None or self._root_shape.rank == 0:
sizes = [string_ops.string_length(self._root_tensor,
unit="BYTE")]
else:
sizes = [string_ops.string_length(elem, unit="BYTE")
for elem in self._root_tensor]
if context.executing_eagerly():
sizes = [size.numpy() for size in sizes]
else:
sizes = ops.get_default_session().run(sizes)
total_size = sum(sizes)
self._dtype_size = max(sizes)
if self._root_shape.rank is None or self._root_shape.rank == 0:
if total_size > self.max_shard_size:
logging.warning(
"Tensor %s is a %s scalar of size %s bytes and cannot be "
"partitioned into a shard of max shard size %s bytes. It will "
"be added as an individual shard that exceeds the max shard "
"size.", self._checkpoint_key, self._dtype, total_size,
self.max_shard_size)
self._large_scalars.append(
{self._checkpoint_key: {self._slice_spec: self._root_tensor}})
else:
if total_size > self._shard_size_remaining:
self._tensors_by_shard.append({})
self._shard_size_remaining = self.max_shard_size
(self._tensors_by_shard[-1]
.setdefault(self._checkpoint_key, {})
[self._slice_spec]) = self._root_tensor
self._shard_size_remaining -= total_size
continue
# Partition tensor and add partitions to shards.
self._working_tensor_offset = self._root_offset[:]
self._working_tensor_shape = self._root_shape
working_tensor_size = total_size
while working_tensor_size > self._shard_size_remaining:
(part_axis, part_size) = self._get_next_partition()
if part_size == 0:
# Tensor partition couldn't fit in remaining shard space. Try again
# with the next full shard.
self._tensors_by_shard.append({})
self._shard_size_remaining = self.max_shard_size
else:
self._add_partition(part_axis, part_size)
working_tensor_size = (
int(math.prod(self._working_tensor_shape)) * self._dtype_size)
if self._working_tensor_shape.num_elements() > 0:
if self._working_tensor_offset and self._working_tensor_shape:
with ops.device(self._device):
working_tensor = array_ops.slice(
self._root_tensor,
begin=list(map(
operator.sub,
self._working_tensor_offset, self._root_offset)),
size=self._working_tensor_shape.as_list())
else:
working_tensor = self._root_tensor
remaining_tensor_slice_spec = variables.Variable.SaveSliceInfo(
full_name=self._checkpoint_key,
full_shape=self._full_shape,
var_offset=self._working_tensor_offset,
var_shape=self._working_tensor_shape).spec.strip()
(self._tensors_by_shard[-1]
.setdefault(self._checkpoint_key, {})
[remaining_tensor_slice_spec]) = working_tensor
self._shard_size_remaining -= working_tensor_size
shards = []
if self._tensors_by_shard[0]:
shards.extend(self._tensors_by_shard)
shards.extend(self._large_scalars)
return shards
def __init__(self, max_shard_size: int):
self.max_shard_size = max_shard_size
@property
def description(self) -> str:
return "Split tensors into shards with a max shard size."
def __call__(
self, shardable_tensors: Sequence[sharding_util.ShardableTensor]
) -> Sequence[sharding_util.Shard]:
return self.MaxShardSizePartitioner().get_shards(
self.max_shard_size, shardable_tensors)
| MaxShardSizePolicy |
python | davidhalter__jedi | test/test_api/test_classes.py | {
"start": 19108,
"end": 20142
} | class ____:
"""my class"""
@staticmethod
def hello():
func_var = 1
return func_var
'''
@pytest.mark.parametrize(
'code, pos, start, end', [
('def a_func():\n return "bar"\n', (1, 4), (1, 0), (2, 16)),
('var1 = 12', (1, 0), (1, 0), (1, 9)),
('var1 + 1', (1, 0), (1, 0), (1, 4)),
('class AClass: pass', (1, 6), (1, 0), (1, 18)),
('class AClass: pass\n', (1, 6), (1, 0), (1, 18)),
(cls_code, (1, 6), (1, 0), (6, 23)),
(cls_code, (4, 8), (4, 4), (6, 23)),
(cls_code, (5, 8), (5, 8), (5, 20)),
]
)
def test_definition_start_end_position(Script, code, pos, start, end):
'''Tests for definition_start_position and definition_end_position'''
name = next(
n for n in Script(code=code).get_names(all_scopes=True, references=True)
if n._name.tree_name.start_pos <= pos <= n._name.tree_name.end_pos
)
assert name.get_definition_start_position() == start
assert name.get_definition_end_position() == end
| AClass |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/event.py | {
"start": 460,
"end": 782
} | class ____(BaseEvent):
"""Event dispatched when GitHub repository processing completes."""
repository_name: str
branch_or_commit: str
total_documents: int = 0
@classmethod
def class_name(cls) -> str:
return "GitHubRepositoryProcessingCompletedEvent"
| GitHubRepositoryProcessingCompletedEvent |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_config.py | {
"start": 11515,
"end": 18011
} | class ____(TestConfigEndpoint):
@pytest.mark.parametrize(
("section", "option", "headers", "expected_status_code", "expected_response"),
[
(
SECTION_CORE,
OPTION_KEY_PARALLELISM,
HEADERS_JSON,
200,
GET_CONFIG_VALUE_CORE_PARALLELISM_JSON_RESPONSE,
),
(
SECTION_CORE,
OPTION_KEY_PARALLELISM,
HEADERS_JSON_UTF8,
200,
GET_CONFIG_VALUE_CORE_PARALLELISM_JSON_RESPONSE,
),
(
SECTION_CORE,
OPTION_KEY_PARALLELISM,
HEADERS_ANY,
200,
GET_CONFIG_VALUE_CORE_PARALLELISM_JSON_RESPONSE,
),
(
SECTION_CORE,
OPTION_KEY_PARALLELISM,
HEADERS_NONE,
200,
GET_CONFIG_VALUE_CORE_PARALLELISM_JSON_RESPONSE,
),
(
SECTION_SMTP,
OPTION_KEY_SMTP_HOST,
HEADERS_TEXT,
200,
textwrap.dedent(
f"""\
[{SECTION_SMTP}]
{OPTION_KEY_SMTP_HOST} = {OPTION_VALUE_SMTP_HOST}
"""
),
),
(
SECTION_SMTP,
OPTION_KEY_SMTP_MAIL_FROM,
HEADERS_JSON,
200,
{
"sections": [
{
"name": SECTION_SMTP,
"options": [
{"key": OPTION_KEY_SMTP_MAIL_FROM, "value": OPTION_VALUE_SMTP_MAIL_FROM},
],
},
],
},
),
(
SECTION_DATABASE,
OPTION_KEY_SQL_ALCHEMY_CONN,
HEADERS_JSON,
200,
{
"sections": [
{
"name": SECTION_DATABASE,
"options": [
{"key": OPTION_KEY_SQL_ALCHEMY_CONN, "value": OPTION_VALUE_SENSITIVE_HIDDEN},
],
},
],
},
),
(
SECTION_DATABASE,
OPTION_KEY_SQL_ALCHEMY_CONN,
HEADERS_TEXT,
200,
textwrap.dedent(
f"""\
[{SECTION_DATABASE}]
{OPTION_KEY_SQL_ALCHEMY_CONN} = {OPTION_VALUE_SENSITIVE_HIDDEN}
"""
),
),
(SECTION_CORE, OPTION_KEY_PARALLELISM, HEADERS_JSON, 403, FORBIDDEN_RESPONSE),
(
SECTION_NOT_EXIST,
OPTION_KEY_PARALLELISM,
HEADERS_JSON,
404,
{"detail": f"Option [{SECTION_NOT_EXIST}/{OPTION_KEY_PARALLELISM}] not found."},
),
(
SECTION_CORE,
OPTION_NOT_EXIST,
HEADERS_JSON,
404,
{"detail": f"Option [{SECTION_CORE}/{OPTION_NOT_EXIST}] not found."},
),
],
)
def test_get_config_value(
self, test_client, section, option, headers, expected_status_code, expected_response
):
if expected_status_code == 403:
with conf_vars(AIRFLOW_CONFIG_DISABLE_EXPOSE_CONFIG):
response = test_client.get(f"/config/section/{section}/option/{option}", headers=headers)
else:
response = test_client.get(f"/config/section/{section}/option/{option}", headers=headers)
self._validate_response(headers, expected_response, expected_status_code, response)
@pytest.mark.parametrize(
("section", "option", "headers", "expected_status_code", "expected_response"),
[
(
SECTION_DATABASE,
OPTION_KEY_SQL_ALCHEMY_CONN,
HEADERS_JSON,
200,
GET_CONFIG_VALUE_NON_SENSITIVE_ONLY_DATABASE_SQL_ALCHEMY_CONN_JSON_RESPONSE,
),
(
SECTION_DATABASE,
OPTION_KEY_SQL_ALCHEMY_CONN,
HEADERS_JSON_UTF8,
200,
GET_CONFIG_VALUE_NON_SENSITIVE_ONLY_DATABASE_SQL_ALCHEMY_CONN_JSON_RESPONSE,
),
(
SECTION_DATABASE,
OPTION_KEY_SQL_ALCHEMY_CONN,
HEADERS_ANY,
200,
GET_CONFIG_VALUE_NON_SENSITIVE_ONLY_DATABASE_SQL_ALCHEMY_CONN_JSON_RESPONSE,
),
(
SECTION_DATABASE,
OPTION_KEY_SQL_ALCHEMY_CONN,
HEADERS_NONE,
200,
GET_CONFIG_VALUE_NON_SENSITIVE_ONLY_DATABASE_SQL_ALCHEMY_CONN_JSON_RESPONSE,
),
(
SECTION_DATABASE,
OPTION_KEY_SQL_ALCHEMY_CONN,
HEADERS_TEXT,
200,
textwrap.dedent(
f"""\
[{SECTION_DATABASE}]
{OPTION_KEY_SQL_ALCHEMY_CONN} = {OPTION_VALUE_SENSITIVE_HIDDEN}
"""
),
),
],
)
def test_get_config_value_non_sensitive_only(
self, test_client, section, option, headers, expected_status_code, expected_response
):
with conf_vars(AIRFLOW_CONFIG_NON_SENSITIVE_ONLY_CONFIG):
response = test_client.get(f"/config/section/{section}/option/{option}", headers=headers)
self._validate_response(headers, expected_response, expected_status_code, response)
def test_get_config_value_should_response_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
f"/config/section/{SECTION_DATABASE}/option/{OPTION_KEY_SQL_ALCHEMY_CONN}"
)
assert response.status_code == 401
def test_get_config_value_should_response_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(
f"/config/section/{SECTION_DATABASE}/option/{OPTION_KEY_SQL_ALCHEMY_CONN}"
)
assert response.status_code == 403
| TestGetConfigValue |
python | numba__numba | numba/cpython/hashing.py | {
"start": 14161,
"end": 27042
} | class ____(Union):
_fields_ = [
# ensure 24 bytes
('uc', c_ubyte * 24),
# two Py_hash_t for FNV
('fnv', FNV),
# two uint64 for SipHash24
('siphash', SIPHASH),
# a different (!) Py_hash_t for small string optimization
('djbx33a', DJBX33A),
('expat', EXPAT),
]
_hashsecret_entry = namedtuple('_hashsecret_entry', ['symbol', 'value'])
# Only a few members are needed at present
def _build_hashsecret():
"""Read hash secret from the Python process
Returns
-------
info : dict
- keys are "djbx33a_suffix", "siphash_k0", siphash_k1".
- values are the namedtuple[symbol:str, value:int]
"""
# Read hashsecret and inject it into the LLVM symbol map under the
# prefix `_numba_hashsecret_`.
pyhashsecret = _Py_HashSecret_t.in_dll(pythonapi, '_Py_HashSecret')
info = {}
def inject(name, val):
symbol_name = "_numba_hashsecret_{}".format(name)
val = ctypes.c_uint64(val)
addr = ctypes.addressof(val)
ll.add_symbol(symbol_name, addr)
info[name] = _hashsecret_entry(symbol=symbol_name, value=val)
inject('djbx33a_suffix', pyhashsecret.djbx33a.suffix)
inject('siphash_k0', pyhashsecret.siphash.k0)
inject('siphash_k1', pyhashsecret.siphash.k1)
return info
_hashsecret = _build_hashsecret()
# ------------------------------------------------------------------------------
if _Py_hashfunc_name in ('siphash13', 'siphash24', 'fnv'):
# Check for use of the FNV hashing alg, warn users that it's not implemented
# and functionality relying of properties derived from hashing will be fine
# but hash values themselves are likely to be different.
if _Py_hashfunc_name == 'fnv':
msg = ("FNV hashing is not implemented in Numba. See PEP 456 "
"https://www.python.org/dev/peps/pep-0456/ "
"for rationale over not using FNV. Numba will continue to work, "
"but hashes for built in types will be computed using "
"siphash24. This will permit e.g. dictionaries to continue to "
"behave as expected, however anything relying on the value of "
"the hash opposed to hash as a derived property is likely to "
"not work as expected.")
warnings.warn(msg)
# This is a translation of CPython's siphash24 function:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L287-L413 # noqa: E501
# and also, since Py 3.11, a translation of CPython's siphash13 function:
# https://github.com/python/cpython/blob/9dda9020abcf0d51d59b283a89c58c8e1fb0f574/Python/pyhash.c#L376-L424
# the only differences are in the use of SINGLE_ROUND in siphash13 vs.
# DOUBLE_ROUND in siphash24, and that siphash13 has an extra "ROUND" applied
# just before the final XORing of components to create the return value.
# /* *********************************************************************
# <MIT License>
# Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </MIT License>
# Original location:
# https://github.com/majek/csiphash/
# Solution inspired by code from:
# Samuel Neves (supercop/crypto_auth/siphash24/little)
#djb (supercop/crypto_auth/siphash24/little2)
# Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
# Modified for Python by Christian Heimes:
# - C89 / MSVC compatibility
# - _rotl64() on Windows
# - letoh64() fallback
# */
@register_jitable(locals={'x': types.uint64,
'b': types.uint64, })
def _ROTATE(x, b):
return types.uint64(((x) << (b)) | ((x) >> (types.uint64(64) - (b))))
@register_jitable(locals={'a': types.uint64,
'b': types.uint64,
'c': types.uint64,
'd': types.uint64,
's': types.uint64,
't': types.uint64, })
def _HALF_ROUND(a, b, c, d, s, t):
a += b
c += d
b = _ROTATE(b, s) ^ a
d = _ROTATE(d, t) ^ c
a = _ROTATE(a, 32)
return a, b, c, d
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64, })
def _SINGLE_ROUND(v0, v1, v2, v3):
v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
return v0, v1, v2, v3
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64, })
def _DOUBLE_ROUND(v0, v1, v2, v3):
v0, v1, v2, v3 = _SINGLE_ROUND(v0, v1, v2, v3)
v0, v1, v2, v3 = _SINGLE_ROUND(v0, v1, v2, v3)
return v0, v1, v2, v3
def _gen_siphash(alg):
if alg == 'siphash13':
_ROUNDER = _SINGLE_ROUND
_EXTRA_ROUND = True
elif alg == 'siphash24':
_ROUNDER = _DOUBLE_ROUND
_EXTRA_ROUND = False
else:
assert 0, 'unreachable'
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64,
'b': types.uint64,
'mi': types.uint64,
't': types.uint64,
'mask': types.uint64,
'jmp': types.uint64,
'ohexefef': types.uint64})
def _siphash(k0, k1, src, src_sz):
b = types.uint64(src_sz) << 56
v0 = k0 ^ types.uint64(0x736f6d6570736575)
v1 = k1 ^ types.uint64(0x646f72616e646f6d)
v2 = k0 ^ types.uint64(0x6c7967656e657261)
v3 = k1 ^ types.uint64(0x7465646279746573)
idx = 0
while (src_sz >= 8):
mi = grab_uint64_t(src, idx)
idx += 1
src_sz -= 8
v3 ^= mi
v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
v0 ^= mi
# this is the switch fallthrough:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L390-L400 # noqa: E501
t = types.uint64(0x0)
boffset = idx * 8
ohexefef = types.uint64(0xff)
if src_sz >= 7:
jmp = (6 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 6))
<< jmp)
if src_sz >= 6:
jmp = (5 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 5))
<< jmp)
if src_sz >= 5:
jmp = (4 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 4))
<< jmp)
if src_sz >= 4:
t &= types.uint64(0xffffffff00000000)
for i in range(4):
jmp = i * 8
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + i))
<< jmp)
if src_sz >= 3:
jmp = (2 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 2))
<< jmp)
if src_sz >= 2:
jmp = (1 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 1))
<< jmp)
if src_sz >= 1:
mask = ~(ohexefef)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 0)))
b |= t
v3 ^= b
v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
v0 ^= b
v2 ^= ohexefef
v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
if _EXTRA_ROUND:
v0, v1, v2, v3 = _ROUNDER(v0, v1, v2, v3)
t = (v0 ^ v1) ^ (v2 ^ v3)
return t
return _siphash
_siphash13 = _gen_siphash('siphash13')
_siphash24 = _gen_siphash('siphash24')
_siphasher = _siphash13 if _Py_hashfunc_name == 'siphash13' else _siphash24
else:
msg = "Unsupported hashing algorithm in use %s" % _Py_hashfunc_name
raise ValueError(msg)
@intrinsic
def _inject_hashsecret_read(tyctx, name):
"""Emit code to load the hashsecret.
"""
if not isinstance(name, types.StringLiteral):
raise errors.TypingError("requires literal string")
sym = _hashsecret[name.literal_value].symbol
resty = types.uint64
sig = resty(name)
def impl(cgctx, builder, sig, args):
mod = builder.module
try:
# Search for existing global
gv = mod.get_global(sym)
except KeyError:
# Inject the symbol if not already exist.
gv = ir.GlobalVariable(mod, ir.IntType(64), name=sym)
v = builder.load(gv)
return v
return sig, impl
def _load_hashsecret(name):
return _hashsecret[name].value
@overload(_load_hashsecret)
def _impl_load_hashsecret(name):
def imp(name):
return _inject_hashsecret_read(name)
return imp
# This is a translation of CPythons's _Py_HashBytes:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L145-L191 # noqa: E501
@register_jitable(locals={'_hash': _Py_uhash_t})
def _Py_HashBytes(val, _len):
if (_len == 0):
return process_return(0)
if (_len < _Py_HASH_CUTOFF):
# TODO: this branch needs testing, needs a CPython setup for it!
# /* Optimize hashing of very small strings with inline DJBX33A. */
_hash = _Py_uhash_t(5381) # /* DJBX33A starts with 5381 */
for idx in range(_len):
_hash = ((_hash << 5) + _hash) + np.uint8(grab_byte(val, idx))
_hash ^= _len
_hash ^= _load_hashsecret('djbx33a_suffix')
else:
tmp = _siphasher(types.uint64(_load_hashsecret('siphash_k0')),
types.uint64(_load_hashsecret('siphash_k1')),
val, _len)
_hash = process_return(tmp)
return process_return(_hash)
# This is an approximate translation of CPython's unicode_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/unicodeobject.c#L11635-L11663 # noqa: E501
@overload_method(types.UnicodeType, '__hash__')
def unicode_hash(val):
from numba.cpython.unicode import _kind_to_byte_width
def impl(val):
kindwidth = _kind_to_byte_width(val._kind)
_len = len(val)
# use the cache if possible
current_hash = val._hash
if current_hash != -1:
return current_hash
else:
# cannot write hash value to cache in the unicode struct due to
# pass by value on the struct making the struct member immutable
return _Py_HashBytes(val._data, kindwidth * _len)
return impl
| _Py_HashSecret_t |
python | PyCQA__pylint | doc/data/messages/s/signature-differs/good.py | {
"start": 84,
"end": 220
} | class ____(Animal):
def run(self, distance=0):
super(Animal, self).run(distance)
print("Fetched that stick, wuff !")
| Dog |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink22.py | {
"start": 315,
"end": 953
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink22.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url("A1", r"external:\\Vboxsvr\share\foo bar.xlsx")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/minimum-deletions-for-at-most-k-distinct-characters.py | {
"start": 75,
"end": 603
} | class ____(object):
def minDeletion(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
cnt = [0]*26
for x in s:
cnt[ord(x)-ord('a')] += 1
cnt2 = [0]*(max(cnt)+1)
for x in cnt:
cnt2[x] += 1
result = 0
total = 26-k
for i, x in enumerate(cnt2):
c = min(total, x)
result += i*c
total -= c
if total == 0:
break
return result
| Solution |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py | {
"start": 3651,
"end": 3810
} | class ____(StrEnum):
DEFAULT = "default"
CRITICAL = "critical"
WARNING = "warning"
ERROR = "error"
INFO = "info"
@dataclass
| PagerdutySeverity |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 626,
"end": 1371
} | class ____(TestCase):
"""Tests for ``take()``"""
def test_simple_take(self):
"""Test basic usage"""
t = mi.take(5, range(10))
self.assertEqual(t, [0, 1, 2, 3, 4])
def test_null_take(self):
"""Check the null case"""
t = mi.take(0, range(10))
self.assertEqual(t, [])
def test_negative_take(self):
"""Make sure taking negative items results in a ValueError"""
self.assertRaises(ValueError, lambda: mi.take(-3, range(10)))
def test_take_too_much(self):
"""Taking more than an iterator has remaining should return what the
iterator has remaining.
"""
t = mi.take(10, range(5))
self.assertEqual(t, [0, 1, 2, 3, 4])
| TakeTests |
python | pypa__pip | src/pip/_vendor/rich/highlighter.py | {
"start": 4755,
"end": 9586
} | class ____(RegexHighlighter):
"""Highlights the ISO8601 date time strings.
Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
"""
base_style = "iso8601."
highlights = [
#
# Dates
#
# Calendar month (e.g. 2008-08). The hyphen is required
r"^(?P<year>[0-9]{4})-(?P<month>1[0-2]|0[1-9])$",
# Calendar date w/o hyphens (e.g. 20080830)
r"^(?P<date>(?P<year>[0-9]{4})(?P<month>1[0-2]|0[1-9])(?P<day>3[01]|0[1-9]|[12][0-9]))$",
# Ordinal date (e.g. 2008-243). The hyphen is optional
r"^(?P<date>(?P<year>[0-9]{4})-?(?P<day>36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
#
# Weeks
#
# Week of the year (e.g., 2008-W35). The hyphen is optional
r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9]))$",
# Week date (e.g., 2008-W35-6). The hyphens are optional
r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9])-?(?P<day>[1-7]))$",
#
# Times
#
# Hours and minutes (e.g., 17:21). The colon is optional
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):?(?P<minute>[0-5][0-9]))$",
# Hours, minutes, and seconds w/o colons (e.g., 172159)
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))$",
# Time zone designator (e.g., Z, +07 or +07:00). The colons and the minutes are optional
r"^(?P<timezone>(Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?))$",
# Hours, minutes, and seconds with time zone designator (e.g., 17:21:59+07:00).
# All the colons are optional. The minutes in the time zone designator are also optional
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?)$",
#
# Date and Time
#
# Calendar date with hours, minutes, and seconds (e.g., 2008-08-30 17:21:59 or 20080830 172159).
# A space is required between the date and the time. The hyphens and colons are optional.
# This regex matches dates and times that specify some hyphens or colons but omit others.
# This does not follow ISO 8601
r"^(?P<date>(?P<year>[0-9]{4})(?P<hyphen>-)?(?P<month>1[0-2]|0[1-9])(?(hyphen)-)(?P<day>3[01]|0[1-9]|[12][0-9])) (?P<time>(?P<hour>2[0-3]|[01][0-9])(?(hyphen):)(?P<minute>[0-5][0-9])(?(hyphen):)(?P<second>[0-5][0-9]))$",
#
# XML Schema dates and times
#
# Date, with optional time zone (e.g., 2008-08-30 or 2008-08-30+07:00).
# Hyphens are required. This is the XML Schema 'date' type
r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
# Time, with optional fractional seconds and time zone (e.g., 01:45:36 or 01:45:36.123+07:00).
# There is no limit on the number of digits for the fractional seconds. This is the XML Schema 'time' type
r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<frac>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
# Date and time, with optional fractional seconds and time zone (e.g., 2008-08-30T01:45:36 or 2008-08-30T01:45:36.123Z).
# This is the XML Schema 'dateTime' type
r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))T(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<ms>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
]
if __name__ == "__main__": # pragma: no cover
from .console import Console
console = Console()
console.print("[bold green]hello world![/bold green]")
console.print("'[bold green]hello world![/bold green]'")
console.print(" /foo")
console.print("/foo/")
console.print("/foo/bar")
console.print("foo/bar/baz")
console.print("/foo/bar/baz?foo=bar+egg&egg=baz")
console.print("/foo/bar/baz/")
console.print("/foo/bar/baz/egg")
console.print("/foo/bar/baz/egg.py")
console.print("/foo/bar/baz/egg.py word")
console.print(" /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/baz/egg.py word")
console.print("foo /foo/bar/ba._++z/egg+.py word")
console.print("https://example.org?foo=bar#header")
console.print(1234567.34)
console.print(1 / 2)
console.print(-1 / 123123123123)
console.print(
"127.0.1.1 bar 192.168.1.4 2001:0db8:85a3:0000:0000:8a2e:0370:7334 foo"
)
import json
console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None)
| ISO8601Highlighter |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 28483,
"end": 29038
class ____(BaseFormatter):
    """A LaTeX formatter.

    To define the callables that compute the LaTeX representation of your
    objects, define a :meth:`_repr_latex_` method or use the :meth:`for_type`
    or :meth:`for_type_by_name` methods to register functions that handle
    this.

    The return value of this formatter should be a valid LaTeX equation,
    enclosed in either ```$```, ```$$``` or another LaTeX equation
    environment.
    """
    # MIME type under which this representation is published.
    format_type = Unicode('text/latex')
    # Name of the special method objects may implement to provide LaTeX.
    print_method = ObjectName('_repr_latex_')
| LatexFormatter |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/completion_widget.py | {
"start": 141,
"end": 8151
class ____(QtWidgets.QListWidget):
    """ A widget for GUI tab completion.
    """

    #--------------------------------------------------------------------------
    # 'QObject' interface
    #--------------------------------------------------------------------------

    def __init__(self, console_widget, height=0):
        """ Create a completion widget that is attached to the specified Qt
            text edit widget.

        Parameters
        ----------
        console_widget :
            The console widget whose ``_control`` text edit the popup is
            attached to.
        height : int, optional
            Maximum popup height in pixels; if <= 0 the default size hint
            is used instead.
        """
        text_edit = console_widget._control
        assert isinstance(text_edit, (QtWidgets.QTextEdit, QtWidgets.QPlainTextEdit))
        super().__init__(parent=console_widget)
        self._text_edit = text_edit
        # Cap on the popup height; fall back to the widget's own size hint.
        self._height_max = height if height > 0 else self.sizeHint().height()
        self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)

        # We need Popup style to ensure correct mouse interaction
        # (dialog would disappear on mouse click with ToolTip style)
        self.setWindowFlags(QtCore.Qt.Popup)

        self.setAttribute(QtCore.Qt.WA_StaticContents)
        original_policy = text_edit.focusPolicy()

        # Take no focus ourselves; restore the text edit's original policy
        # afterwards so typing continues to go to it.
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        text_edit.setFocusPolicy(original_policy)

        # Ensure that the text edit keeps focus when widget is displayed.
        self.setFocusProxy(self._text_edit)

        self.setFrameShadow(QtWidgets.QFrame.Plain)
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)

        self.itemActivated.connect(self._complete_current)

    def eventFilter(self, obj, event):
        """ Reimplemented to handle mouse input and to auto-hide when the
            text edit loses focus.
        """
        if obj is self:
            if event.type() == QtCore.QEvent.MouseButtonPress:
                pos = self.mapToGlobal(event.pos())
                target = QtWidgets.QApplication.widgetAt(pos)
                # Clicks inside the popup are handled normally; clicks
                # anywhere else dismiss the completion.
                if (target and self.isAncestorOf(target) or target is self):
                    return False
                else:
                    self.cancel_completion()

        return super().eventFilter(obj, event)

    def keyPressEvent(self, event):
        # Accept keys complete; Escape cancels; navigation keys are handled
        # by the list view; everything else is forwarded to the text edit.
        key = event.key()
        if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter,
                   QtCore.Qt.Key_Tab):
            self._complete_current()
        elif key == QtCore.Qt.Key_Escape:
            self.hide()
        elif key in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down,
                     QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown,
                     QtCore.Qt.Key_Home, QtCore.Qt.Key_End):
            return super().keyPressEvent(event)
        else:
            QtWidgets.QApplication.sendEvent(self._text_edit, event)

    #--------------------------------------------------------------------------
    # 'QWidget' interface
    #--------------------------------------------------------------------------

    def hideEvent(self, event):
        """ Reimplemented to disconnect signal handlers and event filter.
        """
        super().hideEvent(event)
        try:
            self._text_edit.cursorPositionChanged.disconnect(self._update_current)
        except TypeError:
            # Raised by Qt when the signal was never connected; safe to ignore.
            pass
        self.removeEventFilter(self)

    def showEvent(self, event):
        """ Reimplemented to connect signal handlers and event filter.
        """
        super().showEvent(event)
        self._text_edit.cursorPositionChanged.connect(self._update_current)
        self.installEventFilter(self)

    #--------------------------------------------------------------------------
    # 'CompletionWidget' interface
    #--------------------------------------------------------------------------

    def show_items(self, cursor, items, prefix_length=0):
        """ Shows the completion widget with 'items' at the position specified
            by 'cursor'.
        """
        point = self._get_top_left_position(cursor)
        self.clear()
        path_items = []
        for item in items:
            # Check if the item could refer to a file or dir. The replacing
            # of '"' is needed for items on Windows
            if (os.path.isfile(os.path.abspath(item.replace("\"", ""))) or
                    os.path.isdir(os.path.abspath(item.replace("\"", "")))):
                path_items.append(item.replace("\"", ""))
            else:
                list_item = QtWidgets.QListWidgetItem()
                list_item.setData(QtCore.Qt.UserRole, item)
                # Need to split to only show last element of a dot completion
                list_item.setText(item.split(".")[-1])
                self.addItem(list_item)
        # Filesystem completions are shown relative to their common directory
        # prefix to keep the list compact; the full path stays in UserRole.
        common_prefix = os.path.dirname(os.path.commonprefix(path_items))
        for path_item in path_items:
            list_item = QtWidgets.QListWidgetItem()
            list_item.setData(QtCore.Qt.UserRole, path_item)
            if common_prefix:
                text = path_item.split(common_prefix)[-1]
            else:
                text = path_item
            list_item.setText(text)
            self.addItem(list_item)

        if QT6:
            screen_rect = self.screen().availableGeometry()
        else:
            screen_rect = QtWidgets.QApplication.desktop().availableGeometry(self)
        screen_height = screen_rect.height()
        height = int(min(self._height_max, screen_height - 50))  # -50px
        # If the popup would run off the bottom of the screen, flip it above
        # the cursor instead.
        if ((screen_height - point.y() - height) < 0):
            point = self._text_edit.mapToGlobal(self._text_edit.cursorRect().topRight())
            py = point.y()
            point.setY(int(py - min(height, py - 10)))  # -10px
        w = (self.sizeHintForColumn(0) +
             self.verticalScrollBar().sizeHint().width() +
             2 * self.frameWidth())
        self.setGeometry(point.x(), point.y(), w, height)

        # Move cursor to start of the prefix to replace it
        # when a item is selected
        cursor.movePosition(QtGui.QTextCursor.Left, n=prefix_length)
        self._start_position = cursor.position()
        self.setCurrentRow(0)
        self.raise_()
        self.show()

    #--------------------------------------------------------------------------
    # Protected interface
    #--------------------------------------------------------------------------

    def _get_top_left_position(self, cursor):
        """ Get top left position for this widget.
        """
        return self._text_edit.mapToGlobal(self._text_edit.cursorRect().bottomRight())

    def _complete_current(self):
        """ Perform the completion with the currently selected item.
        """
        text = self.currentItem().data(QtCore.Qt.UserRole)
        self._current_text_cursor().insertText(text)
        self.hide()

    def _current_text_cursor(self):
        """ Returns a cursor with text between the start position and the
            current position selected.
        """
        cursor = self._text_edit.textCursor()
        if cursor.position() >= self._start_position:
            cursor.setPosition(self._start_position,
                               QtGui.QTextCursor.KeepAnchor)
        return cursor

    def _update_current(self):
        """ Updates the current item based on the current text and the
            position of the widget.
        """
        # Update widget position
        cursor = self._text_edit.textCursor()
        point = self._get_top_left_position(cursor)
        point.setY(self.y())
        self.move(point)

        # Update current item
        prefix = self._current_text_cursor().selection().toPlainText()
        if prefix:
            items = self.findItems(prefix, (QtCore.Qt.MatchStartsWith |
                                            QtCore.Qt.MatchCaseSensitive))
            if items:
                self.setCurrentItem(items[0])
            else:
                # Typed text no longer matches any candidate: dismiss.
                self.hide()
        else:
            self.hide()

    def cancel_completion(self):
        # Public hook to dismiss the popup without completing.
        self.hide()
| CompletionWidget |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 27611,
"end": 27782
class ____(Integer):
    """Converter for the VOTable ``short`` datatype: a signed 16-bit integer."""

    # 16-bit two's-complement range.
    bit_size = "16-bit"
    val_range = (-32768, 32767)
    # numpy/struct format code for a 2-byte signed integer.
    format = "i2"
| Short |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/context.py | {
"start": 112806,
"end": 117847
class ____(_ColumnEntity):
    """Column/expression based entity."""

    supports_single_entity = False

    __slots__ = (
        "expr",
        "mapper",
        "column",
        "_label_name",
        "entity_zero_or_selectable",
        "entity_zero",
        "_extra_entities",
    )

    def __init__(
        self,
        compile_state,
        column,
        entities_collection,
        parententity,
        raw_column_index,
        is_current_entities,
        parent_bundle=None,
    ):
        # Set up this entity from an ORM-annotated column expression,
        # registering it either with the parent bundle or with the
        # statement's top-level entities collection.
        annotations = column._annotations

        _entity = parententity

        # an AliasedClass won't have proxy_key in the annotations for
        # a column if it was acquired using the class' adapter directly,
        # such as using AliasedInsp._adapt_element(). this occurs
        # within internal loaders.

        orm_key = annotations.get("proxy_key", None)
        proxy_owner = annotations.get("proxy_owner", _entity)

        if orm_key:
            self.expr = getattr(proxy_owner.entity, orm_key)
            self.translate_raw_column = False
        else:
            # if orm_key is not present, that means this is an ad-hoc
            # SQL ColumnElement, like a CASE() or other expression.
            # include this column position from the invoked statement
            # in the ORM-level ResultSetMetaData on each execute, so that
            # it can be targeted by identity after caching
            self.expr = column
            self.translate_raw_column = raw_column_index is not None

        self.raw_column_index = raw_column_index

        # The display label is only computed for the statement's current
        # (outermost) entities; nested/legacy entities carry None.
        if is_current_entities:
            if parent_bundle:
                self._label_name = orm_key if orm_key else column._proxy_key
            else:
                self._label_name = compile_state._label_convention(
                    column, col_name=orm_key
                )
        else:
            self._label_name = None

        # Attribute access for its side effect: ensures mapper configuration
        # has completed before we read .mapper below.
        _entity._post_inspect
        self.entity_zero = self.entity_zero_or_selectable = ezero = _entity
        self.mapper = mapper = _entity.mapper

        if parent_bundle:
            parent_bundle._entities.append(self)
        else:
            entities_collection.append(self)

        compile_state._has_orm_entities = True

        self.column = column

        self._fetch_column = self._row_processor = None

        self._extra_entities = (self.expr, self.column)

        if mapper._should_select_with_poly_adapter:
            compile_state._create_with_polymorphic_adapter(
                ezero, ezero.selectable
            )

    def corresponds_to(self, entity):
        # True if this column entity belongs to the given mapped entity.
        if _is_aliased_class(entity):
            # TODO: polymorphic subclasses ?
            return entity is self.entity_zero
        else:
            return not _is_aliased_class(
                self.entity_zero
            ) and entity.common_parent(self.entity_zero)

    def setup_dml_returning_compile_state(
        self,
        compile_state: _ORMCompileState,
        adapter: Optional[_DMLReturningColFilter],
    ) -> None:
        # Register this column for an INSERT/UPDATE/DELETE ... RETURNING.
        # The adapter may reject the column (returning None), in which case
        # only the fetch column is recorded.
        self._fetch_column = column = self.column
        if adapter:
            column = adapter(column, False)

        if column is not None:
            compile_state.dedupe_columns.add(column)
            compile_state.primary_columns.append(column)

    def setup_compile_state(self, compile_state):
        # Register this column with the SELECT being compiled, applying the
        # current aliasing adapter and single-table-inheritance criteria.
        current_adapter = compile_state._get_current_adapter()
        if current_adapter:
            column = current_adapter(self.column, False)
            if column is None:
                # Only DML RETURNING may legitimately drop the column here.
                assert compile_state.is_dml_returning
                self._fetch_column = self.column
                return
        else:
            column = self.column

        ezero = self.entity_zero

        single_table_crit = self.mapper._single_table_criterion
        if (
            single_table_crit is not None
            or ("additional_entity_criteria", self.mapper)
            in compile_state.global_attributes
        ):
            compile_state.extra_criteria_entities[ezero] = (
                ezero,
                ezero._adapter if ezero.is_aliased_class else None,
            )

        if column._annotations and not column._expression_label:
            # annotated columns perform more slowly in compiler and
            # result due to the __eq__() method, so use deannotated
            column = column._deannotate()

        # use entity_zero as the from if we have it. this is necessary
        # for polymorphic scenarios where our FROM is based on ORM entity,
        # not the FROM of the column. but also, don't use it if our column
        # doesn't actually have any FROMs that line up, such as when its
        # a scalar subquery.
        if set(self.column._from_objects).intersection(
            ezero.selectable._from_objects
        ):
            compile_state._fallback_from_clauses.append(ezero.selectable)

        compile_state.dedupe_columns.add(column)
        compile_state.primary_columns.append(column)
        self._fetch_column = column
| _ORMColumnEntity |
python | encode__httpx | httpx/_auth.py | {
"start": 5501,
"end": 11744
class ____(Auth):
    """HTTP Digest authentication (RFC 2617 / RFC 7616) as an httpx Auth flow."""

    # Maps the challenge's `algorithm` token to the hashlib constructor used
    # for both HA1/HA2 and the final response digest.
    _ALGORITHM_TO_HASH_FUNCTION: dict[str, typing.Callable[[bytes], _Hash]] = {
        "MD5": hashlib.md5,
        "MD5-SESS": hashlib.md5,
        "SHA": hashlib.sha1,
        "SHA-SESS": hashlib.sha1,
        "SHA-256": hashlib.sha256,
        "SHA-256-SESS": hashlib.sha256,
        "SHA-512": hashlib.sha512,
        "SHA-512-SESS": hashlib.sha512,
    }

    def __init__(self, username: str | bytes, password: str | bytes) -> None:
        self._username = to_bytes(username)
        self._password = to_bytes(password)
        # Last challenge seen from the server; reused to pre-authenticate
        # subsequent requests without waiting for a fresh 401.
        self._last_challenge: _DigestAuthChallenge | None = None
        # `nc` value: how many requests have used the current nonce.
        self._nonce_count = 1

    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
        # Opportunistically attach credentials from a previous challenge.
        if self._last_challenge:
            request.headers["Authorization"] = self._build_auth_header(
                request, self._last_challenge
            )

        response = yield request

        if response.status_code != 401 or "www-authenticate" not in response.headers:
            # If the response is not a 401 then we don't
            # need to build an authenticated request.
            return

        for auth_header in response.headers.get_list("www-authenticate"):
            if auth_header.lower().startswith("digest "):
                break
        else:
            # If the response does not include a 'WWW-Authenticate: Digest ...'
            # header, then we don't need to build an authenticated request.
            return

        self._last_challenge = self._parse_challenge(request, response, auth_header)
        self._nonce_count = 1

        request.headers["Authorization"] = self._build_auth_header(
            request, self._last_challenge
        )
        # Carry over any cookies the 401 response set (e.g. session cookies).
        if response.cookies:
            Cookies(response.cookies).set_cookie_header(request=request)
        yield request

    def _parse_challenge(
        self, request: Request, response: Response, auth_header: str
    ) -> _DigestAuthChallenge:
        """
        Returns a challenge from a Digest WWW-Authenticate header.
        These take the form of:
        `Digest realm="realm@host.com",qop="auth,auth-int",nonce="abc",opaque="xyz"`
        """
        scheme, _, fields = auth_header.partition(" ")

        # This method should only ever have been called with a Digest auth header.
        assert scheme.lower() == "digest"

        header_dict: dict[str, str] = {}
        for field in parse_http_list(fields):
            key, value = field.strip().split("=", 1)
            header_dict[key] = unquote(value)

        try:
            # realm and nonce are mandatory; opaque and qop are optional.
            realm = header_dict["realm"].encode()
            nonce = header_dict["nonce"].encode()
            algorithm = header_dict.get("algorithm", "MD5")
            opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None
            qop = header_dict["qop"].encode() if "qop" in header_dict else None
            return _DigestAuthChallenge(
                realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop
            )
        except KeyError as exc:
            message = "Malformed Digest WWW-Authenticate header"
            raise ProtocolError(message, request=request) from exc

    def _build_auth_header(
        self, request: Request, challenge: _DigestAuthChallenge
    ) -> str:
        # Compute the `Authorization: Digest ...` header value per RFC 7616.
        hash_func = self._ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm.upper()]

        def digest(data: bytes) -> bytes:
            # Hex digest as bytes, ready for further concatenation/hashing.
            return hash_func(data).hexdigest().encode()

        A1 = b":".join((self._username, challenge.realm, self._password))

        path = request.url.raw_path
        A2 = b":".join((request.method.encode(), path))
        # TODO: implement auth-int
        HA2 = digest(A2)

        # nc is a zero-padded 8-digit lowercase-hex request counter.
        nc_value = b"%08x" % self._nonce_count
        cnonce = self._get_client_nonce(self._nonce_count, challenge.nonce)
        self._nonce_count += 1

        HA1 = digest(A1)
        # The *-SESS variants fold the nonces into HA1 once per session.
        if challenge.algorithm.lower().endswith("-sess"):
            HA1 = digest(b":".join((HA1, challenge.nonce, cnonce)))

        qop = self._resolve_qop(challenge.qop, request=request)
        if qop is None:
            # Following RFC 2069
            digest_data = [HA1, challenge.nonce, HA2]
        else:
            # Following RFC 2617/7616
            digest_data = [HA1, challenge.nonce, nc_value, cnonce, qop, HA2]

        format_args = {
            "username": self._username,
            "realm": challenge.realm,
            "nonce": challenge.nonce,
            "uri": path,
            "response": digest(b":".join(digest_data)),
            "algorithm": challenge.algorithm.encode(),
        }
        if challenge.opaque:
            format_args["opaque"] = challenge.opaque
        if qop:
            format_args["qop"] = b"auth"
            format_args["nc"] = nc_value
            format_args["cnonce"] = cnonce

        return "Digest " + self._get_header_value(format_args)

    def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes:
        # Client nonce: 16 hex chars derived from counter, server nonce,
        # wall-clock time and OS randomness (uniqueness, not secrecy).
        s = str(nonce_count).encode()
        s += nonce
        s += time.ctime().encode()
        s += os.urandom(8)

        return hashlib.sha1(s).hexdigest()[:16].encode()

    def _get_header_value(self, header_fields: dict[str, bytes]) -> str:
        # Per the RFC grammar, algorithm/qop/nc appear unquoted; all other
        # fields are quoted-strings.
        NON_QUOTED_FIELDS = ("algorithm", "qop", "nc")
        QUOTED_TEMPLATE = '{}="{}"'
        NON_QUOTED_TEMPLATE = "{}={}"

        header_value = ""
        for i, (field, value) in enumerate(header_fields.items()):
            if i > 0:
                header_value += ", "
            template = (
                QUOTED_TEMPLATE
                if field not in NON_QUOTED_FIELDS
                else NON_QUOTED_TEMPLATE
            )
            header_value += template.format(field, to_str(value))

        return header_value

    def _resolve_qop(self, qop: bytes | None, request: Request) -> bytes | None:
        # Pick a supported qop from the server's comma-separated list.
        if qop is None:
            return None
        qops = re.split(b", ?", qop)
        if b"auth" in qops:
            return b"auth"

        if qops == [b"auth-int"]:
            raise NotImplementedError("Digest auth-int support is not yet implemented")

        message = f'Unexpected qop value "{qop!r}" in digest auth'
        raise ProtocolError(message, request=request)
| DigestAuth |
python | etianen__django-reversion | tests/test_app/tests/test_api.py | {
"start": 9801,
"end": 10319
class ____(TestModelMixin, TestBase):
    """Tests for reversion.set_date_created()."""

    def testSetDateCreated(self):
        # A custom creation date set inside a revision block is stored on
        # the resulting revision.
        date_created = timezone.now() - timedelta(days=20)
        with reversion.create_revision():
            reversion.set_date_created(date_created)
            obj = TestModel.objects.create()
        self.assertSingleRevision((obj,), date_created=date_created)

    def testDateCreatedNoBlock(self):
        # Calling set_date_created outside a create_revision() block is an
        # error.
        with self.assertRaises(reversion.RevisionManagementError):
            reversion.set_date_created(timezone.now())
| SetDateCreatedTest |
python | doocs__leetcode | solution/3200-3299/3217.Delete Nodes From Linked List Present in Array/Solution.py | {
"start": 151,
"end": 512
class ____:
    def modifiedList(
        self, nums: List[int], head: Optional[ListNode]
    ) -> Optional[ListNode]:
        """Delete every node whose value appears in ``nums``.

        Uses a sentinel node so removals at the head need no special case;
        runs in O(len(nums) + list length) time and O(len(nums)) space.
        """
        banned = set(nums)
        sentinel = ListNode(next=head)
        tail = sentinel
        while tail.next:
            candidate = tail.next
            if candidate.val in banned:
                # Splice the banned node out of the list.
                tail.next = candidate.next
            else:
                tail = candidate
        return sentinel.next
| Solution |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/variable_test.py | {
"start": 2134,
"end": 2196
class ____(object):
    """Trivial mutable holder for a single value, initially unset."""

    def __init__(self):
        # No variable assigned yet; populated later by the test.
        self.v = None
| Var |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/search.py | {
"start": 766,
"end": 6951
class ____:
    """
    The state of one search query, tied to a search field (e.g. a
    SearchToolbar).

    A searchable `BufferControl` points at a `search_buffer_control` (itself a
    `BufferControl`) representing the search field; that field's `SearchState`
    holds the query text, the direction, and the case sensitivity.

    Several `BufferControls` may share one search field and therefore one
    `SearchState`; conversely, multiple views of the same `Buffer` may each
    carry their own `SearchState` when they use different search controls.
    """

    __slots__ = ("text", "direction", "ignore_case")

    def __init__(
        self,
        text: str = "",
        direction: SearchDirection = SearchDirection.FORWARD,
        ignore_case: FilterOrBool = False,
    ) -> None:
        self.text = text
        self.direction = direction
        # Normalize bool/Filter input into a Filter.
        self.ignore_case = to_filter(ignore_case)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.text!r}, direction={self.direction!r}, ignore_case={self.ignore_case!r})"

    def __invert__(self) -> SearchState:
        """
        Return a copy of this state with the search direction reversed.
        """
        flipped = (
            SearchDirection.FORWARD
            if self.direction == SearchDirection.BACKWARD
            else SearchDirection.BACKWARD
        )
        return SearchState(
            text=self.text, direction=flipped, ignore_case=self.ignore_case
        )
def start_search(
    buffer_control: BufferControl | None = None,
    direction: SearchDirection = SearchDirection.FORWARD,
) -> None:
    """
    Start search through the given `buffer_control` using the
    `search_buffer_control`.

    :param buffer_control: Start search for this `BufferControl`. If not given,
        search through the current control.
    :param direction: Initial search direction (forward by default).
    """
    from prompt_toolkit.layout.controls import BufferControl

    assert buffer_control is None or isinstance(buffer_control, BufferControl)

    layout = get_app().layout

    # When no control is given, use the current control if that's a BufferControl.
    if buffer_control is None:
        if not isinstance(layout.current_control, BufferControl):
            return
        buffer_control = layout.current_control

    # Only if this control is searchable.
    search_buffer_control = buffer_control.search_buffer_control

    if search_buffer_control:
        buffer_control.search_state.direction = direction

        # Make sure to focus the search BufferControl
        layout.focus(search_buffer_control)

        # Remember search link.
        layout.search_links[search_buffer_control] = buffer_control

        # If we're in Vi mode, make sure to go into insert mode.
        get_app().vi_state.input_mode = InputMode.INSERT
def stop_search(buffer_control: BufferControl | None = None) -> None:
    """
    Stop search through the given `buffer_control`.

    Refocuses the original control, removes the search link, resets the
    search field's buffer, and (in Vi mode) returns to navigation mode.
    """
    layout = get_app().layout

    if buffer_control is None:
        buffer_control = layout.search_target_buffer_control
        if buffer_control is None:
            # (Should not happen, but possible when `stop_search` is called
            # when we're not searching.)
            return
        search_buffer_control = buffer_control.search_buffer_control
    else:
        assert buffer_control in layout.search_links.values()
        search_buffer_control = _get_reverse_search_links(layout)[buffer_control]

    # Focus the original buffer again.
    layout.focus(buffer_control)

    if search_buffer_control is not None:
        # Remove the search link.
        del layout.search_links[search_buffer_control]

        # Reset content of search control.
        search_buffer_control.buffer.reset()

    # If we're in Vi mode, go back to navigation mode.
    get_app().vi_state.input_mode = InputMode.NAVIGATION
def do_incremental_search(direction: SearchDirection, count: int = 1) -> None:
    """
    Apply search, but keep search buffer focused.

    :param direction: Direction to search in.
    :param count: Number of matches to advance by.
    """
    assert is_searching()

    layout = get_app().layout

    # Only search if the current control is a `BufferControl`.
    from prompt_toolkit.layout.controls import BufferControl

    search_control = layout.current_control
    if not isinstance(search_control, BufferControl):
        return

    prev_control = layout.search_target_buffer_control
    if prev_control is None:
        return
    search_state = prev_control.search_state

    # Update search_state.
    direction_changed = search_state.direction != direction

    search_state.text = search_control.buffer.text
    search_state.direction = direction

    # Apply search to current buffer.
    # NOTE(review): when only the direction changed, the search is not
    # re-applied — the next invocation in the new direction will move.
    if not direction_changed:
        prev_control.buffer.apply_search(
            search_state, include_current_position=False, count=count
        )
def accept_search() -> None:
    """
    Accept current search query. Focus original `BufferControl` again.

    Applies the search to the target buffer, records the query in the search
    field's history, and ends the search session.
    """
    layout = get_app().layout

    search_control = layout.current_control
    target_buffer_control = layout.search_target_buffer_control

    from prompt_toolkit.layout.controls import BufferControl

    if not isinstance(search_control, BufferControl):
        return
    if target_buffer_control is None:
        return

    search_state = target_buffer_control.search_state

    # Update search state.
    # (An empty search field keeps the previous query, mirroring readline.)
    if search_control.buffer.text:
        search_state.text = search_control.buffer.text

    # Apply search.
    target_buffer_control.buffer.apply_search(
        search_state, include_current_position=True
    )

    # Add query to history of search line.
    search_control.buffer.append_to_history()

    # Stop search and focus previous control again.
    stop_search(target_buffer_control)
def _get_reverse_search_links(
    layout: Layout,
) -> dict[BufferControl, SearchBufferControl]:
    """
    Return mapping from BufferControl to SearchBufferControl.

    This inverts ``layout.search_links``, which maps the search field to the
    control being searched.
    """
    reverse: dict[BufferControl, SearchBufferControl] = {}
    for search_buffer_control, buffer_control in layout.search_links.items():
        reverse[buffer_control] = search_buffer_control
    return reverse
| SearchState |
python | catalyst-team__catalyst | catalyst/core/callback.py | {
"start": 4054,
"end": 4254
class ____(Callback):
    """Metric callback interface, abstraction over metric step."""

    def __init__(self):
        """Init."""
        # All metric callbacks run at the Metric stage of the callback order.
        super().__init__(order=CallbackOrder.Metric)
| IMetricCallback |
python | prabhupant__python-ds | data_structures/graphs/level_of_nodes.py | {
"start": 319,
"end": 1190
class ____:
    """Undirected graph that can report each node's BFS level from a source.

    Nodes are labelled ``0 .. vertices - 1``; a node's "level" is its edge
    distance from the chosen source node.
    """

    def __init__(self, vertices):
        # Number of vertices in the graph.
        self.vertices = vertices
        # Adjacency list: node -> list of neighbours.
        self.graph = defaultdict(list)

    def add_edge(self, u, v):
        """Add an undirected edge between ``u`` and ``v``."""
        self.graph[u].append(v)
        self.graph[v].append(u)

    def print_levels(self, s):
        """Run BFS from source ``s`` and print every node's level.

        Unreachable nodes keep a level of ``None``.
        """
        # None marks "not yet visited"; compare with `is None` (PEP 8),
        # never `== None`.
        levels = [None] * self.vertices
        levels[s] = 0

        # Index cursor into the queue list instead of list.pop(0),
        # which is O(n) per dequeue.
        queue = [s]
        head = 0
        while head < len(queue):
            node = queue[head]
            head += 1
            for neighbour in self.graph[node]:
                if levels[neighbour] is None:
                    levels[neighbour] = levels[node] + 1
                    queue.append(neighbour)

        print('Node \t Level')
        for node, level in enumerate(levels):
            print(f'{node} \t {level}')
# Build the sample 8-node graph from the original example and print the BFS
# level of every node starting from node 0.
# NOTE(review): the class above was anonymised to `____` in this extract, so
# the `Graph` name below resolves only in the original module.
g = Graph(8)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 3)
g.add_edge(1, 4)
g.add_edge(1, 5)
g.add_edge(2, 5)
g.add_edge(2, 6)
g.add_edge(6, 7)
g.print_levels(0) | Graph |
python | readthedocs__readthedocs.org | readthedocs/projects/tests/test_domain_views.py | {
"start": 9096,
"end": 9319
class ____(TestDomainViews):
    """Re-runs the domain view tests with the projects owned by an organization."""

    def setUp(self):
        super().setUp()
        # Wrap the fixture projects in an organization owned by the test user
        # so the inherited tests exercise the organization permission paths.
        self.org = get(
            Organization, owners=[self.user], projects=[self.project, self.subproject]
        )
| TestDomainViewsWithOrganizations |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_india_zip.py | {
"start": 729,
"end": 1724
class ____(ColumnMapMetricProvider):
    """Column map metric: each value is a valid Indian PIN (zip) code.

    Validation itself is delegated to the module-level ``is_valid_india_zip``
    helper; only the pandas engine is implemented.
    """

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_india_zip"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # Element-wise boolean Series: True where the value is a valid PIN.
        return column.apply(lambda x: is_valid_india_zip(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
| ColumnValuesToBeValidIndiaZip |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 4669,
"end": 4803
class ____(Event):
    """Base class for events originating from a scheduler."""


# NOTE(review): this decorator belongs to the *next* class in the original
# module; it was captured here by the extraction span boundary.
@attrs.define(kw_only=True, frozen=True)
| SchedulerEvent |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/common_transformers/anf_test.py | {
"start": 2988,
"end": 12300
} | class ____(AnfTestBase):
def test_basic(self):
def test_function():
a = 0
return a
node, _ = parser.parse_entity(test_function, future_features=())
node = anf.transform(node, self._simple_context())
result, _, _ = loader.load_ast(node)
self.assertEqual(test_function(), result.test_function())
def test_binop_basic(self):
def test_function(x, y, z):
a = x + y + z
return a
def expected_result(x, y, z):
tmp_1001 = x + y
a = tmp_1001 + z
return a
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_if_basic(self):
def test_function(a, b, c, e, f, g):
if a + b + c:
d = e + f + g
return d
def expected_result(a, b, c, e, f, g):
tmp_1001 = a + b
tmp_1002 = tmp_1001 + c
if tmp_1002:
tmp_1003 = e + f
d = tmp_1003 + g
return d
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_nested_binop_and_return(self):
def test_function(b, c, d, e):
return (2 * b + c) + (d + e)
def expected_result(b, c, d, e):
tmp_1001 = 2 * b
tmp_1002 = tmp_1001 + c
tmp_1003 = d + e
tmp_1004 = tmp_1002 + tmp_1003
return tmp_1004
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_function_call_and_expr(self):
def test_function(call_something, a, b, y, z, c, d, e, f, g, h, i):
call_something(a + b, y * z, kwarg=c + d, *(e + f), **(g + h + i))
def expected_result(call_something, a, b, y, z, c, d, e, f, g, h, i):
tmp_1001 = g + h
tmp_1002 = a + b
tmp_1003 = y * z
tmp_1004 = e + f
tmp_1005 = c + d
tmp_1006 = tmp_1001 + i
call_something(tmp_1002, tmp_1003, kwarg=tmp_1005, *tmp_1004, **tmp_1006)
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_with_and_print(self):
def test_function(a, b, c):
with a + b + c as d:
print(2 * d + 1)
def expected_result(a, b, c):
tmp_1001 = a + b
tmp_1002 = tmp_1001 + c
with tmp_1002 as d:
tmp_1003 = 2 * d
tmp_1004 = tmp_1003 + 1
print(tmp_1004)
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_nested_multi_value_assign(self):
def test_function(a, b, c):
x, y = a, a + b
(z, y), x = (c, y + b), x + a
return z, (y, x)
def expected_result(a, b, c):
tmp_1001 = a + b
x, y = a, tmp_1001
tmp_1002 = y + b
tmp_1003 = (c, tmp_1002)
tmp_1004 = x + a
(z, y), x = tmp_1003, tmp_1004
tmp_1005 = y, x
tmp_1006 = z, tmp_1005
return tmp_1006
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_deeply_nested_multi_value_assign(self):
def test_function(a):
[([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
return [([(b, c), [d, e]], (f, g)), [(h, i, j), k]]
def expected_result(a):
[([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
tmp_1001 = b, c
tmp_1002 = [d, e]
tmp_1003 = [tmp_1001, tmp_1002]
tmp_1004 = f, g
tmp_1005 = h, i, j
tmp_1006 = tmp_1003, tmp_1004
tmp_1007 = [tmp_1005, k]
tmp_1008 = [tmp_1006, tmp_1007]
return tmp_1008
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_local_definition_and_binary_compare(self):
def test_function():
def foo(a, b):
return 2 * a < b
return foo
def expected_result():
def foo(a, b):
tmp_1001 = 2 * a
tmp_1002 = tmp_1001 < b
return tmp_1002
return foo
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_list_literal(self):
def test_function(a, b, c, d, e, f):
return [a + b, c + d, e + f]
def expected_result(a, b, c, d, e, f):
tmp_1001 = a + b
tmp_1002 = c + d
tmp_1003 = e + f
tmp_1004 = [tmp_1001, tmp_1002, tmp_1003]
return tmp_1004
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_tuple_literal_and_unary(self):
def test_function(a, b, c, d, e, f):
return (a + b, -(c + d), e + f)
def expected_result(a, b, c, d, e, f):
tmp_1001 = c + d
tmp_1002 = a + b
tmp_1003 = -tmp_1001
tmp_1004 = e + f
tmp_1005 = (tmp_1002, tmp_1003, tmp_1004)
return tmp_1005
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_set_literal(self):
def test_function(a, b, c, d, e, f):
return set(a + b, c + d, e + f)
def expected_result(a, b, c, d, e, f):
tmp_1001 = a + b
tmp_1002 = c + d
tmp_1003 = e + f
tmp_1004 = set(tmp_1001, tmp_1002, tmp_1003)
return tmp_1004
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_dict_literal_and_repr(self):
def test_function(foo, bar, baz):
return repr({foo + bar + baz: 7 | 8})
def expected_result(foo, bar, baz):
tmp_1001 = foo + bar
tmp_1002 = tmp_1001 + baz
tmp_1003 = 7 | 8
tmp_1004 = {tmp_1002: tmp_1003}
tmp_1005 = repr(tmp_1004)
return tmp_1005
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_field_read_and_write(self):
def test_function(a, d):
a.b.c = d.e.f + 3
def expected_result(a, d):
tmp_1001 = a.b
tmp_1002 = d.e
tmp_1003 = tmp_1002.f
tmp_1001.c = tmp_1003 + 3
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_subscript_read_and_write(self):
def test_function(a, b, c, d, e, f):
a[b][c] = d[e][f] + 3
def expected_result(a, b, c, d, e, f):
tmp_1001 = a[b]
tmp_1002 = d[e]
tmp_1003 = tmp_1002[f]
tmp_1001[c] = tmp_1003 + 3
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_augassign_and_delete(self):
def test_function(a, x, y, z):
a += x + y + z
del a
del z[y][x]
def expected_result(a, x, y, z):
tmp_1001 = x + y
a += tmp_1001 + z
del a
tmp_1002 = z[y]
del tmp_1002[x]
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_raise_yield_and_raise(self):
def test_function(a, c, some_computed, exception):
yield a ** c
raise some_computed('complicated' + exception)
def expected_result(a, c, some_computed, exception):
tmp_1001 = a ** c
yield tmp_1001
tmp_1002 = 'complicated' + exception
tmp_1003 = some_computed(tmp_1002)
raise tmp_1003
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_with_and_if_with_expressions(self):
def test_function(foo, bar, function, quux, quozzle, w, x, y, z):
with foo + bar:
function(x + y)
if quux + quozzle:
function(z / w)
def expected_result(foo, bar, function, quux, quozzle, w, x, y, z):
tmp_1001 = foo + bar
with tmp_1001:
tmp_1002 = x + y
function(tmp_1002)
tmp_1003 = quux + quozzle
if tmp_1003:
tmp_1004 = z / w
function(tmp_1004)
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_exec(self):
self.assert_body_anfs_as_expected(exec_expected_result, exec_test_function)
def test_simple_while_and_assert(self):
def test_function(foo, quux):
while foo:
assert quux
foo = foo + 1 * 3
def expected_result(foo, quux):
while foo:
assert quux
tmp_1001 = 1 * 3
foo = foo + tmp_1001
self.assert_body_anfs_as_expected(expected_result, test_function)
def test_for(self):
def test_function(compute, something, complicated, foo):
for foo in compute(something + complicated):
bar = foo + 1 * 3
return bar
def expected_result(compute, something, complicated, foo):
tmp_1001 = something + complicated
tmp_1002 = compute(tmp_1001)
for foo in tmp_1002:
tmp_1003 = 1 * 3
bar = foo + tmp_1003
return bar
self.assert_body_anfs_as_expected(expected_result, test_function)
# This test collects several examples where the definition of A-normal form
# implemented by this transformer is questionable. Mostly it's here to spell
# out what the definition is in these cases.
def test_controversial(self):
def test_function(b, c, d, f):
a = c + d
a.b = c + d
a[b] = c + d
a += c + d
a, b = c
a, b = c, d
a = f(c)
a = f(c + d)
a[b + d] = f.e(c + d)
def expected_result(b, c, d, f):
a = c + d
a.b = c + d # Should be a.b = tmp? (Definitely not tmp = c + d)
a[b] = c + d # Should be a[b] = tmp? (Definitely not tmp = c + d)
a += c + d # Should be a += tmp? (Definitely not tmp = c + d)
a, b = c # Should be a = c[0], b = c[1]? Or not?
a, b = c, d # Should be a = c, b = d? Or not?
a = f(c)
tmp_1001 = c + d
a = f(tmp_1001)
tmp_1002 = b + d
tmp_1003 = f.e
tmp_1004 = c + d
a[tmp_1002] = tmp_1003(tmp_1004) # Or should be a[tmp1] = tmp2?
self.assert_body_anfs_as_expected(expected_result, test_function)
| AnfTransformerTest |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py | {
"start": 393,
"end": 2074
} | class ____(ColumnPairMapMetricProvider):
condition_metric_name = "column_pair_values.a_greater_than_b"
condition_domain_keys = (
"batch_id",
"table",
"column_A",
"column_B",
"row_condition",
"condition_parser",
"ignore_row_if",
)
condition_value_keys = ("or_equal",)
# noinspection PyPep8Naming
@column_pair_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_A, column_B, **kwargs):
temp_column_A = column_A
temp_column_B = column_B
or_equal: bool = kwargs.get("or_equal") or False
if or_equal:
return temp_column_A >= temp_column_B
else:
return temp_column_A > temp_column_B
# noinspection PyPep8Naming
@column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column_A, column_B, **kwargs):
or_equal: bool = kwargs.get("or_equal") or False
if or_equal:
return sa.or_(
column_A >= column_B,
sa.and_(column_A == None, column_B == None), # noqa: E711 # FIXME CoP
)
else:
return column_A > column_B
# noinspection PyPep8Naming
@column_pair_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column_A, column_B, **kwargs):
temp_column_A = column_A
temp_column_B = column_B
or_equal: bool = kwargs.get("or_equal") or False
if or_equal:
return (temp_column_A >= temp_column_B) | (temp_column_A.eqNullSafe(temp_column_B))
else:
return temp_column_A > temp_column_B
| ColumnPairValuesAGreaterThanB |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/image_ops/decode_bmp_op_test.py | {
"start": 952,
"end": 3291
} | class ____(test.TestCase):
def testex1(self):
img_bytes = [[[0, 0, 255], [0, 255, 0]], [[255, 0, 0], [255, 255, 255]]]
# Encoded BMP bytes from Wikipedia
# BMP header bytes: https://en.wikipedia.org/wiki/List_of_file_signatures
encoded_bytes = [
0x42, 0x4d,
0x46, 0, 0, 0,
0, 0,
0, 0,
0x36, 0, 0, 0,
0x28, 0, 0, 0,
0x2, 0, 0, 0,
0x2, 0, 0, 0,
0x1, 0,
0x18, 0,
0, 0, 0, 0,
0x10, 0, 0, 0,
0x13, 0xb, 0, 0,
0x13, 0xb, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0xff,
0xff, 0xff, 0xff,
0, 0,
0xff, 0, 0,
0, 0xff, 0,
0, 0,
]
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
decode = array_ops.squeeze(image_ops.decode_bmp(img_in))
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
def testGrayscale(self):
img_bytes = [[[255], [0]], [[255], [0]]]
# BMP header bytes: https://en.wikipedia.org/wiki/List_of_file_signatures
encoded_bytes = [
0x42,
0x4d,
0x3d,
0,
0,
0,
0,
0,
0,
0,
0x36,
0,
0,
0,
0x28,
0,
0,
0,
0x2,
0,
0,
0,
0x2,
0,
0,
0,
0x1,
0,
0x8,
0,
0,
0,
0,
0,
0x10,
0,
0,
0,
0x13,
0xb,
0,
0,
0x13,
0xb,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0xff,
0,
0,
0,
0xff,
0,
0,
0,
]
byte_string = bytes(bytearray(encoded_bytes))
img_in = constant_op.constant(byte_string, dtype=dtypes.string)
# TODO(b/159600494): Currently, `decode_bmp` op does not validate input
# magic bytes.
decode = image_ops.decode_bmp(img_in)
with self.cached_session():
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
if __name__ == "__main__":
test.main()
| DecodeBmpOpTest |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 16062,
"end": 16380
} | class ____:
"""Marker mixin for types that should raise an error when encountered."""
__slots__ = _NO_INSTANCE_STORAGE
def trip(self) -> t.NoReturn:
"""Derived types should implement a failure behavior."""
raise NotImplementedError()
@dataclasses.dataclass(**_tag_dataclass_kwargs)
| Tripwire |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_compile.py | {
"start": 1591,
"end": 1971
} | class ____(torch.nn.Module):
def __init__(self):
super().__init__()
self.encoder = torch.nn.Sequential(
torch.nn.Linear(28 * 28, 1024, device=device_type),
torch.nn.Linear(1024, 1024, device=device_type),
torch.nn.Linear(1024, 4096, device=device_type),
)
def forward(self, x):
return self.encoder(x)
| Mod |
python | qdrant__qdrant-client | tools/async_client_generator/remote_generator.py | {
"start": 1134,
"end": 5330
} | class ____(BaseGenerator):
def __init__(
self,
keep_sync: Optional[list[str]] = None,
class_replace_map: Optional[dict] = None,
import_replace_map: Optional[dict] = None,
exclude_methods: Optional[list[str]] = None,
rename_methods: Optional[dict[str, str]] = None,
):
super().__init__()
self._async_methods: Optional[list[str]] = None
self.transformers.append(
RemoteImportFromTransformer(import_replace_map=import_replace_map)
)
self.transformers.append(ClassDefTransformer(class_replace_map=class_replace_map))
self.transformers.append(
CallTransformer(class_replace_map=class_replace_map, async_methods=self.async_methods)
)
self.transformers.append(ImportTransformer(import_replace_map=import_replace_map))
self.transformers.append(
RemoteFunctionDefTransformer(
keep_sync=keep_sync,
exclude_methods=exclude_methods,
async_methods=self.async_methods,
)
)
self.transformers.append(
NameTransformer(
class_replace_map=class_replace_map,
import_replace_map=import_replace_map,
rename_methods=rename_methods,
)
)
@staticmethod
def _get_grpc_methods(grpc_stub_class: type) -> list[str]:
init_source = inspect.getsource(grpc_stub_class)
# Parse the source code using ast
parsed = ast.parse(init_source)
# Extract attribute names
field_names = []
for node in ast.walk(parsed):
if isinstance(node, ast.Assign):
for target in node.targets:
if (
isinstance(target, ast.Attribute)
and isinstance(target.value, ast.Name)
and target.value.id == "self"
):
field_name = target.attr
field_names.append(field_name)
return field_names
@property
def async_methods(self) -> list[str]:
if self._async_methods is None:
self._async_methods = []
for cls_ in (
AsyncQdrantBase,
AsyncDistributedApi,
AsyncCollectionsApi,
AsyncPointsApi,
AsyncServiceApi,
AsyncSnapshotsApi,
AsyncIndexesApi,
AsyncAliasesApi,
AsyncSearchApi,
AsyncApiClient,
):
self._async_methods.extend(self.get_async_methods(cls_))
for cls_ in (PointsStub, SnapshotsStub, CollectionsStub, QdrantStub):
self._async_methods.extend(self._get_grpc_methods(cls_))
return self._async_methods
@staticmethod
def get_async_methods(class_obj: type) -> list[str]:
async_methods = []
for name, method in inspect.getmembers(class_obj):
if inspect.iscoroutinefunction(method):
async_methods.append(name)
return async_methods
if __name__ == "__main__":
from tools.async_client_generator.config import CLIENT_DIR, CODE_DIR
with open(CLIENT_DIR / "qdrant_remote.py", "r") as source_file:
code = source_file.read()
generator = RemoteGenerator(
class_replace_map={
"QdrantBase": "AsyncQdrantBase",
"QdrantFastembedMixin": "AsyncQdrantFastembedMixin",
"QdrantClient": "AsyncQdrantClient",
"QdrantRemote": "AsyncQdrantRemote",
},
import_replace_map={
"qdrant_client.client_base": "qdrant_client.async_client_base",
"QdrantBase": "AsyncQdrantBase",
"QdrantRemote": "AsyncQdrantRemote",
"ApiClient": "AsyncApiClient",
"SyncApis": "AsyncApis",
},
exclude_methods=[
"__del__",
"migrate",
],
)
modified_code = generator.generate(code)
with open(CODE_DIR / "async_qdrant_remote.py", "w") as target_file:
target_file.write(modified_code)
| RemoteGenerator |
python | weaviate__weaviate-python-client | weaviate/cluster/models.py | {
"start": 241,
"end": 514
} | class ____(str, Enum):
"""Enum for replication operation states."""
REGISTERED = "REGISTERED"
HYDRATING = "HYDRATING"
FINALIZING = "FINALIZING"
DEHYDRATING = "DEHYDRATING"
READY = "READY"
CANCELLED = "CANCELLED"
@dataclass
| ReplicateOperationState |
python | huggingface__transformers | tests/utils/test_tokenization_utils.py | {
"start": 1237,
"end": 3229
} | class ____(unittest.TestCase):
def test_cached_files_are_used_when_internet_is_down(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = httpx.HTTPStatusError(
"failed", request=mock.Mock(), response=mock.Mock()
)
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("httpx.Client.request", return_value=response_mock) as mock_head:
_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def test_cached_files_are_used_when_internet_is_down_missing_files(self):
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = httpx.HTTPStatusError(
"failed", request=mock.Mock(), response=mock.Mock()
)
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2")
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("httpx.Client.request", return_value=response_mock) as mock_head:
_ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2")
# This check we did call the fake head request
mock_head.assert_called()
@is_staging_test
| TokenizerUtilTester |
python | mlflow__mlflow | mlflow/genai/judges/tools/search_trace_regex.py | {
"start": 1029,
"end": 5969
} | class ____(JudgeTool):
"""
Tool for searching through entire traces using regex patterns.
Performs case-insensitive regex search across all trace fields including
spans, metadata, tags, requests, responses, and other fields. Returns
matched text with surrounding context to help understand where matches occur.
"""
@property
def name(self) -> str:
"""Return the tool name."""
return ToolNames.SEARCH_TRACE_REGEX
def get_definition(self) -> ToolDefinition:
"""Get the tool definition for LiteLLM/OpenAI function calling."""
return ToolDefinition(
function=FunctionToolDefinition(
name=ToolNames.SEARCH_TRACE_REGEX,
description=(
"Search through the entire trace using a regular expression pattern. "
"Performs case-insensitive matching across all trace fields including spans, "
"metadata, tags, requests, and responses. Returns all matches with surrounding "
"context. Useful for finding specific patterns, values, or text anywhere in "
"the trace."
),
parameters=ToolParamsSchema(
type="object",
properties={
"pattern": {
"type": "string",
"description": (
"Regular expression pattern to search for. The search is "
"case-insensitive. Examples: 'error.*timeout', 'user_id:\\s*\\d+', "
"'function_name\\(.*\\)'"
),
},
"max_matches": {
"type": "integer",
"description": "Maximum number of matches to return (default: 50)",
"default": 50,
},
"surrounding_content_length": {
"type": "integer",
"description": (
"Number of characters to include before and after each match "
"for context (default: 100)"
),
"default": 100,
},
},
required=["pattern"],
),
),
type="function",
)
def invoke(
self,
trace: Trace,
pattern: str,
max_matches: int = 50,
surrounding_content_length: int = 100,
) -> SearchTraceRegexResult:
"""
Search through the trace using a regex pattern.
Args:
trace: The MLflow trace object to search through
pattern: Regular expression pattern to search for
max_matches: Maximum number of matches to return
surrounding_content_length: Number of characters to include before and after each
match for context
Returns:
SearchTraceRegexResult containing the search results
"""
try:
regex = re.compile(pattern, re.IGNORECASE)
except re.error as e:
return SearchTraceRegexResult(
pattern=pattern,
total_matches=0,
matches=[],
error=f"Invalid regex pattern: {e}",
)
trace_json = trace.to_json()
matches = []
total_found = 0
for match in regex.finditer(trace_json):
if total_found >= max_matches:
break
matches.append(
self._create_regex_match(
match, trace_json, surrounding_content_length=surrounding_content_length
)
)
total_found += 1
return SearchTraceRegexResult(
pattern=pattern,
total_matches=total_found,
matches=matches,
)
def _create_regex_match(
self,
match: re.Match[str],
text: str,
span_id: str = "trace",
surrounding_content_length: int = 100,
) -> RegexMatch:
"""Create a RegexMatch with surrounding context from a regex match object."""
matched_text = match.group()
start, end = match.span()
context_start = max(0, start - surrounding_content_length)
context_end = min(len(text), end + surrounding_content_length)
surrounding = text[context_start:context_end]
if context_start > 0:
surrounding = "..." + surrounding
if context_end < len(text):
surrounding = surrounding + "..."
return RegexMatch(
span_id=span_id,
matched_text=matched_text,
surrounding_text=surrounding,
)
| SearchTraceRegexTool |
python | tornadoweb__tornado | tornado/options.py | {
"start": 4266,
"end": 4362
} | class ____(Exception):
"""Exception raised by errors in the options module."""
pass
| Error |
python | pypa__pipenv | pipenv/vendor/pexpect/pxssh.py | {
"start": 1845,
"end": 24487
} | class ____ (spawn):
'''This class extends pexpect.spawn to specialize setting up SSH
connections. This adds methods for login, logout, and expecting the shell
prompt. It does various tricky things to handle many situations in the SSH
login process. For example, if the session is your first login, then pxssh
automatically accepts the remote certificate; or if you have public key
authentication setup then pxssh won't wait for the password prompt.
pxssh uses the shell prompt to synchronize output from the remote host. In
order to make this more robust it sets the shell prompt to something more
unique than just $ or #. This should work on most Borne/Bash or Csh style
shells.
Example that runs a few commands on a remote server and prints the result::
from pipenv.vendor.pexpect import pxssh
import getpass
try:
s = pxssh.pxssh()
hostname = raw_input('hostname: ')
username = raw_input('username: ')
password = getpass.getpass('password: ')
s.login(hostname, username, password)
s.sendline('uptime') # run a command
s.prompt() # match the prompt
print(s.before) # print everything before the prompt.
s.sendline('ls -l')
s.prompt()
print(s.before)
s.sendline('df')
s.prompt()
print(s.before)
s.logout()
except pxssh.ExceptionPxssh as e:
print("pxssh failed on login.")
print(e)
Example showing how to specify SSH options::
from pipenv.vendor.pexpect import pxssh
s = pxssh.pxssh(options={
"StrictHostKeyChecking": "no",
"UserKnownHostsFile": "/dev/null"})
...
Note that if you have ssh-agent running while doing development with pxssh
then this can lead to a lot of confusion. Many X display managers (xdm,
gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
dialog box popup asking for a password during development. You should turn
off any key agents during testing. The 'force_password' attribute will turn
off public key authentication. This will only work if the remote SSH server
is configured to allow password logins. Example of using 'force_password'
attribute::
s = pxssh.pxssh()
s.force_password = True
hostname = raw_input('hostname: ')
username = raw_input('username: ')
password = getpass.getpass('password: ')
s.login (hostname, username, password)
`debug_command_string` is only for the test suite to confirm that the string
generated for SSH is correct, using this will not allow you to do
anything other than get a string back from `pxssh.pxssh.login()`.
'''
def __init__ (self, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None, ignore_sighup=True, echo=True,
options={}, encoding=None, codec_errors='strict',
debug_command_string=False, use_poll=False):
spawn.__init__(self, None, timeout=timeout, maxread=maxread,
searchwindowsize=searchwindowsize, logfile=logfile,
cwd=cwd, env=env, ignore_sighup=ignore_sighup, echo=echo,
encoding=encoding, codec_errors=codec_errors, use_poll=use_poll)
self.name = '<pxssh>'
#SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
#slightly different string than the regular expression to match it. This
#is because when you set the prompt the command will echo back, but we
#don't want to match the echoed command. So if we make the set command
#slightly different than the regex we eliminate the problem. To make the
#set command different we add a backslash in front of $. The $ doesn't
#need to be escaped, but it doesn't hurt and serves to make the set
#prompt command different than the regex.
# used to match the command-line prompt
self.UNIQUE_PROMPT = r"\[PEXPECT\][\$\#] "
self.PROMPT = self.UNIQUE_PROMPT
# used to set shell command-line prompt to UNIQUE_PROMPT.
self.PROMPT_SET_SH = r"PS1='[PEXPECT]\$ '"
self.PROMPT_SET_CSH = r"set prompt='[PEXPECT]\$ '"
self.PROMPT_SET_ZSH = "prompt restore;\nPS1='[PEXPECT]%(!.#.$) '"
self.SSH_OPTS = (" -o 'PubkeyAuthentication=no'")
# Disabling host key checking, makes you vulnerable to MITM attacks.
# + " -o 'StrictHostKeyChecking=no'"
# + " -o 'UserKnownHostsFile /dev/null' ")
# Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
# displaying a GUI password dialog. I have not figured out how to
# disable only SSH_ASKPASS without also disabling X11 forwarding.
# Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
#self.SSH_OPTS = "-x -o 'PubkeyAuthentication=no'"
self.force_password = False
self.debug_command_string = debug_command_string
# User defined SSH options, eg,
# ssh.otions = dict(StrictHostKeyChecking="no",UserKnownHostsFile="/dev/null")
self.options = options
def levenshtein_distance(self, a, b):
'''This calculates the Levenshtein distance between a and b.
'''
n, m = len(a), len(b)
if n > m:
a,b = b,a
n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous, current = current, [i]+[0]*n
for j in range(1,n+1):
add, delete = previous[j]+1, current[j-1]+1
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
def try_read_prompt(self, timeout_multiplier):
'''This facilitates using communication timeouts to perform
synchronization as quickly as possible, while supporting high latency
connections with a tunable worst case performance. Fast connections
should be read almost immediately. Worst case performance for this
method is timeout_multiplier * 3 seconds.
'''
# maximum time allowed to read the first response
first_char_timeout = timeout_multiplier * 0.5
# maximum time allowed between subsequent characters
inter_char_timeout = timeout_multiplier * 0.1
# maximum time for reading the entire prompt
total_timeout = timeout_multiplier * 3.0
prompt = self.string_type()
begin = time.time()
expired = 0.0
timeout = first_char_timeout
while expired < total_timeout:
try:
prompt += self.read_nonblocking(size=1, timeout=timeout)
expired = time.time() - begin # updated total time expired
timeout = inter_char_timeout
except TIMEOUT:
break
return prompt
def sync_original_prompt (self, sync_multiplier=1.0):
'''This attempts to find the prompt. Basically, press enter and record
the response; press enter again and record the response; if the two
responses are similar then assume we are at the original prompt.
This can be a slow function. Worst case with the default sync_multiplier
can take 12 seconds. Low latency connections are more likely to fail
with a low sync_multiplier. Best case sync time gets worse with a
high sync multiplier (500 ms with default). '''
# All of these timing pace values are magic.
# I came up with these based on what seemed reliable for
# connecting to a heavily loaded machine I have.
self.sendline()
time.sleep(0.1)
try:
# Clear the buffer before getting the prompt.
self.try_read_prompt(sync_multiplier)
except TIMEOUT:
pass
self.sendline()
x = self.try_read_prompt(sync_multiplier)
self.sendline()
a = self.try_read_prompt(sync_multiplier)
self.sendline()
b = self.try_read_prompt(sync_multiplier)
ld = self.levenshtein_distance(a,b)
len_a = len(a)
if len_a == 0:
return False
if float(ld)/len_a < 0.4:
return True
return False
### TODO: This is getting messy and I'm pretty sure this isn't perfect.
### TODO: I need to draw a flow chart for this.
### TODO: Unit tests for SSH tunnels, remote SSH command exec, disabling original prompt sync
def login (self, server, username=None, password='', terminal_type='ansi',
original_prompt=r"[#$]", login_timeout=10, port=None,
auto_prompt_reset=True, ssh_key=None, quiet=True,
sync_multiplier=1, check_local_ip=True,
password_regex=r'(?i)(?:password:)|(?:passphrase for key)',
ssh_tunnels={}, spawn_local_ssh=True,
sync_original_prompt=True, ssh_config=None, cmd='ssh'):
'''This logs the user into the given server.
It uses 'original_prompt' to try to find the prompt right after login.
When it finds the prompt it immediately tries to reset the prompt to
something more easily matched. The default 'original_prompt' is very
optimistic and is easily fooled. It's more reliable to try to match the original
prompt as exactly as possible to prevent false matches by server
strings such as the "Message Of The Day". On many systems you can
disable the MOTD on the remote server by creating a zero-length file
called :file:`~/.hushlogin` on the remote server. If a prompt cannot be found
then this will not necessarily cause the login to fail. In the case of
a timeout when looking for the prompt we assume that the original
prompt was so weird that we could not match it, so we use a few tricks
to guess when we have reached the prompt. Then we hope for the best and
blindly try to reset the prompt to something more unique. If that fails
then login() raises an :class:`ExceptionPxssh` exception.
In some situations it is not possible or desirable to reset the
original prompt. In this case, pass ``auto_prompt_reset=False`` to
inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
uses a unique prompt in the :meth:`prompt` method. If the original prompt is
not reset then this will disable the :meth:`prompt` method unless you
manually set the :attr:`PROMPT` attribute.
Set ``password_regex`` if there is a MOTD message with `password` in it.
Changing this is like playing in traffic, don't (p)expect it to match straight
away.
If you require to connect to another SSH server from the your original SSH
connection set ``spawn_local_ssh`` to `False` and this will use your current
session to do so. Setting this option to `False` and not having an active session
will trigger an error.
Set ``ssh_key`` to a file path to an SSH private key to use that SSH key
for the session authentication.
Set ``ssh_key`` to `True` to force passing the current SSH authentication socket
to the desired ``hostname``.
Set ``ssh_config`` to a file path string of an SSH client config file to pass that
file to the client to handle itself. You may set any options you wish in here, however
doing so will require you to post extra information that you may not want to if you
run into issues.
Alter the ``cmd`` to change the ssh client used, or to prepend it with network
namespaces. For example ```cmd="ip netns exec vlan2 ssh"``` to execute the ssh in
network namespace named ```vlan```.
'''
session_regex_array = ["(?i)are you sure you want to continue connecting", original_prompt, password_regex, "(?i)permission denied", "(?i)terminal type", TIMEOUT]
session_init_regex_array = []
session_init_regex_array.extend(session_regex_array)
session_init_regex_array.extend(["(?i)connection closed by remote host", EOF])
ssh_options = ''.join([" -o '%s=%s'" % (o, v) for (o, v) in self.options.items()])
if quiet:
ssh_options = ssh_options + ' -q'
if not check_local_ip:
ssh_options = ssh_options + " -o'NoHostAuthenticationForLocalhost=yes'"
if self.force_password:
ssh_options = ssh_options + ' ' + self.SSH_OPTS
if ssh_config is not None:
if spawn_local_ssh and not os.path.isfile(ssh_config):
raise ExceptionPxssh('SSH config does not exist or is not a file.')
ssh_options = ssh_options + ' -F ' + ssh_config
if port is not None:
ssh_options = ssh_options + ' -p %s'%(str(port))
if ssh_key is not None:
# Allow forwarding our SSH key to the current session
if ssh_key==True:
ssh_options = ssh_options + ' -A'
else:
if spawn_local_ssh and not os.path.isfile(ssh_key):
raise ExceptionPxssh('private ssh key does not exist or is not a file.')
ssh_options = ssh_options + ' -i %s' % (ssh_key)
# SSH tunnels, make sure you know what you're putting into the lists
# under each heading. Do not expect these to open 100% of the time,
# The port you're requesting might be bound.
#
# The structure should be like this:
# { 'local': ['2424:localhost:22'], # Local SSH tunnels
# 'remote': ['2525:localhost:22'], # Remote SSH tunnels
# 'dynamic': [8888] } # Dynamic/SOCKS tunnels
if ssh_tunnels!={} and isinstance({},type(ssh_tunnels)):
tunnel_types = {
'local':'L',
'remote':'R',
'dynamic':'D'
}
for tunnel_type in tunnel_types:
cmd_type = tunnel_types[tunnel_type]
if tunnel_type in ssh_tunnels:
tunnels = ssh_tunnels[tunnel_type]
for tunnel in tunnels:
if spawn_local_ssh==False:
tunnel = quote(str(tunnel))
ssh_options = ssh_options + ' -' + cmd_type + ' ' + str(tunnel)
if username is not None:
ssh_options = ssh_options + ' -l ' + username
elif ssh_config is None:
raise TypeError('login() needs either a username or an ssh_config')
else: # make sure ssh_config has an entry for the server with a username
with open(ssh_config, 'rt') as f:
lines = [l.strip() for l in f.readlines()]
server_regex = r'^Host\s+%s\s*$' % server
user_regex = r'^User\s+\w+\s*$'
config_has_server = False
server_has_username = False
for line in lines:
if not config_has_server and re.match(server_regex, line, re.IGNORECASE):
config_has_server = True
elif config_has_server and 'hostname' in line.lower():
pass
elif config_has_server and 'host' in line.lower():
server_has_username = False # insurance
break # we have left the relevant section
elif config_has_server and re.match(user_regex, line, re.IGNORECASE):
server_has_username = True
break
if lines:
del line
del lines
if not config_has_server:
raise TypeError('login() ssh_config has no Host entry for %s' % server)
elif not server_has_username:
raise TypeError('login() ssh_config has no user entry for %s' % server)
cmd += " %s %s" % (ssh_options, server)
if self.debug_command_string:
return(cmd)
# Are we asking for a local ssh command or to spawn one in another session?
if spawn_local_ssh:
spawn._spawn(self, cmd)
else:
self.sendline(cmd)
# This does not distinguish between a remote server 'password' prompt
# and a local ssh 'passphrase' prompt (for unlocking a private key).
i = self.expect(session_init_regex_array, timeout=login_timeout)
# First phase
if i==0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
self.sendline("yes")
i = self.expect(session_regex_array)
if i==2: # password or passphrase
self.sendline(password)
i = self.expect(session_regex_array)
if i==4:
self.sendline(terminal_type)
i = self.expect(session_regex_array)
if i==7:
self.close()
raise ExceptionPxssh('Could not establish connection to host')
# Second phase
if i==0:
# This is weird. This should not happen twice in a row.
self.close()
raise ExceptionPxssh('Weird error. Got "are you sure" prompt twice.')
elif i==1: # can occur if you have a public key pair set to authenticate.
### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
pass
elif i==2: # password prompt again
# For incorrect passwords, some ssh servers will
# ask for the password again, others return 'denied' right away.
# If we get the password prompt again then this means
# we didn't get the password right the first time.
self.close()
raise ExceptionPxssh('password refused')
elif i==3: # permission denied -- password was bad.
self.close()
raise ExceptionPxssh('permission denied')
elif i==4: # terminal type again? WTF?
self.close()
raise ExceptionPxssh('Weird error. Got "terminal type" prompt twice.')
elif i==5: # Timeout
#This is tricky... I presume that we are at the command-line prompt.
#It may be that the shell prompt was so weird that we couldn't match
#it. Or it may be that we couldn't log in for some other reason. I
#can't be sure, but it's safe to guess that we did login because if
#I presume wrong and we are not logged in then this should be caught
#later when I try to set the shell prompt.
pass
elif i==6: # Connection closed by remote host
self.close()
raise ExceptionPxssh('connection closed')
else: # Unexpected
self.close()
raise ExceptionPxssh('unexpected login response')
if sync_original_prompt:
if not self.sync_original_prompt(sync_multiplier):
self.close()
raise ExceptionPxssh('could not synchronize with original prompt')
# We appear to be in.
# set shell prompt to something unique.
if auto_prompt_reset:
if not self.set_unique_prompt():
self.close()
raise ExceptionPxssh('could not set shell prompt '
'(received: %r, expected: %r).' % (
self.before, self.PROMPT,))
return True
def logout (self):
'''Sends exit to the remote shell.
If there are stopped jobs then this automatically sends exit twice.
'''
self.sendline("exit")
index = self.expect([EOF, "(?i)there are stopped jobs"])
if index==1:
self.sendline("exit")
self.expect(EOF)
self.close()
def prompt(self, timeout=-1):
'''Match the next shell prompt.
This is little more than a short-cut to the :meth:`~pexpect.spawn.expect`
method. Note that if you called :meth:`login` with
``auto_prompt_reset=False``, then before calling :meth:`prompt` you must
set the :attr:`PROMPT` attribute to a regex that it will use for
matching the prompt.
Calling :meth:`prompt` will erase the contents of the :attr:`before`
attribute even if no prompt is ever matched. If timeout is not given or
it is set to -1 then self.timeout is used.
:return: True if the shell prompt was matched, False if the timeout was
reached.
'''
if timeout == -1:
timeout = self.timeout
i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
if i==1:
return False
return True
def set_unique_prompt(self):
'''This sets the remote prompt to something more unique than ``#`` or ``$``.
This makes it easier for the :meth:`prompt` method to match the shell prompt
unambiguously. This method is called automatically by the :meth:`login`
method, but you may want to call it manually if you somehow reset the
shell prompt. For example, if you 'su' to a different user then you
will need to manually reset the prompt. This sends shell commands to
the remote host to set the prompt, so this assumes the remote host is
ready to receive commands.
Alternatively, you may use your own prompt pattern. In this case you
should call :meth:`login` with ``auto_prompt_reset=False``; then set the
:attr:`PROMPT` attribute to a regular expression. After that, the
:meth:`prompt` method will try to match your prompt pattern.
'''
self.sendline("unset PROMPT_COMMAND")
self.sendline(self.PROMPT_SET_SH) # sh-style
i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
if i == 0: # csh-style
self.sendline(self.PROMPT_SET_CSH)
i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
if i == 0: # zsh-style
self.sendline(self.PROMPT_SET_ZSH)
i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
if i == 0:
return False
return True
# vi:ts=4:sw=4:expandtab:ft=python:
| pxssh |
python | django__django | tests/template_tests/syntax_tests/test_numpy.py | {
"start": 229,
"end": 1174
} | class ____(SimpleTestCase):
@setup({"numpy-array-index01": "{{ var.1 }}"})
def test_numpy_array_index01(self):
"""
Numpy's array-index syntax allows a template to access a certain
item of a subscriptable object.
"""
output = self.engine.render_to_string(
"numpy-array-index01",
{"var": numpy.array(["first item", "second item"])},
)
self.assertEqual(output, "second item")
@setup({"numpy-array-index02": "{{ var.5 }}"})
def test_numpy_array_index02(self):
"""
Fail silently when the array index is out of range.
"""
output = self.engine.render_to_string(
"numpy-array-index02",
{"var": numpy.array(["first item", "second item"])},
)
if self.engine.string_if_invalid:
self.assertEqual(output, "INVALID")
else:
self.assertEqual(output, "")
| NumpyTests |
python | scipy__scipy | scipy/constants/_codata.py | {
"start": 198639,
"end": 202549
class ____(DeprecationWarning):
    """Warning issued when a constant absent from the current CODATA set is accessed."""
def _check_obsolete(key: str) -> None:
    """Warn (ConstantWarning) when *key* is obsolete and has no alias."""
    is_obsolete = key in _obsolete_constants
    has_alias = key in _aliases
    if is_obsolete and not has_alias:
        message = f"Constant '{key}' is not in current {_current_codata} data set"
        warnings.warn(message, ConstantWarning, stacklevel=3)
@xp_capabilities(out_of_scope=True)
def value(key: str) -> float:
    """
    Value in physical_constants indexed by key

    Parameters
    ----------
    key : Python string
        Key in dictionary `physical_constants`

    Returns
    -------
    value : float
        Value in `physical_constants` corresponding to `key`

    Examples
    --------
    >>> from scipy import constants
    >>> constants.value('elementary charge')
    1.602176634e-19

    """
    _check_obsolete(key)
    # Each entry is a (value, unit, uncertainty) triple; return the value.
    constant_value, _unit, _uncertainty = physical_constants[key]
    return constant_value
@xp_capabilities(out_of_scope=True)
def unit(key: str) -> str:
    """
    Unit in physical_constants indexed by key

    Parameters
    ----------
    key : Python string
        Key in dictionary `physical_constants`

    Returns
    -------
    unit : Python string
        Unit in `physical_constants` corresponding to `key`

    Examples
    --------
    >>> from scipy import constants
    >>> constants.unit('proton mass')
    'kg'

    """
    _check_obsolete(key)
    # Each entry is a (value, unit, uncertainty) triple; return the unit.
    _value, unit_string, _uncertainty = physical_constants[key]
    return unit_string
@xp_capabilities(out_of_scope=True)
def precision(key: str) -> float:
    """
    Relative precision in physical_constants indexed by key

    Parameters
    ----------
    key : Python string
        Key in dictionary `physical_constants`

    Returns
    -------
    prec : float
        Relative precision in `physical_constants` corresponding to `key`

    Examples
    --------
    >>> from scipy import constants
    >>> constants.precision('proton mass')
    5.1e-37

    """
    _check_obsolete(key)
    # Relative precision = absolute uncertainty divided by the value.
    constant_value, _unit, absolute_uncertainty = physical_constants[key]
    return absolute_uncertainty / constant_value
@xp_capabilities(out_of_scope=True)
def find(sub: str | None = None, disp: bool = False) -> Any:
    """
    Return list of physical_constant keys containing a given string.

    Parameters
    ----------
    sub : str
        Sub-string to search keys for. By default, return all keys.
    disp : bool
        If True, print the keys that are found and return None.
        Otherwise, return the list of keys without printing anything.

    Returns
    -------
    keys : list or None
        If `disp` is False, the list of keys is returned.
        Otherwise, None is returned.

    Examples
    --------
    >>> from scipy.constants import find, physical_constants

    Which keys in the ``physical_constants`` dictionary contain 'boltzmann'?

    >>> find('boltzmann')
    ['Boltzmann constant',
     'Boltzmann constant in Hz/K',
     'Boltzmann constant in eV/K',
     'Boltzmann constant in inverse meter per kelvin',
     'Stefan-Boltzmann constant']

    Get the constant called 'Boltzmann constant in Hz/K':

    >>> physical_constants['Boltzmann constant in Hz/K']
    (20836619120.0, 'Hz K^-1', 0.0)

    Find constants with 'radius' in the key:

    >>> find('radius')
    ['Bohr radius',
     'alpha particle rms charge radius',
     'classical electron radius',
     'deuteron rms charge radius',
     'proton rms charge radius']
    >>> physical_constants['classical electron radius']
    (2.8179403262e-15, 'm', 1.3e-24)

    """
    if sub is None:
        result = sorted(_current_constants)
    else:
        # Hoist the loop-invariant lowercasing of the needle; the original
        # recomputed sub.lower() for every key in the dictionary.
        needle = sub.lower()
        result = sorted(key for key in _current_constants
                        if needle in key.lower())
    if disp:
        for key in result:
            print(key)
        return
    return result
# This is not used here, but it must be defined to pass
# scipy/_lib/tests/test_public_api.py::test_private_but_present_deprecation
# NOTE: binds the CODATA speed-of-light value at module import time, so the
# name `c` exists as a (private-but-present) module attribute.
c = value('speed of light in vacuum')
| ConstantWarning |
python | walkccc__LeetCode | solutions/1356. Sort Integers by The Number of 1 Bits/1356.py | {
"start": 0,
"end": 126
class ____:
  def sortByBits(self, arr: list[int]) -> list[int]:
    """Sort ascending by number of set bits, breaking ties by value."""
    def sort_key(value: int) -> tuple[int, int]:
      return value.bit_count(), value
    return sorted(arr, key=sort_key)
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.