language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 360782,
"end": 370565
}
|
class ____(FieldChannelMixin, core.SecondaryFieldDef):
r"""
Longitude2 schema wrapper.
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude2"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Longitude2: ...
@overload
def aggregate(
self, *, argmax: Optional[str | SchemaBase] = Undefined
) -> Longitude2: ...
@overload
def aggregate(
self, *, argmin: Optional[str | SchemaBase] = Undefined
) -> Longitude2: ...
@overload
def bandPosition(self, _: float, /) -> Longitude2: ...
@overload
def bin(self, _: None, /) -> Longitude2: ...
@overload
def field(self, _: str | RepeatRef, /) -> Longitude2: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Longitude2: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Longitude2: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Longitude2: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Longitude2: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
**kwds,
)
@with_property_setters
|
Longitude2
|
python
|
pytorch__pytorch
|
test/dynamo/test_compile.py
|
{
"start": 7145,
"end": 8369
}
|
class ____(TestCase):
def check_signature(self, public_fn_name, private_fn_name, private_namespace):
public_fn = getattr(torch.compiler, public_fn_name)
private_fn = getattr(private_namespace, private_fn_name)
public_sig = inspect.signature(public_fn)
private_sig = inspect.signature(private_fn)
matching = public_sig == private_sig
matching |= len(public_sig.parameters) < len(private_sig.parameters) and all(
public == private
for public, private in zip(
public_sig.parameters.items(), private_sig.parameters.items()
)
)
self.assertEqual(
matching,
True,
f"Signatures do not match for function {public_fn_name}() \n Public: {public_sig} \n Private: {private_sig}",
)
def test_dynamo_signatures(self):
function_names = [
"reset",
"allow_in_graph",
"list_backends",
"assume_constant_result",
"disable",
]
for fn_name in function_names:
self.check_signature(fn_name, fn_name, torch._dynamo)
if __name__ == "__main__":
run_tests()
|
PublicTorchCompilerTests
|
python
|
walkccc__LeetCode
|
solutions/2351. First Letter to Appear Twice/2351.py
|
{
"start": 0,
"end": 191
}
|
class ____:
def repeatedCharacter(self, s: str) -> str:
seen = [False] * 26
for c in s:
if seen[ord(c) - ord('a')]:
return c
seen[ord(c) - ord('a')] = True
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_events_has_measurements.py
|
{
"start": 1137,
"end": 1684
}
|
class ____(serializers.Serializer):
transaction = serializers.CharField(max_length=200)
type = serializers.ChoiceField(choices=list(MEASUREMENT_TYPES.keys()))
def validate(self, data):
# only allow one project at a time in order to cache the results
# for a unique transaction
project_ids = self.context.get("project_ids", [])
if len(project_ids) != 1:
raise serializers.ValidationError("Only 1 project allowed.")
return data
@region_silo_endpoint
|
EventsHasMeasurementsQuerySerializer
|
python
|
getsentry__sentry
|
tests/sentry/web/frontend/test_openidtoken.py
|
{
"start": 346,
"end": 5408
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.application = ApiApplication.objects.create(
owner=self.user, redirect_uris="https://example.com"
)
self.id_token = OpenIDToken("ex_client_id", self.user.id, "shared_secret", nonce="abcd")
def test_get_user_details_no_scope(self) -> None:
grant_no_scopes = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid"],
)
user_details = self.id_token._get_user_details(grant_no_scopes)
assert user_details == {}
def test_get_user_details_profile_scope(self) -> None:
grant_profile_scope = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid", "profile"],
)
user_details = self.id_token._get_user_details(grant_profile_scope)
assert user_details == {
"name": grant_profile_scope.user.name,
"avatar_type": grant_profile_scope.user.avatar_type,
"avatar_url": grant_profile_scope.user.avatar_url,
"date_joined": str(grant_profile_scope.user.date_joined),
}
def test_get_user_details_email_scope(self) -> None:
grant_email_scope = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid", "email"],
)
user_details = self.id_token._get_user_details(grant_email_scope)
assert user_details == {"email": "admin@localhost", "email_verified": True}
def test_get_user_details_multiple_scopes(self) -> None:
grant_multiple_scopes = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid", "email", "profile"],
)
user_details = self.id_token._get_user_details(grant_multiple_scopes)
assert user_details == {
"email": "admin@localhost",
"email_verified": True,
"name": grant_multiple_scopes.user.name,
"avatar_type": grant_multiple_scopes.user.avatar_type,
"avatar_url": grant_multiple_scopes.user.avatar_url,
"date_joined": str(grant_multiple_scopes.user.date_joined),
}
def test_get_signed_id_token_no_scopes(self) -> None:
grant = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid"],
)
id_token = OpenIDToken("ex_client_id", self.user.id, "shared_secret", nonce="abcd")
encrypted_id_token = id_token.get_signed_id_token(grant)
assert encrypted_id_token.count(".") == 2
decrypted_id_token = jwt_utils.decode(
encrypted_id_token, "shared_secret", audience="ex_client_id"
)
now = datetime.now()
current_timestamp = datetime.timestamp(now)
assert decrypted_id_token["aud"] == "ex_client_id"
assert decrypted_id_token["iss"] == "https://sentry.io"
assert decrypted_id_token["nonce"] == "abcd"
assert isinstance(decrypted_id_token["sub"], int)
assert decrypted_id_token["exp"] > current_timestamp
assert decrypted_id_token["iat"] < current_timestamp
def test_get_signed_id_token_with_scopes(self) -> None:
grant = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
scope_list=["openid", "profile", "email"],
)
id_token = OpenIDToken("ex_client_id", self.user.id, "shared_secret", nonce="abcd")
encrypted_id_token = id_token.get_signed_id_token(grant)
assert encrypted_id_token.count(".") == 2
decrypted_id_token = jwt_utils.decode(
encrypted_id_token, "shared_secret", audience="ex_client_id"
)
now = datetime.now()
current_timestamp = datetime.timestamp(now)
assert decrypted_id_token["aud"] == "ex_client_id"
assert decrypted_id_token["iss"] == "https://sentry.io"
assert decrypted_id_token["nonce"] == "abcd"
assert isinstance(decrypted_id_token["sub"], int)
assert decrypted_id_token["exp"] > current_timestamp
assert decrypted_id_token["iat"] < current_timestamp
assert decrypted_id_token["email"] == "admin@localhost"
assert decrypted_id_token["email_verified"] is True
assert decrypted_id_token["name"] == grant.user.name
assert decrypted_id_token["avatar_type"] == grant.user.avatar_type
assert decrypted_id_token["avatar_url"] == grant.user.avatar_url
assert decrypted_id_token["date_joined"] == str(grant.user.date_joined)
|
OpenIDTokenTest
|
python
|
vyperlang__vyper
|
vyper/ast/nodes.py
|
{
"start": 21110,
"end": 22034
}
|
class ____(TopLevel):
# metadata
__slots__ = ("path", "resolved_path", "source_id", "is_interface", "settings")
"""
settings: Settings
settings result from parsing the compiler pragmas in the file.
"""
_special_decoders = {"settings": Settings.from_dict}
def to_dict(self):
return dict(source_sha256sum=self.source_sha256sum, **super().to_dict())
@property
def source_sha256sum(self):
return sha256sum(self.full_source_code)
@contextlib.contextmanager
def namespace(self):
from vyper.semantics.namespace import get_namespace, override_global_namespace
# kludge implementation for backwards compatibility.
# TODO: replace with type_from_ast
try:
ns = self._metadata["namespace"]
except AttributeError:
ns = get_namespace()
with override_global_namespace(ns):
yield
|
Module
|
python
|
agronholm__apscheduler
|
tests/test_marshalling.py
|
{
"start": 619,
"end": 2268
}
|
class ____:
@pytest.mark.parametrize(
"obj, error",
[
(partial(DummyClass.meth), "Cannot create a reference to a partial()"),
(lambda: None, "Cannot create a reference to a lambda"),
],
ids=["partial", "lambda"],
)
def test_errors(self, obj, error):
exc = pytest.raises(SerializationError, callable_to_ref, obj)
assert str(exc.value) == error
def test_nested_function_error(self):
def nested():
pass
exc = pytest.raises(SerializationError, callable_to_ref, nested)
assert str(exc.value) == "Cannot create a reference to a nested function"
@pytest.mark.parametrize(
"input,expected",
[
(DummyClass.meth, "test_marshalling:DummyClass.meth"),
(DummyClass.classmeth, "test_marshalling:DummyClass.classmeth"),
(
DummyClass.InnerDummyClass.innerclassmeth,
"test_marshalling:DummyClass.InnerDummyClass.innerclassmeth",
),
(DummyClass.staticmeth, "test_marshalling:DummyClass.staticmeth"),
(
InheritedDummyClass.classmeth,
"test_marshalling:InheritedDummyClass.classmeth",
),
(timedelta, "datetime:timedelta"),
],
ids=[
"unbound method",
"class method",
"inner class method",
"static method",
"inherited class method",
"timedelta",
],
)
def test_valid_refs(self, input, expected):
assert callable_to_ref(input) == expected
|
TestCallableToRef
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/links/test_alloy_db.py
|
{
"start": 2048,
"end": 2347
}
|
class ____:
def test_class_attributes(self):
assert AlloyDBUsersLink.key == EXPECTED_ALLOY_DB_USERS_LINK_KEY
assert AlloyDBUsersLink.name == EXPECTED_ALLOY_DB_USERS_LINK_NAME
assert AlloyDBUsersLink.format_str == EXPECTED_ALLOY_DB_USERS_LINK_FORMAT_STR
|
TestAlloyDBUsersLink
|
python
|
tornadoweb__tornado
|
tornado/test/locale_test.py
|
{
"start": 3337,
"end": 6359
}
|
class ____(unittest.TestCase):
def test_format_date(self):
locale = tornado.locale.get("en_US")
date = datetime.datetime(2013, 4, 28, 18, 35)
self.assertEqual(
locale.format_date(date, full_format=True), "April 28, 2013 at 6:35 pm"
)
aware_dt = datetime.datetime.now(datetime.timezone.utc)
naive_dt = aware_dt.replace(tzinfo=None)
for name, now in {"aware": aware_dt, "naive": naive_dt}.items():
with self.subTest(dt=name):
self.assertEqual(
locale.format_date(
now - datetime.timedelta(seconds=2), full_format=False
),
"2 seconds ago",
)
self.assertEqual(
locale.format_date(
now - datetime.timedelta(minutes=2), full_format=False
),
"2 minutes ago",
)
self.assertEqual(
locale.format_date(
now - datetime.timedelta(hours=2), full_format=False
),
"2 hours ago",
)
self.assertEqual(
locale.format_date(
now - datetime.timedelta(days=1),
full_format=False,
shorter=True,
),
"yesterday",
)
date = now - datetime.timedelta(days=2)
self.assertEqual(
locale.format_date(date, full_format=False, shorter=True),
locale._weekdays[date.weekday()],
)
date = now - datetime.timedelta(days=300)
self.assertEqual(
locale.format_date(date, full_format=False, shorter=True),
"%s %d" % (locale._months[date.month - 1], date.day),
)
date = now - datetime.timedelta(days=500)
self.assertEqual(
locale.format_date(date, full_format=False, shorter=True),
"%s %d, %d" % (locale._months[date.month - 1], date.day, date.year),
)
def test_friendly_number(self):
locale = tornado.locale.get("en_US")
self.assertEqual(locale.friendly_number(1000000), "1,000,000")
def test_list(self):
locale = tornado.locale.get("en_US")
self.assertEqual(locale.list([]), "")
self.assertEqual(locale.list(["A"]), "A")
self.assertEqual(locale.list(["A", "B"]), "A and B")
self.assertEqual(locale.list(["A", "B", "C"]), "A, B and C")
def test_format_day(self):
locale = tornado.locale.get("en_US")
date = datetime.datetime(2013, 4, 28, 18, 35)
self.assertEqual(locale.format_day(date=date, dow=True), "Sunday, April 28")
self.assertEqual(locale.format_day(date=date, dow=False), "April 28")
|
EnglishTest
|
python
|
joke2k__faker
|
faker/providers/date_time/fil_PH/__init__.py
|
{
"start": 46,
"end": 829
}
|
class ____(DateTimeProvider):
"""Provider for datetimes for fil_PH locale"""
DAY_NAMES = {
"0": "Linggo",
"1": "Lunes",
"2": "Martes",
"3": "Miyerkules",
"4": "Huwebes",
"5": "Biyernes",
"6": "Sabado",
}
MONTH_NAMES = {
"01": "Enero",
"02": "Pebrero",
"03": "Marso",
"04": "Abril",
"05": "Mayo",
"06": "Hunyo",
"07": "Hulyo",
"08": "Agosto",
"09": "Setyembre",
"10": "Oktubre",
"11": "Nobyembre",
"12": "Disyembre",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
|
Provider
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/hooks/test_opensearch_serverless.py
|
{
"start": 917,
"end": 1199
}
|
class ____:
def test_opensearch_serverless_hook(self):
hook = OpenSearchServerlessHook()
service_name = "opensearchserverless"
assert hook.conn is not None
assert hook.conn.meta.service_model.service_name == service_name
|
TestOpenSearchServerlessHook
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/distributions/util_test.py
|
{
"start": 24318,
"end": 28412
}
|
class ____(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _fill_triangular(self, x, upper=False):
"""Numpy implementation of `fill_triangular`."""
x = np.asarray(x)
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(x.shape[-1])
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Invalid shape.")
n = np.int32(n)
# We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle
# `m == n == 1`. Hence, we do absolute indexing.
x_tail = x[..., (m - (n * n - m)):]
y = np.concatenate(
[x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],
axis=-1)
y = y.reshape(np.concatenate([
np.int32(x.shape[:-1]),
np.int32([n, n]),
], axis=0))
return np.triu(y) if upper else np.tril(y)
def _run_test(self, x_, use_deferred_shape=False, **kwargs):
x_ = np.asarray(x_)
with self.cached_session() as sess:
static_shape = None if use_deferred_shape else x_.shape
x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
# Add `zeros_like(x)` such that x's value and gradient are identical. We
# do this so we can ensure each gradient value is mapped to the right
# gradient location. (Not doing this means the gradient wrt `x` is simple
# `ones_like(x)`.)
# Note:
# zeros_like_x_pl == zeros_like(x_pl)
# gradient(zeros_like_x_pl, x_pl) == x_pl - 1
zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
- array_ops.stop_gradient(x_pl * (x_pl - 1.)))
x = x_pl + zeros_like_x_pl
actual = du.fill_triangular(x, **kwargs)
grad_actual = gradients_impl.gradients(actual, x_pl)[0]
[actual_, grad_actual_] = sess.run([actual, grad_actual],
feed_dict={x_pl: x_})
expected = self._fill_triangular(x_, **kwargs)
if use_deferred_shape:
self.assertEqual(None, actual.shape)
else:
self.assertAllEqual(expected.shape, actual.shape)
self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)
self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)
@test_util.run_deprecated_v1
def testCorrectlyMakes1x1TriLower(self):
self._run_test(self._rng.randn(3, int(1*2/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakesNoBatchTriLower(self):
self._run_test(self._rng.randn(int(4*5/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriLower(self):
self._run_test(self._rng.randn(2, 3, int(3*4/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriLowerUnknownShape(self):
self._run_test(self._rng.randn(2, 3, int(3*4/2)), use_deferred_shape=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)), use_deferred_shape=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriLower(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)))
@test_util.run_deprecated_v1
def testCorrectlyMakes1x1TriUpper(self):
self._run_test(self._rng.randn(3, int(1*2/2)), upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesNoBatchTriUpper(self):
self._run_test(self._rng.randn(int(4*5/2)), upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriUpper(self):
self._run_test(self._rng.randn(2, 2, int(3*4/2)), upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatchTriUpperUnknownShape(self):
self._run_test(self._rng.randn(2, 2, int(3*4/2)),
use_deferred_shape=True,
upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)),
use_deferred_shape=True,
upper=True)
@test_util.run_deprecated_v1
def testCorrectlyMakesBatch7x7TriUpper(self):
self._run_test(self._rng.randn(2, 3, int(7*8/2)), upper=True)
|
FillTriangularTest
|
python
|
celery__celery
|
t/unit/backends/test_redis.py
|
{
"start": 4470,
"end": 5013
}
|
class ____(conftest.MockCallbacks):
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
**connection_kwargs):
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs)
for hostname, port in sentinels]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
def master_for(self, service_name, redis_class):
return random.choice(self.sentinels)
|
Sentinel
|
python
|
fastapi__sqlmodel
|
tests/test_deprecations.py
|
{
"start": 46,
"end": 84
}
|
class ____(SQLModel):
name: str
|
Item
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/util/test_callback_manager.py
|
{
"start": 1918,
"end": 2238
}
|
class ____:
def __init__(self) -> None:
self.last_name = None
self.last_old = None
self.last_new = None
def __call__(self, event):
self.method(event)
def method(self, event):
self.event = event
def partially_good(self, arg, event):
pass
|
_GoodEventCallback
|
python
|
spack__spack
|
lib/spack/spack/solver/runtimes.py
|
{
"start": 439,
"end": 13471
}
|
class ____:
"""An object of this class is injected in callbacks to compilers, to let them declare
properties of the runtimes they support and of the runtimes they provide, and to add
runtime dependencies to the nodes using said compiler.
The usage of the object is the following. First, a runtime package name or the wildcard
"*" are passed as an argument to __call__, to set which kind of package we are referring to.
Then we can call one method with a directive-like API.
Examples:
>>> pkg = RuntimePropertyRecorder(setup)
>>> # Every package compiled with %gcc has a link dependency on 'gcc-runtime'
>>> pkg("*").depends_on(
... "gcc-runtime",
... when="%gcc",
... type="link",
... description="If any package uses %gcc, it depends on gcc-runtime"
... )
>>> # The version of gcc-runtime is the same as the %gcc used to "compile" it
>>> pkg("gcc-runtime").requires("@=9.4.0", when="%gcc@=9.4.0")
"""
def __init__(self, setup):
self._setup = setup
self.rules = []
self.runtime_conditions = set()
self.injected_dependencies = set()
# State of this object set in the __call__ method, and reset after
# each directive-like method
self.current_package = None
def __call__(self, package_name: str) -> "RuntimePropertyRecorder":
"""Sets a package name for the next directive-like method call"""
assert self.current_package is None, f"state was already set to '{self.current_package}'"
self.current_package = package_name
return self
def reset(self):
"""Resets the current state."""
self.current_package = None
def depends_on(self, dependency_str: str, *, when: str, type: str, description: str) -> None:
"""Injects conditional dependencies on packages.
Conditional dependencies can be either "real" packages or virtual dependencies.
Args:
dependency_str: the dependency spec to inject
when: anonymous condition to be met on a package to have the dependency
type: dependency type
description: human-readable description of the rule for adding the dependency
"""
# TODO: The API for this function is not final, and is still subject to change. At
# TODO: the moment, we implemented only the features strictly needed for the
# TODO: functionality currently provided by Spack, and we assert nothing else is required.
msg = "the 'depends_on' method can be called only with pkg('*')"
assert self.current_package == "*", msg
when_spec = spack.spec.Spec(when)
assert not when_spec.name, "only anonymous when specs are accepted"
dependency_spec = spack.spec.Spec(dependency_str)
if dependency_spec.versions != spack.version.any_version:
self._setup.version_constraints.add((dependency_spec.name, dependency_spec.versions))
self.injected_dependencies.add(dependency_spec)
body_str, node_variable = self.rule_body_from(when_spec)
head_clauses = self._setup.spec_clauses(dependency_spec, body=False)
runtime_pkg = dependency_spec.name
is_virtual = head_clauses[0].args[0] == "virtual_node"
main_rule = (
f"% {description}\n"
f'1 {{ attr("depends_on", {node_variable}, node(0..X-1, "{runtime_pkg}"), "{type}") :'
f' max_dupes("{runtime_pkg}", X)}} 1:-\n'
f"{body_str}."
)
if is_virtual:
main_rule = (
f"% {description}\n"
f'attr("dependency_holds", {node_variable}, "{runtime_pkg}", "{type}") :-\n'
f"{body_str}."
)
self.rules.append(main_rule)
for clause in head_clauses:
if clause.args[0] == "node":
continue
runtime_node = f'node(RuntimeID, "{runtime_pkg}")'
head_str = str(clause).replace(f'"{runtime_pkg}"', runtime_node)
depends_on_constraint = (
f' attr("depends_on", {node_variable}, {runtime_node}, "{type}"),\n'
)
if is_virtual:
depends_on_constraint = (
f' attr("depends_on", {node_variable}, ProviderNode, "{type}"),\n'
f" provider(ProviderNode, {runtime_node}),\n"
)
rule = f"{head_str} :-\n" f"{depends_on_constraint}" f"{body_str}."
self.rules.append(rule)
self.reset()
@staticmethod
def node_for(name: str) -> str:
return f'node(ID{name.replace("-", "_")}, "{name}")'
def rule_body_from(self, when_spec: "spack.spec.Spec") -> Tuple[str, str]:
"""Computes the rule body from a "when" spec, and returns it, along with the
node variable.
"""
node_placeholder = "XXX"
node_variable = "node(ID, Package)"
when_substitutions = {}
for s in when_spec.traverse(root=False):
when_substitutions[f'"{s.name}"'] = self.node_for(s.name)
when_spec.name = node_placeholder
body_clauses = self._setup.spec_clauses(when_spec, body=True)
for clause in body_clauses:
if clause.args[0] == "virtual_on_incoming_edges":
# Substitute: attr("virtual_on_incoming_edges", ProviderNode, Virtual)
# with: attr("virtual_on_edge", ParentNode, ProviderNode, Virtual)
# (avoid adding virtuals everywhere, if a single edge needs it)
_, provider, virtual = clause.args
clause.args = "virtual_on_edge", node_placeholder, provider, virtual
# Check for abstract hashes in the body
for s in when_spec.traverse(root=False):
if s.abstract_hash:
body_clauses.append(fn.attr("hash", s.name, s.abstract_hash))
body_str = ",\n".join(f" {x}" for x in body_clauses)
body_str = body_str.replace(f'"{node_placeholder}"', f"{node_variable}")
for old, replacement in when_substitutions.items():
body_str = body_str.replace(old, replacement)
return body_str, node_variable
def requires(self, impose: str, *, when: str):
"""Injects conditional requirements on a given package.
Args:
impose: constraint to be imposed
when: condition triggering the constraint
"""
msg = "the 'requires' method cannot be called with pkg('*') or without setting the package"
assert self.current_package is not None and self.current_package != "*", msg
imposed_spec = spack.spec.Spec(f"{self.current_package}{impose}")
when_spec = spack.spec.Spec(f"{self.current_package}{when}")
assert imposed_spec.versions.concrete, f"{impose} must have a concrete version"
# Add versions to possible versions
for s in (imposed_spec, when_spec):
if not s.versions.concrete:
continue
self._setup.possible_versions[s.name][s.version].append(Provenance.RUNTIME)
self.runtime_conditions.add((imposed_spec, when_spec))
self.reset()
def propagate(self, constraint_str: str, *, when: str):
msg = "the 'propagate' method can be called only with pkg('*')"
assert self.current_package == "*", msg
when_spec = spack.spec.Spec(when)
assert not when_spec.name, "only anonymous when specs are accepted"
when_substitutions = {}
for s in when_spec.traverse(root=False):
when_substitutions[f'"{s.name}"'] = self.node_for(s.name)
body_str, node_variable = self.rule_body_from(when_spec)
constraint_spec = spack.spec.Spec(constraint_str)
constraint_clauses = self._setup.spec_clauses(constraint_spec, body=False)
for clause in constraint_clauses:
if clause.args[0] == "node_version_satisfies":
self._setup.version_constraints.add(
(constraint_spec.name, constraint_spec.versions)
)
args = f'"{constraint_spec.name}", "{constraint_spec.versions}"'
head_str = f"propagate({node_variable}, node_version_satisfies({args}))"
rule = f"{head_str} :-\n{body_str}."
self.rules.append(rule)
self.reset()
    def default_flags(self, spec: "spack.spec.Spec"):
        """Registers rules injecting default compiler flags for an external spec.

        Flags are read from ``spec.extra_attributes["flags"]``; non-external
        specs, or specs without a "flags" attribute, are ignored.
        """
        if not spec.external or "flags" not in spec.extra_attributes:
            self.reset()
            return
        # Condition: the spec is a build-time dependency (compiler) of the node.
        when_spec = spack.spec.Spec(f"%[deptypes=build] {spec}")
        body_str, node_variable = self.rule_body_from(when_spec)
        node_placeholder = "XXX"
        flags = spec.extra_attributes["flags"]
        # Build an anonymous spec string carrying all the default flag values.
        root_spec_str = f"{node_placeholder}"
        for flag_type, default_values in flags.items():
            root_spec_str = f"{root_spec_str} {flag_type}='{default_values}'"
        root_spec = spack.spec.Spec(root_spec_str)
        head_clauses = self._setup.spec_clauses(
            root_spec, body=False, context=SourceContext(source="compiler")
        )
        self.rules.append(f"% Default compiler flags for {spec}\n")
        for clause in head_clauses:
            # Skip the bare "node" clause: only flag attributes become heads.
            if clause.args[0] == "node":
                continue
            head_str = str(clause).replace(f'"{node_placeholder}"', f"{node_variable}")
            rule = f"{head_str} :-\n{body_str}."
            self.rules.append(rule)
        self.reset()
    def consume_facts(self):
        """Consume the facts collected by this object, and emits rules and
        facts for the runtimes.
        """
        # Declarations: one runtime() fact per (non-virtual) injected dependency.
        self._setup.gen.h2("Runtimes: declarations")
        runtime_pkgs = sorted(
            {x.name for x in self.injected_dependencies if not spack.repo.PATH.is_virtual(x.name)}
        )
        for runtime_pkg in runtime_pkgs:
            self._setup.gen.fact(fn.runtime(runtime_pkg))
            self._setup.gen.newline()
        # Rules accumulated by propagate() / default_flags() are emitted verbatim.
        self._setup.gen.h2("Runtimes: rules")
        self._setup.gen.newline()
        for rule in self.rules:
            self._setup.gen.append(rule)
            self._setup.gen.newline()
        # Requirements recorded by requires() become solver conditions.
        self._setup.gen.h2("Runtimes: requirements")
        for imposed_spec, when_spec in sorted(self.runtime_conditions):
            msg = f"{when_spec} requires {imposed_spec} at runtime"
            _ = self._setup.condition(when_spec, imposed_spec=imposed_spec, msg=msg)
        self._setup.trigger_rules()
        self._setup.effect_rules()
def _normalize_packages_yaml(packages_yaml: Dict[str, Any]) -> None:
    """Normalizes ``packages_yaml`` in place so it has no virtual-package entries.

    Buildability and externals declared on a virtual package are transferred to
    that virtual's providers.
    """
    for pkg_name in list(packages_yaml.keys()):
        is_virtual = spack.repo.PATH.is_virtual(pkg_name)
        if pkg_name == "all" or not is_virtual:
            continue
        # Remove the virtual entry from the normalized configuration
        data = packages_yaml.pop(pkg_name)
        is_buildable = data.get("buildable", True)
        if not is_buildable:
            # A non-buildable virtual makes all of its providers non-buildable.
            for provider in spack.repo.PATH.providers_for(pkg_name):
                entry = packages_yaml.setdefault(provider.name, {})
                entry["buildable"] = False
        externals = data.get("externals", [])

        def keyfn(x):
            return spack.spec.Spec(x["spec"]).name

        # setdefault + extend keeps this correct even if "externals" is not
        # sorted by provider: groupby only groups consecutive runs, but each
        # run extends the same accumulated list.
        for provider, specs in itertools.groupby(externals, key=keyfn):
            entry = packages_yaml.setdefault(provider, {})
            entry.setdefault("externals", []).extend(specs)
def external_config_with_implicit_externals(
    configuration: spack.config.Configuration,
) -> Dict[str, Any]:
    """Returns a normalized copy of the ``packages`` configuration, with
    implicit libc externals added when libc compatibility is being checked.
    """
    # Read packages.yaml and normalize it so that it will not contain entries referring to
    # virtual packages.
    packages_yaml = configuration.deepcopy_as_builtin("packages", line_info=True)
    _normalize_packages_yaml(packages_yaml)
    # Add externals for libc from compilers on Linux
    if not using_libc_compatibility():
        return packages_yaml
    seen = set()
    for compiler in spack.compilers.config.all_compilers_from(configuration):
        libc = spack.compilers.libraries.CompilerPropertyDetector(compiler).default_libc()
        if libc and libc not in seen:
            # Each distinct libc becomes an external entry under its own name.
            seen.add(libc)
            entry = {"spec": f"{libc}", "prefix": libc.external_path}
            packages_yaml.setdefault(libc.name, {}).setdefault("externals", []).append(entry)
    return packages_yaml
def all_libcs() -> Set[spack.spec.Spec]:
    """Return a set of all libc specs targeted by any configured compiler. If none, fall back to
    libc determined from the current Python process if dynamically linked."""
    # Collect the default libc of every configured compiler, skipping compilers
    # for which no libc could be detected.
    detected = {
        libc
        for compiler in spack.compilers.config.all_compilers_from(spack.config.CONFIG)
        for libc in (spack.compilers.libraries.CompilerPropertyDetector(compiler).default_libc(),)
        if libc is not None
    }
    if detected:
        return detected
    # Fallback: the libc the current interpreter is dynamically linked against.
    fallback = spack.util.libc.libc_from_current_python_process()
    return {fallback} if fallback else set()
|
RuntimePropertyRecorder
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/events.py
|
{
"start": 335,
"end": 1987
}
|
class ____(NonStrictDataModel):
    """
    :param metric: The metric name
    :type metric: str
    :param variants: The names of the metric variants
    :type variants: Sequence[str]
    """

    # JSON schema consumed by the base data model for validation.
    _schema = {
        "properties": {
            "metric": {"description": "The metric name", "type": ["string", "null"]},
            "variants": {
                "description": "The names of the metric variants",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, metric: Optional[str] = None, variants: Optional[List[str]] = None, **kwargs: Any) -> None:
        super(MetricVariants, self).__init__(**kwargs)
        self.metric = metric
        self.variants = variants

    @schema_property("metric")
    def metric(self) -> Optional[str]:
        return self._property_metric

    @metric.setter
    def metric(self, value: Optional[str]) -> None:
        # None clears the property; otherwise the value must be a string.
        if value is None:
            self._property_metric = None
            return
        self.assert_isinstance(value, "metric", six.string_types)
        self._property_metric = value

    @schema_property("variants")
    def variants(self) -> Optional[List[str]]:
        return self._property_variants

    @variants.setter
    def variants(self, value: Optional[List[str]]) -> None:
        # None clears the property; otherwise a list/tuple of strings is required.
        if value is None:
            self._property_variants = None
            return
        self.assert_isinstance(value, "variants", (list, tuple))
        # Also validate every element of the sequence.
        self.assert_isinstance(value, "variants", six.string_types, is_array=True)
        self._property_variants = value
|
MetricVariants
|
python
|
aio-libs__aiohttp
|
aiohttp/streams.py
|
{
"start": 994,
"end": 1415
}
|
class ____:
    """Async iterator yielding ``(data, end_of_http_chunk)`` tuples from a stream."""

    __slots__ = ("_stream",)

    def __init__(self, stream: "StreamReader") -> None:
        self._stream = stream

    def __aiter__(self) -> "ChunkTupleAsyncStreamIterator":
        return self

    async def __anext__(self) -> tuple[bytes, bool]:
        chunk = await self._stream.readchunk()
        # An empty payload with the "not end of chunk" flag marks exhaustion.
        if chunk == (b"", False):
            raise StopAsyncIteration
        return chunk
|
ChunkTupleAsyncStreamIterator
|
python
|
rapidsai__cudf
|
python/cudf/cudf/tests/input_output/test_json.py
|
{
"start": 36540,
"end": 48582
}
|
class ____:
    """Tests shared across nested-JSON cases.

    NOTE(review): ``tag`` and ``data`` are pytest fixtures defined elsewhere —
    presumably parametrized (case-label, JSON-lines string) pairs; confirm
    against the fixture definitions.
    """

    @pytest.mark.parametrize("chunk_size", [10, 100, 1024, 1024 * 1024])
    def test_chunked_nested_json_reader(self, tag, data, chunk_size):
        # Reading the whole input at once is the reference result.
        expected = cudf.read_json(StringIO(data), lines=True)

        # Re-read the same input in byte_range chunks and concatenate.
        source_size = len(data)
        chunks = []
        for chunk_start in range(0, source_size, chunk_size):
            chunks.append(
                cudf.read_json(
                    StringIO(data),
                    byte_range=[chunk_start, chunk_size],
                    lines=True,
                )
            )
        df = cudf.concat(chunks, ignore_index=True)
        assert expected.to_arrow().equals(df.to_arrow())

    @pytest.mark.skipif(
        PANDAS_VERSION < PANDAS_CURRENT_SUPPORTED_VERSION,
        reason="https://github.com/pandas-dev/pandas/pull/57439",
    )
    def test_order_nested_json_reader(self, tag, data):
        expected = pd.read_json(StringIO(data), lines=True)
        target = cudf.read_json(StringIO(data), lines=True)
        # Using pyarrow instead of assert_eq because pandas
        # doesn't handle nested values comparisons correctly
        if tag == "dtype_mismatch":
            with pytest.raises(AssertionError):
                # pandas parses integer values in float representation
                # as integer
                assert pa.Table.from_pandas(expected).equals(target.to_arrow())
        elif tag == "missing":
            with pytest.raises(AssertionError):
                # pandas inferences integer with nulls as float64
                assert pa.Table.from_pandas(expected).equals(target.to_arrow())
        else:
            assert pa.Table.from_pandas(expected).equals(target.to_arrow())
def test_json_round_trip_gzip():
    """Round-trips a DataFrame through gzip-wrapped JSON, including a write
    that starts in the middle of the file."""
    df = cudf.DataFrame({"a": [1, 2, 3], "b": ["abc", "def", "ghi"]})
    bio = BytesIO()
    with gzip.open(bio, mode="wb") as fo:
        # to_json on a compressed handle emits a UserWarning.
        with pytest.warns(UserWarning):
            df.to_json(fo, orient="records", lines=True)
    bio.seek(0)
    with gzip.open(bio, mode="rb") as fo:
        written_df = cudf.read_json(fo, orient="records", lines=True)
    assert_eq(written_df, df)

    # Testing writing from middle of the file.
    loc = bio.tell()

    with gzip.open(bio, mode="wb") as fo:
        fo.seek(loc)
        with pytest.warns(UserWarning):
            df.to_json(fo, orient="records", lines=True)

    bio.seek(loc)
    with gzip.open(bio, mode="rb") as fo:
        fo.seek(loc)
        written_df = cudf.read_json(fo, orient="records", lines=True)
    assert_eq(written_df, df)
@pytest.mark.parametrize(
    "data",
    [
        # # empty input
        # assert failing due to missing index size information
        "",
        "[]",
        "[]\n[]\n[]",
        # simple values
        """[1]\n[2]\n[3]""",
        """[1, 2, 3]\n[4, 5, 6]\n[7, 8, 9]""",
        # nulls
        """[1, 2, 3]\n[4, 5, null]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, null]\n[7, 8, 9]\n[null, null, null]""",
        """[1, 2, 3]\n[4, 5, null]\n[]""",
        # missing
        """[1, 2, 3]\n[4, 5 ]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6]\n[7, 8, 9, 10]""",
        """[1, 2, 3]\n[4, 5, 6, {}]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6, []]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6, {"a": 10}]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6, [10]]\n[7, 8, 9]""",
        # mixed
        """[1, 2, 3]\n[4, 5, {}]\n[7, 8, 9]""",
        """[1, 2, {}]\n[4, 5, 6]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, [6]]\n[7, 8, 9]""",
        """[1, 2, [3]]\n[4, 5, 6]\n[7, 8, 9]""",
        # nested
        """[1, 2, [3]]\n[4, 5, [6]]\n[7, 8, [9]]""",
        """[1, 2, {"a": 3}]\n[4, 5, {"b": 6}]\n[7, 8, {"c": 9}]""",
        """[1, 2, [{"a": 3}, {"a": 3}]]
        [4, 5, [{"b": 6}, {"b": 6}, {}, {"b": 6}]]
        [7, 8, [{}]]""",
        """[1, 2, {"a": [3, 3, 3]}]
        [4, 5, {"b": [6, 6]}]
        [7, 8, {"c": 9}]""",
        """[1, 2, [{"a": 3}, {"a": null}]]
        [4, 5, [{"b": [6.0, 6, 06]}, {"b": [6]}, {}, {"b": null}]]
        [7, 8, [{}]]""",
    ],
)
def test_json_array_of_arrays(data, lines):
    """Compares cudf's "values"-orient reader to pandas on array-of-array rows.

    NOTE(review): ``lines`` is a pytest fixture defined elsewhere — presumably
    a boolean; confirm against the fixture definition.
    """
    data = data if lines else "[" + data.replace("\n", ",") + "]"
    pdf = pd.read_json(StringIO(data), orient="values", lines=lines)
    df = cudf.read_json(
        StringIO(data),
        engine="cudf",
        orient="values",
        lines=lines,
    )
    # if mixed with dict/list type, replace other types with None.
    if 2 in pdf.columns and any(
        pdf[2].apply(lambda x: isinstance(x, dict) or isinstance(x, list))
    ):
        pdf[2] = pdf[2].apply(
            lambda x: x if isinstance(x, dict) or isinstance(x, list) else None
        )
    # TODO: Replace string column names with integer column names
    # for values orient in cudf json reader
    pdf.rename(columns={name: str(name) for name in pdf.columns}, inplace=True)
    # assert_eq(pdf, df)
    pa_table_pdf = pa.Table.from_pandas(
        pdf, schema=df.to_arrow().schema, safe=False
    )
    assert df.to_arrow().equals(pa_table_pdf)
@pytest.mark.parametrize(
    "jsonl_string",
    [
        # simple list with mixed types
        """{"a":[123, {}], "b":1.1}""",
        """{"a":[123, {"0": 123}], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[{"L": 123}, 123], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[123, {"0": 123}, 12.3], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[123, {"0": 123}, null], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":["123", {"0": 123}], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[{"0": 123}, "123"], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":["123", {"0": 123}, "123"], "b":1.0}\n {"b":1.1}""",
        """{"a":[123]}\n {"a":[{"0": 123}], "b":1.0}\n {"b":1.1}""",
        """{"a":[{"0": 123}]}\n {"a":[123], "b":1.0}\n {"b":1.1}""",
        """{"a":[{"0": 123}]}\n {"a": []}\n {"a":[123], "b":1.0}\n{"b":1.1}""",
        """{"b":1.0, "a":[{"0": 123}]}\n {"a":[123]}\n {"b":1.1}\n{"a": []}""",
        """{"a": []}\n {"a":[{"0": 123}]}\n {"a":[123], "b":1.0}\n{"b":1.1}""",
        """{"a": []}\n {"a":[123], "b":1.0}\n {"a":[{"0": 123}]}\n{"b":1.1}""",
        # nested list with mixed types
        """{"a":[123, [{"0": 123}, {}]], "b":1.0}
        {"b":1.1}
        {"a":[]}
        {"a":[123]}
        {"a":[[123], []]}""",
        """{"a":[], "b":1.0}
        {"a":[[[456]]]}
        {"a":[[123]]}
        {"a":[123]}""",
        """{"a":[123], "b":1.0}
        {"b":1.1}
        {"b":2.1}
        {"a":[[[[[[]]]]]]}""",
        """{"a":[123], "b":1.0}
        {"a":[[[[[[]]]]]]}
        {"a":[[[[[[]]]]], [[[[[]]]]]]}
        {"a":[[[[[[]]]], [[[[]]]]]]}
        {"a":[[[[[[]]], [[[]]]]]]}
        {"a":[[[[[[]], [[]]]]]]}
        {"a":[[[[[[], 123, []]]]]]}""",
        # mixed elements in multiple columns
        """{"a":[123, {"0": 123}], "b":1.0}
        {"c": ["abc"], "b":1.1}
        {"c": ["abc", []] }""",
    ],
)
def test_json_nested_mixed_types_in_list(jsonl_string):
    """Checks cudf against pandas on lists mixing dict and scalar elements,
    normalizing pandas' result where cudf drops non-dict/list items."""
    # utility function for this test:
    # replace list elements with None if it has dict and non-dict (ignore None)
    def _replace_in_list(list_to_replace, replace_items):
        return [
            _replace_in_list(x, replace_items)
            if isinstance(x, list)
            else None
            if x in replace_items
            else x
            for x in list_to_replace
        ]

    def _replace_with_nulls(df, replace_items):
        # Apply the replacement to every object column holding lists.
        for col in df.columns:
            if df[col].dtype == "object":
                df[col] = df[col].apply(
                    lambda x: _replace_in_list(x, replace_items)
                    if isinstance(x, list)
                    else x
                )
        return df

    # both json lines and json string tested.
    json_string = "[" + jsonl_string.replace("\n", ",") + "]"
    pdf = pd.read_json(StringIO(jsonl_string), orient="records", lines=True)
    pdf2 = pd.read_json(StringIO(json_string), orient="records", lines=False)
    assert_eq(pdf, pdf2)
    # replace list elements with None if it has dict and non-dict
    # in above test cases, these items are mixed with dict/list items
    # so, replace them with None.
    pdf = _replace_with_nulls(pdf, [123, "123", 12.3, "abc"])
    gdf = cudf.read_json(
        StringIO(jsonl_string),
        orient="records",
        lines=True,
    )
    gdf2 = cudf.read_json(
        StringIO(json_string),
        engine="cudf",
        orient="records",
        lines=False,
    )
    if """[{"0": 123}, {}]""" not in jsonl_string:
        # {} in pandas is represented as {"0": None} in cudf
        assert_eq(gdf, pdf)
        assert_eq(gdf2, pdf)
    pa_table_pdf = pa.Table.from_pandas(
        pdf, schema=gdf.to_arrow().schema, safe=False
    )
    assert gdf.to_arrow().equals(pa_table_pdf)
    assert gdf2.to_arrow().equals(pa_table_pdf)
@pytest.mark.parametrize(
    "jsonl_string",
    [
        # mixed type in list (in different order)
        """{"a":[[{"0": 123}, {}], {"1": 321}], "b":1.0}""",
        """{"a":[{"1": 321}, [{"0": 123}, {}], ], "b":1.0}""",
        """{"a":[123, [{"0": 123}, {}], {"1": 321}], "b":1.0}""",
        """{"a":[null, [{"0": 123}, {}], {"1": 321}], "b":1.0}""",
        # mixed type in struct (in different order)
        """{"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0}
        {"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0}""",
        """{"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0}
        {"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0}""",
        """{"a": {"b": {"0": 123}, "c": null}, "d":1.0}
        {"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0}
        {"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0}""",
        """{"a": {"b": {"0": 123}, "c": 123}, "d":1.0}
        {"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0}
        {"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0}""",
    ],
)
def test_json_nested_mixed_types_error(jsonl_string):
    """A field that is a list in one row and a struct in another must fail."""
    # mixing list and struct should raise an exception
    with pytest.raises(RuntimeError):
        cudf.read_json(
            StringIO(jsonl_string),
            orient="records",
            lines=True,
        )
@pytest.mark.parametrize("on_bad_lines", ["error", "recover", "abc"])
def test_json_reader_on_bad_lines(on_bad_lines):
    """Covers the three on_bad_lines modes: raise, null-fill, and invalid value."""
    json_input = StringIO(
        '{"a":1,"b":10}\n{"a":2,"b":11}\nabc\n{"a":3,"b":12}\n'
    )
    if on_bad_lines == "error":
        # "error": the malformed third line aborts the read.
        with pytest.raises(RuntimeError):
            cudf.read_json(
                json_input,
                lines=True,
                orient="records",
                on_bad_lines=on_bad_lines,
            )
    elif on_bad_lines == "recover":
        # "recover": the malformed line becomes a row of nulls.
        actual = cudf.read_json(
            json_input, lines=True, orient="records", on_bad_lines=on_bad_lines
        )
        expected = cudf.DataFrame(
            {"a": [1, 2, None, 3], "b": [10, 11, None, 12]}
        )
        assert_eq(actual, expected)
    else:
        # Any other value is rejected up front.
        with pytest.raises(TypeError):
            cudf.read_json(
                json_input,
                lines=True,
                orient="records",
                on_bad_lines=on_bad_lines,
            )
def test_chunked_json_reader():
    """Round-trips a large frame through the low-memory (chunked) JSON reader."""
    df = cudf.DataFrame(
        {
            "a": ["aaaa"] * 1_000_000,
            "b": range(1_000_000),
        }
    )
    buf = BytesIO()
    df.to_json(buf, lines=True, orient="records", engine="cudf")
    buf.seek(0)
    df = df.to_pandas()
    # Force the chunked code path via the low_memory option.
    with cudf.option_context("io.json.low_memory", True):
        gdf = cudf.read_json(buf, lines=True)
    assert_eq(df, gdf)
# compression formats limited to those supported by both reader and writer
@pytest.mark.parametrize("compression", ["gzip", "snappy", "zstd"])
def test_roundtrip_compression(compression, tmp_path):
    """Writes and re-reads compressed JSON with the cudf engine."""
    expected = cudf.DataFrame({"a": [1], "b": ["2"]})
    fle = BytesIO()
    expected.to_json(fle, engine="cudf", compression=compression)
    result = cudf.read_json(fle, engine="cudf", compression=compression)
    assert_eq(result, expected)
|
TestNestedJsonReaderCommon
|
python
|
python-pillow__Pillow
|
Tests/test_color_lut.py
|
{
"start": 267,
"end": 10622
}
|
class ____:
    """Exercises the C-level ``Image.core.color_lut_3d`` API: argument
    validation, identity transforms, channel handling, and value clamping."""

    def generate_identity_table(
        self, channels: int, size: int | tuple[int, int, int]
    ) -> tuple[int, tuple[int, int, int], list[float]]:
        """Builds a flattened identity LUT of the given channel count and size.

        For channels > 3, the 4th/5th channels repeat the R/G ramps.
        Returns (channels, (size1D, size2D, size3D), flat_table).
        """
        if isinstance(size, tuple):
            size_1d, size_2d, size_3d = size
        else:
            size_1d, size_2d, size_3d = (size, size, size)

        table = [
            [
                r / (size_1d - 1) if size_1d != 1 else 0,
                g / (size_2d - 1) if size_2d != 1 else 0,
                b / (size_3d - 1) if size_3d != 1 else 0,
                r / (size_1d - 1) if size_1d != 1 else 0,
                g / (size_2d - 1) if size_2d != 1 else 0,
            ][:channels]
            for b in range(size_3d)
            for g in range(size_2d)
            for r in range(size_1d)
        ]
        return (
            channels,
            (size_1d, size_2d, size_3d),
            [item for sublist in table for item in sublist],
        )

    def test_wrong_args(self) -> None:
        # Each call below violates a different validation rule; the expected
        # error message fragment is in the match= argument.
        im = Image.new("RGB", (10, 10), 0)

        with pytest.raises(ValueError, match="filter"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BICUBIC, *self.generate_identity_table(3, 3)
            )

        with pytest.raises(ValueError, match="image mode"):
            im.im.color_lut_3d(
                "wrong", Image.Resampling.BILINEAR, *self.generate_identity_table(3, 3)
            )

        with pytest.raises(ValueError, match="table_channels"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(5, 3)
            )

        with pytest.raises(ValueError, match="table_channels"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(1, 3)
            )

        with pytest.raises(ValueError, match="table_channels"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, *self.generate_identity_table(2, 3)
            )

        with pytest.raises(ValueError, match="Table size"):
            im.im.color_lut_3d(
                "RGB",
                Image.Resampling.BILINEAR,
                *self.generate_identity_table(3, (1, 3, 3)),
            )

        with pytest.raises(ValueError, match="Table size"):
            im.im.color_lut_3d(
                "RGB",
                Image.Resampling.BILINEAR,
                *self.generate_identity_table(3, (66, 3, 3)),
            )

        with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, 0] * 7
            )

        with pytest.raises(ValueError, match=r"size1D \* size2D \* size3D"):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, 0] * 9
            )

        with pytest.raises(TypeError):
            im.im.color_lut_3d(
                "RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), [0, 0, "0"] * 8
            )

        with pytest.raises(TypeError):
            im.im.color_lut_3d("RGB", Image.Resampling.BILINEAR, 3, (2, 2, 2), 16)

    @pytest.mark.parametrize(
        "lut_mode, table_channels, table_size",
        [
            ("RGB", 3, 3),
            ("CMYK", 4, 3),
            ("RGB", 3, (2, 3, 3)),
            ("RGB", 3, (65, 3, 3)),
            ("RGB", 3, (3, 65, 3)),
            ("RGB", 3, (2, 3, 65)),
        ],
    )
    def test_correct_args(
        self, lut_mode: str, table_channels: int, table_size: int | tuple[int, int, int]
    ) -> None:
        # Valid combinations must not raise.
        im = Image.new("RGB", (10, 10), 0)
        im.im.color_lut_3d(
            lut_mode,
            Image.Resampling.BILINEAR,
            *self.generate_identity_table(table_channels, table_size),
        )

    @pytest.mark.parametrize(
        "image_mode, lut_mode, table_channels, table_size",
        [
            ("L", "RGB", 3, 3),
            ("RGB", "L", 3, 3),
            ("L", "L", 3, 3),
            ("RGB", "RGBA", 3, 3),
            ("RGB", "RGB", 4, 3),
        ],
    )
    def test_wrong_mode(
        self, image_mode: str, lut_mode: str, table_channels: int, table_size: int
    ) -> None:
        # Incompatible image/LUT mode pairs are rejected.
        with pytest.raises(ValueError, match="wrong mode"):
            im = Image.new(image_mode, (10, 10), 0)
            im.im.color_lut_3d(
                lut_mode,
                Image.Resampling.BILINEAR,
                *self.generate_identity_table(table_channels, table_size),
            )

    @pytest.mark.parametrize(
        "image_mode, lut_mode, table_channels, table_size",
        [
            ("RGBA", "RGBA", 3, 3),
            ("RGBA", "RGBA", 4, 3),
            ("RGB", "HSV", 3, 3),
            ("RGB", "RGBA", 4, 3),
        ],
    )
    def test_correct_mode(
        self, image_mode: str, lut_mode: str, table_channels: int, table_size: int
    ) -> None:
        # Compatible image/LUT mode pairs must not raise.
        im = Image.new(image_mode, (10, 10), 0)
        im.im.color_lut_3d(
            lut_mode,
            Image.Resampling.BILINEAR,
            *self.generate_identity_table(table_channels, table_size),
        )

    def test_identities(self) -> None:
        # An identity LUT must leave the image unchanged at any cube size.
        g = Image.linear_gradient("L")
        im = Image.merge(
            "RGB",
            [
                g,
                g.transpose(Image.Transpose.ROTATE_90),
                g.transpose(Image.Transpose.ROTATE_180),
            ],
        )

        # Fast test with small cubes
        for size in [2, 3, 5, 7, 11, 16, 17]:
            assert_image_equal(
                im,
                im._new(
                    im.im.color_lut_3d(
                        "RGB",
                        Image.Resampling.BILINEAR,
                        *self.generate_identity_table(3, size),
                    )
                ),
            )

        # Not so fast
        assert_image_equal(
            im,
            im._new(
                im.im.color_lut_3d(
                    "RGB",
                    Image.Resampling.BILINEAR,
                    *self.generate_identity_table(3, (2, 2, 65)),
                )
            ),
        )

    def test_identities_4_channels(self) -> None:
        g = Image.linear_gradient("L")
        im = Image.merge(
            "RGB",
            [
                g,
                g.transpose(Image.Transpose.ROTATE_90),
                g.transpose(Image.Transpose.ROTATE_180),
            ],
        )

        # Red channel copied to alpha
        # (generate_identity_table's 4th channel repeats the R ramp).
        assert_image_equal(
            Image.merge("RGBA", (im.split() * 2)[:4]),
            im._new(
                im.im.color_lut_3d(
                    "RGBA",
                    Image.Resampling.BILINEAR,
                    *self.generate_identity_table(4, 17),
                )
            ),
        )

    def test_copy_alpha_channel(self) -> None:
        # A 3-channel LUT on an RGBA image must pass alpha through untouched.
        g = Image.linear_gradient("L")
        im = Image.merge(
            "RGBA",
            [
                g,
                g.transpose(Image.Transpose.ROTATE_90),
                g.transpose(Image.Transpose.ROTATE_180),
                g.transpose(Image.Transpose.ROTATE_270),
            ],
        )

        assert_image_equal(
            im,
            im._new(
                im.im.color_lut_3d(
                    "RGBA",
                    Image.Resampling.BILINEAR,
                    *self.generate_identity_table(3, 17),
                )
            ),
        )

    def test_channels_order(self) -> None:
        g = Image.linear_gradient("L")
        im = Image.merge(
            "RGB",
            [
                g,
                g.transpose(Image.Transpose.ROTATE_90),
                g.transpose(Image.Transpose.ROTATE_180),
            ],
        )

        # Reverse channels by splitting and using table
        # fmt: off
        assert_image_equal(
            Image.merge('RGB', im.split()[::-1]),
            im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                    3, (2, 2, 2), [
                        0, 0, 0,  0, 0, 1,
                        0, 1, 0,  0, 1, 1,
                        1, 0, 0,  1, 0, 1,
                        1, 1, 0,  1, 1, 1,
                    ])))
        # fmt: on

    def test_overflow(self) -> None:
        # LUT values outside [0, 1] must be clamped to the 0..255 output range.
        g = Image.linear_gradient("L")
        im = Image.merge(
            "RGB",
            [
                g,
                g.transpose(Image.Transpose.ROTATE_90),
                g.transpose(Image.Transpose.ROTATE_180),
            ],
        )

        # fmt: off
        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                              3, (2, 2, 2),
                              [
                                  -1, -1, -1,   2, -1, -1,
                                  -1,  2, -1,   2,  2, -1,
                                  -1, -1,  2,   2, -1,  2,
                                  -1,  2,  2,   2,  2,  2,
                              ])).load()
        # fmt: on
        assert transformed is not None
        assert transformed[0, 0] == (0, 0, 255)
        assert transformed[50, 50] == (0, 0, 255)
        assert transformed[255, 0] == (0, 255, 255)
        assert transformed[205, 50] == (0, 255, 255)
        assert transformed[0, 255] == (255, 0, 0)
        assert transformed[50, 205] == (255, 0, 0)
        assert transformed[255, 255] == (255, 255, 0)
        assert transformed[205, 205] == (255, 255, 0)

        # fmt: off
        transformed = im._new(im.im.color_lut_3d('RGB', Image.Resampling.BILINEAR,
                              3, (2, 2, 2),
                              [
                                  -3, -3, -3,   5, -3, -3,
                                  -3,  5, -3,   5,  5, -3,
                                  -3, -3,  5,   5, -3,  5,
                                  -3,  5,  5,   5,  5,  5,
                              ])).load()
        # fmt: on
        assert transformed is not None
        assert transformed[0, 0] == (0, 0, 255)
        assert transformed[50, 50] == (0, 0, 255)
        assert transformed[255, 0] == (0, 255, 255)
        assert transformed[205, 50] == (0, 255, 255)
        assert transformed[0, 255] == (255, 0, 0)
        assert transformed[50, 205] == (255, 0, 0)
        assert transformed[255, 255] == (255, 255, 0)
        assert transformed[205, 205] == (255, 255, 0)
|
TestColorLut3DCoreAPI
|
python
|
sympy__sympy
|
sympy/physics/mechanics/wrapping_geometry.py
|
{
"start": 548,
"end": 1895
}
|
class ____(ABC):
    """Abstract base class for all geometry classes to inherit from.

    Notes
    =====

    Instances of this class cannot be directly instantiated by users. However,
    it can be used to created custom geometry types through subclassing.

    """

    @property
    @abstractmethod
    def point(cls):
        """The point with which the geometry is associated."""
        pass

    @abstractmethod
    def geodesic_length(self, point_1, point_2):
        """Returns the shortest distance between two points on a geometry's
        surface.

        Parameters
        ==========

        point_1 : Point
            The point from which the geodesic length should be calculated.
        point_2 : Point
            The point to which the geodesic length should be calculated.

        """
        pass

    @abstractmethod
    def geodesic_end_vectors(self, point_1, point_2):
        """The vectors parallel to the geodesic at the two end points.

        Parameters
        ==========

        point_1 : Point
            The point from which the geodesic originates.
        point_2 : Point
            The point at which the geodesic terminates.

        """
        pass

    def __repr__(self):
        """Default representation of a geometry object."""
        # Subclasses inherit a "<ClassName>()" representation.
        return f'{type(self).__name__}()'
|
WrappingGeometryBase
|
python
|
scrapy__scrapy
|
tests/test_webclient.py
|
{
"start": 6276,
"end": 6519
}
|
class ____(resource.Resource):
    """Resource whose body is shorter than its declared Content-Length,
    used to simulate a truncated/broken download."""

    def render(self, request):
        # only sends 3 bytes even though it claims to send 5
        request.setHeader(b"content-length", b"5")
        request.write(b"abc")
        return b""
|
BrokenDownloadResource
|
python
|
mwaskom__seaborn
|
tests/_core/test_plot.py
|
{
"start": 1250,
"end": 2065
}
|
class ____(Mark):
    """Instrumented Mark that records everything the plotting pipeline hands it."""

    _grouping_props = ["color"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Accumulators inspected by tests after plotting.
        self.passed_keys = []
        self.passed_data = []
        self.passed_axes = []
        self.passed_scales = None
        self.passed_orient = None
        self.n_splits = 0

    def _plot(self, split_gen, scales, orient):
        # Record every split the generator yields.
        for keys, data, ax in split_gen():
            self.n_splits = self.n_splits + 1
            self.passed_keys.append(keys)
            self.passed_data.append(data)
            self.passed_axes.append(ax)
        self.passed_scales = scales
        self.passed_orient = orient

    def _legend_artist(self, variables, value, scales):
        # Return a bare Line2D tagged with the legend entry it represents.
        artist = mpl.lines.Line2D([], [])
        artist.variables = variables
        artist.value = value
        return artist
|
MockMark
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_montana_zip.py
|
{
"start": 742,
"end": 1743
}
|
class ____(ColumnMapMetricProvider):
    """Metric provider: flags column values that are valid Montana ZIP codes
    (delegates the per-value check to ``is_valid_montana_zip``)."""

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_montana_zip"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column.apply(lambda x: is_valid_montana_zip(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError

    # This class defines the Expectation itself
|
ColumnValuesToBeValidMontanaZip
|
python
|
has2k1__plotnine
|
tools/term.py
|
{
"start": 546,
"end": 794
}
|
class ____(Enum):
    """
    Background color codes
    """

    # ANSI SGR escape sequences 40-47 (background colors).
    black = "\033[40m"
    red = "\033[41m"
    green = "\033[42m"
    orange = "\033[43m"
    blue = "\033[44m"
    purple = "\033[45m"
    cyan = "\033[46m"
    lightgrey = "\033[47m"
|
Bg
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/execution_tests/misc_execution_tests/test_retries.py
|
{
"start": 23430,
"end": 25114
}
|
class ____(ConfigurableResource):
    """Resource that fails the first initialization per run.

    A marker file named after the run id records that the first attempt
    happened; subsequent initializations for the same run find the file and
    succeed.
    """

    # Directory where per-run marker files are created.
    parent_dir: str

    def create_resource(self, context: InitResourceContext) -> None:
        filepath = os.path.join(self.parent_dir, f"{context.run_id}_resource.txt")
        if not os.path.exists(filepath):
            # First attempt for this run: leave a marker, then fail.
            open(filepath, "a", encoding="utf8").close()
            raise ValueError("Resource error")
# No-op op; exists only to trigger initialization of ``my_resource`` under a
# retry policy.
@op(retry_policy=RetryPolicy(max_retries=3))
def do_something(my_resource: FailOnceResource):
    pass
# The parent_dir here is a placeholder; tests override it via run_config.
@job(resource_defs={"my_resource": FailOnceResource(parent_dir="")})
def resource_fail_once_job():
    do_something()
def test_resource_retries_multiprocess():
    """With the multiprocess executor, a failed resource init is retried by the
    op's retry policy: the run succeeds after exactly one retry event."""
    with tempfile.TemporaryDirectory() as tempdir:
        with instance_for_test() as instance:
            with execute_job(
                reconstructable(resource_fail_once_job),
                instance=instance,
                raise_on_error=False,
                run_config={"resources": {"my_resource": {"config": {"parent_dir": tempdir}}}},
            ) as result:
                assert result.success
                assert len(_get_retry_events(result.events_for_node("do_something"))) == 1
def test_resource_retries_in_process():
    """In-process execution does not retry resource failures, so the run fails
    with no retry events."""
    # resource retry policy is not supported for the in-process executor, since the resource
    # initialization is not scoped to a step
    with tempfile.TemporaryDirectory() as tempdir:
        result = resource_fail_once_job.execute_in_process(
            run_config={"resources": {"my_resource": {"config": {"parent_dir": tempdir}}}},
            raise_on_error=False,
        )
        assert not result.success
        assert len(_get_retry_events(result.events_for_node("do_something"))) == 0
|
FailOnceResource
|
python
|
getsentry__sentry
|
src/sentry/migrations/0908_increase_email_field_length.py
|
{
"start": 155,
"end": 1446
}
|
class ____(CheckedMigration):
    """Widens ``User.email`` to max_length=200 (see operations below)."""

    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = False

    dependencies = [
        ("sentry", "0907_sentry_apps_array"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="email",
            field=models.EmailField(blank=True, max_length=200),
        ),
    ]
|
Migration
|
python
|
realpython__materials
|
python-contact-book/source_code_step_2/rpcontacts/views.py
|
{
"start": 243,
"end": 1391
}
|
class ____(QMainWindow):
    """Main Window."""

    def __init__(self, parent=None):
        """Initializer."""
        super().__init__(parent)
        self.setWindowTitle("RP Contacts")
        self.resize(550, 250)
        # A central widget holding a horizontal layout: table on the left,
        # button column on the right.
        self.centralWidget = QWidget()
        self.setCentralWidget(self.centralWidget)
        self.layout = QHBoxLayout()
        self.centralWidget.setLayout(self.layout)
        self.setupUI()

    def setupUI(self):
        """Setup the main window's GUI."""
        # Create the table view widget
        self.table = QTableView()
        self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.table.resizeColumnsToContents()
        # Create buttons
        self.addButton = QPushButton("Add...")
        self.deleteButton = QPushButton("Delete")
        self.clearAllButton = QPushButton("Clear All")
        # Lay out the GUI
        layout = QVBoxLayout()
        layout.addWidget(self.addButton)
        layout.addWidget(self.deleteButton)
        layout.addStretch()
        layout.addWidget(self.clearAllButton)
        self.layout.addWidget(self.table)
        self.layout.addLayout(layout)
|
Window
|
python
|
apache__thrift
|
lib/py/src/Thrift.py
|
{
"start": 787,
"end": 1357
}
|
class ____(object):
    """Thrift wire-type identifiers.

    BYTE/I08 and STRING/UTF7 are aliases (same numeric value).
    """

    STOP = 0
    VOID = 1
    BOOL = 2
    BYTE = 3
    I08 = 3
    DOUBLE = 4
    I16 = 6
    I32 = 8
    I64 = 10
    STRING = 11
    UTF7 = 11
    STRUCT = 12
    MAP = 13
    SET = 14
    LIST = 15
    UTF8 = 16
    UTF16 = 17

    # Indexed by type id; None entries correspond to unassigned ids (5, 7, 9).
    _VALUES_TO_NAMES = (
        'STOP',
        'VOID',
        'BOOL',
        'BYTE',
        'DOUBLE',
        None,
        'I16',
        None,
        'I32',
        None,
        'I64',
        'STRING',
        'STRUCT',
        'MAP',
        'SET',
        'LIST',
        'UTF8',
        'UTF16',
    )
|
TType
|
python
|
py-pdf__pypdf
|
pypdf/_page.py
|
{
"start": 82217,
"end": 89201
}
|
class ____(Sequence[PageObject]):
def __init__(
self,
length_function: Callable[[], int],
get_function: Callable[[int], PageObject],
) -> None:
self.length_function = length_function
self.get_function = get_function
self.current = -1
def __len__(self) -> int:
return self.length_function()
@overload
def __getitem__(self, index: int) -> PageObject:
...
@overload
def __getitem__(self, index: slice) -> Sequence[PageObject]:
...
def __getitem__(
self, index: Union[int, slice]
) -> Union[PageObject, Sequence[PageObject]]:
if isinstance(index, slice):
indices = range(*index.indices(len(self)))
cls = type(self)
return cls(indices.__len__, lambda idx: self[indices[idx]])
if not isinstance(index, int):
raise TypeError("Sequence indices must be integers")
len_self = len(self)
if index < 0:
# support negative indexes
index += len_self
if not (0 <= index < len_self):
raise IndexError("Sequence index out of range")
return self.get_function(index)
def __delitem__(self, index: Union[int, slice]) -> None:
if isinstance(index, slice):
r = list(range(*index.indices(len(self))))
# pages have to be deleted from last to first
r.sort()
r.reverse()
for p in r:
del self[p] # recursive call
return
if not isinstance(index, int):
raise TypeError("Index must be integers")
len_self = len(self)
if index < 0:
# support negative indexes
index += len_self
if not (0 <= index < len_self):
raise IndexError("Index out of range")
ind = self[index].indirect_reference
assert ind is not None
parent: Optional[PdfObject] = cast(DictionaryObject, ind.get_object()).get(
"/Parent", None
)
first = True
while parent is not None:
parent = cast(DictionaryObject, parent.get_object())
try:
i = cast(ArrayObject, parent["/Kids"]).index(ind)
del cast(ArrayObject, parent["/Kids"])[i]
first = False
try:
assert ind is not None
del ind.pdf.flattened_pages[index] # case of page in a Reader
except Exception: # pragma: no cover
pass
if "/Count" in parent:
parent[NameObject("/Count")] = NumberObject(
cast(int, parent["/Count"]) - 1
)
if len(cast(ArrayObject, parent["/Kids"])) == 0:
# No more objects in this part of this subtree
ind = parent.indirect_reference
parent = parent.get("/Parent", None)
except ValueError: # from index
if first:
raise PdfReadError(f"Page not found in page tree: {ind}")
break
def __iter__(self) -> Iterator[PageObject]:
for i in range(len(self)):
yield self[i]
def __str__(self) -> str:
p = [f"PageObject({i})" for i in range(self.length_function())]
return f"[{', '.join(p)}]"
def _get_fonts_walk(
obj: DictionaryObject,
fnt: set[str],
emb: set[str],
) -> tuple[set[str], set[str]]:
"""
Get the set of all fonts and all embedded fonts.
Args:
obj: Page resources dictionary
fnt: font
emb: embedded fonts
Returns:
A tuple (fnt, emb)
If there is a key called 'BaseFont', that is a font that is used in the document.
If there is a key called 'FontName' and another key in the same dictionary object
that is called 'FontFilex' (where x is null, 2, or 3), then that fontname is
embedded.
We create and add to two sets, fnt = fonts used and emb = fonts embedded.
"""
fontkeys = ("/FontFile", "/FontFile2", "/FontFile3")
def process_font(f: DictionaryObject) -> None:
nonlocal fnt, emb
f = cast(DictionaryObject, f.get_object()) # to be sure
if "/BaseFont" in f:
fnt.add(cast(str, f["/BaseFont"]))
if (
("/CharProcs" in f)
or (
"/FontDescriptor" in f
and any(
x in cast(DictionaryObject, f["/FontDescriptor"]) for x in fontkeys
)
)
or (
"/DescendantFonts" in f
and "/FontDescriptor"
in cast(
DictionaryObject,
cast(ArrayObject, f["/DescendantFonts"])[0].get_object(),
)
and any(
x
in cast(
DictionaryObject,
cast(
DictionaryObject,
cast(ArrayObject, f["/DescendantFonts"])[0].get_object(),
)["/FontDescriptor"],
)
for x in fontkeys
)
)
):
# the list comprehension ensures there is FontFile
try:
emb.add(cast(str, f["/BaseFont"]))
except KeyError:
emb.add("(" + cast(str, f["/Subtype"]) + ")")
if "/DR" in obj and "/Font" in cast(DictionaryObject, obj["/DR"]):
for f in cast(DictionaryObject, cast(DictionaryObject, obj["/DR"])["/Font"]):
process_font(f)
if "/Resources" in obj:
if "/Font" in cast(DictionaryObject, obj["/Resources"]):
for f in cast(
DictionaryObject, cast(DictionaryObject, obj["/Resources"])["/Font"]
).values():
process_font(f)
if "/XObject" in cast(DictionaryObject, obj["/Resources"]):
for x in cast(
DictionaryObject, cast(DictionaryObject, obj["/Resources"])["/XObject"]
).values():
_get_fonts_walk(cast(DictionaryObject, x.get_object()), fnt, emb)
if "/Annots" in obj:
for a in cast(ArrayObject, obj["/Annots"]):
_get_fonts_walk(cast(DictionaryObject, a.get_object()), fnt, emb)
if "/AP" in obj:
if (
cast(DictionaryObject, cast(DictionaryObject, obj["/AP"])["/N"]).get(
"/Type"
)
== "/XObject"
):
_get_fonts_walk(
cast(DictionaryObject, cast(DictionaryObject, obj["/AP"])["/N"]),
fnt,
emb,
)
else:
for a in cast(DictionaryObject, cast(DictionaryObject, obj["/AP"])["/N"]):
_get_fonts_walk(cast(DictionaryObject, a), fnt, emb)
return fnt, emb # return the sets for each page
|
_VirtualList
|
python
|
celery__celery
|
t/unit/backends/test_mongodb.py
|
{
"start": 26367,
"end": 27799
}
|
class ____:
def __init__(self, a):
self.a = a
def __eq__(self, other):
assert self.__class__ == type(other)
return self.a == other.a
SUCCESS_RESULT_TEST_DATA = [
# json types
{
"result": "A simple string",
"serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
},
{
"result": 100,
"serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
},
{
"result": 9.1999999999999999,
"serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
},
{
"result": {"foo": "simple result"},
"serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
},
{
"result": ["a", "b"],
"serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
},
{
"result": False,
"serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
},
{
"result": None,
"serializers": ["bson", "pickle", "yaml", "json", "msgpack"],
},
# advanced essential types
{
"result": datetime.datetime(2000, 1, 1, 0, 0, 0, 0),
"serializers": ["bson", "pickle", "yaml"],
},
{
"result": datetime.datetime(2000, 1, 1, 0, 0, 0, 0, tzinfo=ZoneInfo("UTC")),
"serializers": ["pickle", "yaml"],
},
# custom types
{
"result": _MyTestClass("Hi!"),
"serializers": ["pickle"],
},
]
|
_MyTestClass
|
python
|
huggingface__transformers
|
src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
{
"start": 9067,
"end": 11402
}
|
class ____(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer, attention_probs
|
Dinov2WithRegistersSelfAttention
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/api/aliases_api.py
|
{
"start": 1311,
"end": 3043
}
|
class ____:
def __init__(self, api_client: "Union[ApiClient, AsyncApiClient]"):
self.api_client = api_client
def _build_for_get_collection_aliases(
self,
collection_name: str,
):
"""
Get list of all aliases for a collection
"""
path_params = {
"collection_name": str(collection_name),
}
headers = {}
return self.api_client.request(
type_=m.InlineResponse2008,
method="GET",
url="/collections/{collection_name}/aliases",
headers=headers if headers else None,
path_params=path_params,
)
def _build_for_get_collections_aliases(
self,
):
"""
Get list of all existing collections aliases
"""
headers = {}
return self.api_client.request(
type_=m.InlineResponse2008,
method="GET",
url="/aliases",
headers=headers if headers else None,
)
def _build_for_update_aliases(
self,
timeout: int = None,
change_aliases_operation: m.ChangeAliasesOperation = None,
):
query_params = {}
if timeout is not None:
query_params["timeout"] = str(timeout)
headers = {}
body = jsonable_encoder(change_aliases_operation)
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
return self.api_client.request(
type_=m.InlineResponse200,
method="POST",
url="/collections/aliases",
headers=headers if headers else None,
params=query_params,
content=body,
)
|
_AliasesApi
|
python
|
pypa__warehouse
|
warehouse/packaging/models.py
|
{
"start": 40811,
"end": 41729
}
|
class ____(db.Model):
"""
Store an alternate repository name, url, description for a project.
One project can have zero, one, or more alternate repositories.
For each project, ensures the url and name are unique.
Urls must start with http(s).
"""
__tablename__ = "alternate_repositories"
__table_args__ = (
UniqueConstraint("project_id", "url"),
UniqueConstraint("project_id", "name"),
CheckConstraint(
"url ~* '^https?://.+'::text",
name="alternate_repository_valid_url",
),
)
__repr__ = make_repr("name", "url")
project_id: Mapped[UUID] = mapped_column(
ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
)
project: Mapped[Project] = orm.relationship(back_populates="alternate_repositories")
name: Mapped[str]
url: Mapped[str]
description: Mapped[str]
|
AlternateRepository
|
python
|
getsentry__sentry
|
tests/sentry/issues/test_grouptype.py
|
{
"start": 494,
"end": 1367
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.registry_patcher = patch("sentry.issues.grouptype.registry", new=GroupTypeRegistry())
self.registry_patcher.__enter__()
class ErrorGroupType(GroupType):
type_id = -1
slug = "error"
description = "Error"
category = GroupCategory.TEST_NOTIFICATION.value
category_v2 = GroupCategory.TEST_NOTIFICATION.value
class IssueStreamGroupType(GroupType):
type_id = 0
slug = "issue_stream"
description = "Issue Stream"
category = GroupCategory.TEST_NOTIFICATION.value
category_v2 = GroupCategory.TEST_NOTIFICATION.value
def tearDown(self) -> None:
super().tearDown()
self.registry_patcher.__exit__(None, None, None)
|
BaseGroupTypeTest
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/refurb/FURB189.py
|
{
"start": 429,
"end": 476
}
|
class ____(dict[str, str]):
pass
|
SubscriptDict
|
python
|
huggingface__transformers
|
src/transformers/models/data2vec/modeling_data2vec_audio.py
|
{
"start": 32762,
"end": 37675
}
|
class ____(Data2VecAudioPreTrainedModel):
def __init__(self, config):
r"""
target_lang (`str`, *optional*):
Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
adapter.<lang>.bin. Only relevant when using an instance of [`Data2VecAudioForCTC`] with adapters. Uses 'eng' by
default.
"""
super().__init__(config)
self.data2vec_audio = Data2VecAudioModel(config)
self.dropout = nn.Dropout(config.final_dropout)
if config.vocab_size is None:
raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that "
"does not define the vocabulary size of the language model head. Please "
"instantiate the model as follows: `Data2VecAudioForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
"or define `vocab_size` of your model's configuration."
)
output_hidden_size = (
config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
)
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.data2vec_audio.feature_extractor._freeze_parameters()
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, CausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and labels.max() >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
outputs = self.data2vec_audio(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# retrieve loss input_lengths from attention_mask
attention_mask = (
attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
# ctc_loss doesn't support fp16
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(
log_probs,
flattened_targets,
input_lengths,
target_lengths,
blank=self.config.pad_token_id,
reduction=self.config.ctc_loss_reduction,
zero_infinity=self.config.ctc_zero_infinity,
)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@auto_docstring(
custom_intro="""
Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
SUPERB Keyword Spotting.
"""
)
|
Data2VecAudioForCTC
|
python
|
spack__spack
|
lib/spack/spack/builder.py
|
{
"start": 1637,
"end": 9054
}
|
class ____:
def __init__(self, builder, phase_fn):
self.builder = builder
self.phase_fn = phase_fn
def __call__(self, spec, prefix):
return self.phase_fn(self.builder.pkg, spec, prefix)
def get_builder_class(pkg, name: str) -> Optional[Type["Builder"]]:
"""Return the builder class if a package module defines it."""
cls = getattr(pkg.module, name, None)
if cls and spack.repo.is_package_module(cls.__module__):
return cls
return None
def _create(pkg: spack.package_base.PackageBase) -> "Builder":
"""Return a new builder object for the package object being passed as argument.
The function inspects the build-system used by the package object and try to:
1. Return a custom builder, if any is defined in the same ``package.py`` file.
2. Return a customization of more generic builders, if any is defined in the
class hierarchy (look at AspellDictPackage for an example of that)
3. Return a run-time generated adapter builder otherwise
The run-time generated adapter builder is capable of adapting an old-style package
to the new architecture, where the installation procedure has been extracted from
the ``*Package`` hierarchy into a ``*Builder`` hierarchy. This means that the
adapter looks for attribute or method overrides preferably in the ``*Package``
before using the default builder implementation.
Note that in case a builder is explicitly coded in ``package.py``, no attempt is made
to look for build-related methods in the ``*Package``.
Args:
pkg: package object for which we need a builder
"""
package_buildsystem = buildsystem_name(pkg)
default_builder_cls = BUILDER_CLS[package_buildsystem]
builder_cls_name = default_builder_cls.__name__
builder_class = get_builder_class(pkg, builder_cls_name)
if builder_class:
return builder_class(pkg)
# Specialized version of a given buildsystem can subclass some
# base classes and specialize certain phases or methods or attributes.
# In that case they can store their builder class as a class level attribute.
# See e.g. AspellDictPackage as an example.
base_cls = getattr(pkg, builder_cls_name, default_builder_cls)
# From here on we define classes to construct a special builder that adapts to the
# old, single class, package format. The adapter forwards any call or access to an
# attribute related to the installation procedure to a package object wrapped in
# a class that falls-back on calling the base builder if no override is found on the
# package. The semantic should be the same as the method in the base builder were still
# present in the base class of the package.
class _ForwardToBaseBuilder:
def __init__(self, wrapped_pkg_object, root_builder):
self.wrapped_package_object = wrapped_pkg_object
self.root_builder = root_builder
package_cls = type(wrapped_pkg_object)
wrapper_cls = type(self)
bases = (package_cls, wrapper_cls)
new_cls_name = package_cls.__name__ + "Wrapper"
# Forward attributes that might be monkey patched later
new_cls = type(
new_cls_name,
bases,
{
"__module__": package_cls.__module__,
"run_tests": property(lambda x: x.wrapped_package_object.run_tests),
"test_requires_compiler": property(
lambda x: x.wrapped_package_object.test_requires_compiler
),
"test_suite": property(lambda x: x.wrapped_package_object.test_suite),
"tester": property(lambda x: x.wrapped_package_object.tester),
},
)
self.__class__ = new_cls
self.__dict__.update(wrapped_pkg_object.__dict__)
def __getattr__(self, item):
result = getattr(super(type(self.root_builder), self.root_builder), item)
if item in super(type(self.root_builder), self.root_builder).phases:
result = _PhaseAdapter(self.root_builder, result)
return result
def forward_method_to_getattr(fn_name):
def __forward(self, *args, **kwargs):
return self.__getattr__(fn_name)(*args, **kwargs)
return __forward
# Add fallback methods for the Package object to refer to the builder. If a method
# with the same name is defined in the Package, it will override this definition
# (when _ForwardToBaseBuilder is initialized)
for method_name in (
base_cls.phases # type: ignore
+ package_methods(base_cls) # type: ignore
+ package_long_methods(base_cls) # type: ignore
+ ("setup_build_environment", "setup_dependent_build_environment")
):
setattr(_ForwardToBaseBuilder, method_name, forward_method_to_getattr(method_name))
def forward_property_to_getattr(property_name):
def __forward(self):
return self.__getattr__(property_name)
return __forward
for attribute_name in package_attributes(base_cls): # type: ignore
setattr(
_ForwardToBaseBuilder,
attribute_name,
property(forward_property_to_getattr(attribute_name)),
)
class Adapter(base_cls, metaclass=_PackageAdapterMeta): # type: ignore
def __init__(self, pkg):
# Deal with custom phases in packages here
if hasattr(pkg, "phases"):
self.phases = pkg.phases
for phase in self.phases:
setattr(Adapter, phase, _PackageAdapterMeta.phase_method_adapter(phase))
# Attribute containing the package wrapped in dispatcher with a `__getattr__`
# method that will forward certain calls to the default builder.
self.pkg_with_dispatcher = _ForwardToBaseBuilder(pkg, root_builder=self)
super().__init__(pkg)
# These two methods don't follow the (self, spec, prefix) signature of phases nor
# the (self) signature of methods, so they are added explicitly to avoid using a
# catch-all (*args, **kwargs)
def setup_build_environment(
self, env: spack.util.environment.EnvironmentModifications
) -> None:
return self.pkg_with_dispatcher.setup_build_environment(env)
def setup_dependent_build_environment(
self,
env: spack.util.environment.EnvironmentModifications,
dependent_spec: spack.spec.Spec,
) -> None:
return self.pkg_with_dispatcher.setup_dependent_build_environment(env, dependent_spec)
return Adapter(pkg)
def buildsystem_name(pkg: spack.package_base.PackageBase) -> str:
"""Given a package object with an associated concrete spec,
return the name of its build system."""
try:
return pkg.spec.variants["build_system"].value
except KeyError as e:
# We are reading an old spec without the build_system variant
if hasattr(pkg, "default_buildsystem"):
# Package API v2.2
return pkg.default_buildsystem
elif hasattr(pkg, "legacy_buildsystem"):
return pkg.legacy_buildsystem
raise SpackError(f"Package {pkg.name} does not define a build system.") from e
|
_PhaseAdapter
|
python
|
spyder-ide__spyder
|
spyder/plugins/tours/widgets.py
|
{
"start": 1614,
"end": 4639
}
|
class ____(QDialog):
"""A general fade in/fade out QDialog with some builtin functions"""
sig_key_pressed = Signal()
def __init__(self, parent, opacity, duration, easing_curve):
super().__init__(parent)
self.parent = parent
self.opacity_min = min(opacity)
self.opacity_max = max(opacity)
self.duration_fadein = duration[0]
self.duration_fadeout = duration[-1]
self.easing_curve_in = easing_curve[0]
self.easing_curve_out = easing_curve[-1]
self.effect = None
self.anim = None
self._fade_running = False
self._funcs_before_fade_in = []
self._funcs_after_fade_in = []
self._funcs_before_fade_out = []
self._funcs_after_fade_out = []
self.setModal(False)
def _run(self, funcs):
for func in funcs:
func()
def _run_before_fade_in(self):
self._run(self._funcs_before_fade_in)
def _run_after_fade_in(self):
self._run(self._funcs_after_fade_in)
def _run_before_fade_out(self):
self._run(self._funcs_before_fade_out)
def _run_after_fade_out(self):
self._run(self._funcs_after_fade_out)
def _set_fade_finished(self):
self._fade_running = False
def _fade_setup(self):
self._fade_running = True
self.effect = QGraphicsOpacityEffect(self)
self.setGraphicsEffect(self.effect)
self.anim = QPropertyAnimation(self.effect, b"opacity")
# --- public api
def fade_in(self, on_finished_connect):
self._run_before_fade_in()
self._fade_setup()
self.show()
self.raise_()
self.anim.setEasingCurve(self.easing_curve_in)
self.anim.setStartValue(self.opacity_min)
self.anim.setEndValue(self.opacity_max)
self.anim.setDuration(self.duration_fadein)
self.anim.finished.connect(on_finished_connect)
self.anim.finished.connect(self._set_fade_finished)
self.anim.finished.connect(self._run_after_fade_in)
self.anim.start()
def fade_out(self, on_finished_connect):
self._run_before_fade_out()
self._fade_setup()
self.anim.setEasingCurve(self.easing_curve_out)
self.anim.setStartValue(self.opacity_max)
self.anim.setEndValue(self.opacity_min)
self.anim.setDuration(self.duration_fadeout)
self.anim.finished.connect(on_finished_connect)
self.anim.finished.connect(self._set_fade_finished)
self.anim.finished.connect(self._run_after_fade_out)
self.anim.start()
def is_fade_running(self):
return self._fade_running
def set_funcs_before_fade_in(self, funcs):
self._funcs_before_fade_in = funcs
def set_funcs_after_fade_in(self, funcs):
self._funcs_after_fade_in = funcs
def set_funcs_before_fade_out(self, funcs):
self._funcs_before_fade_out = funcs
def set_funcs_after_fade_out(self, funcs):
self._funcs_after_fade_out = funcs
|
FadingDialog
|
python
|
scrapy__scrapy
|
scrapy/extensions/feedexport.py
|
{
"start": 11715,
"end": 14413
}
|
class ____:
def __init__(
self,
storage: FeedStorageProtocol,
uri: str,
format: str, # noqa: A002
store_empty: bool,
batch_id: int,
uri_template: str,
filter: ItemFilter, # noqa: A002
feed_options: dict[str, Any],
spider: Spider,
exporters: dict[str, type[BaseItemExporter]],
settings: BaseSettings,
crawler: Crawler,
):
self.file: IO[bytes] | None = None
self.exporter: BaseItemExporter | None = None
self.storage: FeedStorageProtocol = storage
# feed params
self.batch_id: int = batch_id
self.format: str = format
self.store_empty: bool = store_empty
self.uri_template: str = uri_template
self.uri: str = uri
self.filter: ItemFilter = filter
# exporter params
self.feed_options: dict[str, Any] = feed_options
self.spider: Spider = spider
self.exporters: dict[str, type[BaseItemExporter]] = exporters
self.settings: BaseSettings = settings
self.crawler: Crawler = crawler
# flags
self.itemcount: int = 0
self._exporting: bool = False
self._fileloaded: bool = False
def start_exporting(self) -> None:
if not self._fileloaded:
self.file = self.storage.open(self.spider)
if "postprocessing" in self.feed_options:
self.file = cast(
"IO[bytes]",
PostProcessingManager(
self.feed_options["postprocessing"],
self.file,
self.feed_options,
),
)
self.exporter = self._get_exporter(
file=self.file,
format_=self.feed_options["format"],
fields_to_export=self.feed_options["fields"],
encoding=self.feed_options["encoding"],
indent=self.feed_options["indent"],
**self.feed_options["item_export_kwargs"],
)
self._fileloaded = True
if not self._exporting:
assert self.exporter
self.exporter.start_exporting()
self._exporting = True
def _get_exporter(
self, file: IO[bytes], format_: str, *args: Any, **kwargs: Any
) -> BaseItemExporter:
return build_from_crawler(
self.exporters[format_], self.crawler, file, *args, **kwargs
)
def finish_exporting(self) -> None:
if self._exporting:
assert self.exporter
self.exporter.finish_exporting()
self._exporting = False
|
FeedSlot
|
python
|
ray-project__ray
|
python/ray/experimental/collective/operations.py
|
{
"start": 6151,
"end": 6968
}
|
class ____:
"""Wrapper for NCCL reduce-scatter."""
def bind(
self,
input_nodes: List["ray.dag.DAGNode"],
op: ReduceOp = ReduceOp.SUM,
transport: Optional[Union[str, Communicator]] = None,
) -> List[CollectiveOutputNode]:
if not isinstance(op, ReduceOp):
raise ValueError(f"Unexpected operation: {op}")
return _bind(input_nodes, ReduceScatterOp(reduceOp=op), transport)
def __call__(
self,
tensor,
group_name: str = "default",
op: RayReduceOp = RayReduceOp.SUM,
):
from ray.util.collective.collective import reducescatter
return reducescatter(tensor, group_name, op)
allgather = AllGatherWrapper()
allreduce = AllReduceWrapper()
reducescatter = ReduceScatterWrapper()
|
ReduceScatterWrapper
|
python
|
huggingface__transformers
|
src/transformers/models/helium/modular_helium.py
|
{
"start": 3792,
"end": 4094
}
|
class ____(GraniteAttention):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int] = None):
super().__init__(config, layer_idx)
self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
self.scaling = 1 / math.sqrt(self.head_dim)
|
HeliumAttention
|
python
|
h5py__h5py
|
h5py/_hl/selections2.py
|
{
"start": 1949,
"end": 2723
}
|
class ____:
"""
Implements slicing for scalar datasets.
"""
def __init__(self, fspace, args):
if args == ():
self.mshape = None
elif args == (Ellipsis,):
self.mshape = ()
else:
raise ValueError("Illegal slicing argument for scalar dataspace")
self.mspace = h5s.create(h5s.SCALAR)
self.fspace = fspace
def __iter__(self):
self.mspace.select_all()
yield self.fspace, self.mspace
def select_read(fspace, args):
""" Top-level dispatch function for reading.
At the moment, only supports reading from scalar datasets.
"""
if fspace.shape == ():
return ScalarReadSelection(fspace, args)
raise NotImplementedError()
|
ScalarReadSelection
|
python
|
PrefectHQ__prefect
|
tests/_internal/pydantic/test_validated_func.py
|
{
"start": 243,
"end": 1638
}
|
class ____:
"""Test basic function argument validation."""
def test_simple_function(self):
def greet(name: str, age: int = 0):
return f"Hello {name}, you are {age} years old"
vf = ValidatedFunction(greet)
result = vf.validate_call_args(("Alice",), {"age": 30})
assert result == {"name": "Alice", "age": 30}
def test_simple_function_with_defaults(self):
def greet(name: str, age: int = 25):
return f"Hello {name}"
vf = ValidatedFunction(greet)
result = vf.validate_call_args(("Bob",), {})
assert result == {"name": "Bob", "age": 25}
def test_all_positional(self):
def add(a: int, b: int):
return a + b
vf = ValidatedFunction(add)
result = vf.validate_call_args((5, 10), {})
assert result == {"a": 5, "b": 10}
def test_all_keyword(self):
def add(a: int, b: int):
return a + b
vf = ValidatedFunction(add)
result = vf.validate_call_args((), {"a": 5, "b": 10})
assert result == {"a": 5, "b": 10}
def test_mixed_positional_and_keyword(self):
def multiply(x: int, y: int, z: int = 1):
return x * y * z
vf = ValidatedFunction(multiply)
result = vf.validate_call_args((2, 3), {"z": 4})
assert result == {"x": 2, "y": 3, "z": 4}
|
TestBasicValidation
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/functions.py
|
{
"start": 85421,
"end": 86733
}
|
class ____(UserFunctionVariable):
def as_python_constant(self) -> Any:
return self.fn
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
constant_args = check_constant_args(args, kwargs)
if constant_args:
try:
value = self.fn(
*[x.as_python_constant() for x in args],
**{k: v.as_python_constant() for k, v in kwargs.items()},
)
except TypeError as exc:
raise_observed_exception(
type(exc),
tx,
args=list(map(ConstantVariable.create, exc.args)),
)
return variables.UserDefinedClassVariable(
# pyrefly: ignore[unbound-name]
value,
mutation_type=ValueMutationNew(),
)
unimplemented(
gb_type="namedtuple construction",
context=f"{args=}, {kwargs=}",
explanation="`torch.compile` only support certain input types for namedtuple",
hints=[
*graph_break_hints.SUPPORTABLE,
],
)
|
CollectionsNamedTupleFunction
|
python
|
Netflix__metaflow
|
metaflow/sidecar/sidecar_subprocess.py
|
{
"start": 553,
"end": 670
}
|
class ____(Exception):
"""raised when unable to write to pipe given allotted time"""
pass
|
PipeUnavailableError
|
python
|
huggingface__transformers
|
src/transformers/models/vitmatte/image_processing_vitmatte_fast.py
|
{
"start": 1371,
"end": 6033
}
|
class ____(BaseImageProcessorFast):
do_rescale: bool = True
rescale_factor: Union[int, float] = 1 / 255
do_normalize: bool = True
image_mean: Optional[Union[float, list[float]]] = IMAGENET_STANDARD_MEAN
image_std: Optional[Union[float, list[float]]] = IMAGENET_STANDARD_STD
do_pad: bool = True
size_divisor: int = 32
valid_kwargs = VitMatteImageProcessorKwargs
def __init__(self, **kwargs: Unpack[VitMatteImageProcessorKwargs]) -> None:
size_divisibility = kwargs.pop("size_divisibility", None)
kwargs.setdefault("size_divisor", size_divisibility)
super().__init__(**kwargs)
def _pad_image(
self,
images: torch.Tensor,
size_divisor: int = 32,
) -> torch.Tensor:
"""
Pads an image or batched images constantly so that width and height are divisible by size_divisor
Args:
image (`torch.Tensor`):
Image to pad.
size_divisor (`int`, *optional*, defaults to 32):
The width and height of the image will be padded to be divisible by this number.
"""
height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)
pad_height = 0 if height % size_divisor == 0 else size_divisor - height % size_divisor
pad_width = 0 if width % size_divisor == 0 else size_divisor - width % size_divisor
if pad_width + pad_height > 0:
padding = (0, 0, pad_width, pad_height)
images = F.pad(images, padding)
return images
@auto_docstring
def preprocess(
self,
images: list["torch.Tensor"],
trimaps: list["torch.Tensor"],
**kwargs: Unpack[VitMatteImageProcessorKwargs],
) -> BatchFeature:
r"""
trimaps (`list[torch.Tensor]`):
The trimaps to preprocess.
"""
return super().preprocess(images, trimaps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
trimaps: ImageInput,
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[VitMatteImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
trimaps = self._prepare_image_like_inputs(images=trimaps, expected_ndims=2, device=device)
return self._preprocess(images, trimaps, **kwargs)
@filter_out_non_signature_kwargs()
def _preprocess(
self,
images: list["torch.Tensor"],
trimaps: list["torch.Tensor"],
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
size_divisor: Optional[int] = None,
disable_grouping: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature:
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
grouped_trimaps, grouped_trimaps_index = group_images_by_shape(trimaps, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape in grouped_images:
stacked_images = grouped_images[shape]
stacked_trimaps = grouped_trimaps[shape]
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
stacked_trimaps = self.rescale_and_normalize(
stacked_trimaps, do_rescale, rescale_factor, False, image_mean, image_std
)
stacked_images = torch.cat([stacked_images, stacked_trimaps], dim=1)
if do_pad:
stacked_images = self._pad_image(stacked_images, size_divisor)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["VitMatteImageProcessorFast"]
|
VitMatteImageProcessorFast
|
python
|
encode__django-rest-framework
|
tests/test_api_client.py
|
{
"start": 5264,
"end": 5381
}
|
class ____(APIView):
def get(self, request):
return HttpResponse('123', content_type='text/plain')
|
TextView
|
python
|
networkx__networkx
|
networkx/classes/tests/test_reportviews.py
|
{
"start": 14669,
"end": 15568
}
|
class ____(TestOutMultiEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph())
cls.eview = nx.reportviews.InMultiEdgeView
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = (
"InMultiEdgeDataView([(0, 1, {}), (1, 2, {}), "
+ "(2, 3, {}), (3, 4, {}), "
+ "(4, 5, {}), (5, 6, {}), "
+ "(6, 7, {}), (7, 8, {})])"
)
assert repr(ev) == rep
def test_contains_with_nbunch(self):
evr = self.eview(self.G)
ev = evr(nbunch=[0, 2])
assert (0, 1) not in ev
assert (1, 2) in ev
assert (2, 3) not in ev
assert (3, 4) not in ev
assert (4, 5) not in ev
assert (5, 6) not in ev
assert (7, 8) not in ev
assert (8, 9) not in ev
# Edge Views
|
TestInMultiEdgeDataView
|
python
|
tornadoweb__tornado
|
demos/chat/chatdemo.py
|
{
"start": 1770,
"end": 1910
}
|
class ____(tornado.web.RequestHandler):
def get(self):
self.render("index.html", messages=global_message_buffer.cache)
|
MainHandler
|
python
|
ApeWorX__ape
|
src/ape_test/accounts.py
|
{
"start": 3133,
"end": 3633
}
|
class ____(ApeSigner, TestAccountAPI):
index: int
address_str: str
__test__ = False
@property
def alias(self) -> str:
return f"TEST::{self.index}"
@cached_property
def address(self) -> AddressType:
# Overridden.
return self.network_manager.ethereum.decode_address(self.address_str)
@log_instead_of_fail(default="<TestAccount>")
def __repr__(self) -> str:
return f"<{self.__class__.__name__}_{self.index} {self.address}>"
|
TestAccount
|
python
|
celery__celery
|
t/unit/utils/test_dispatcher.py
|
{
"start": 659,
"end": 851
}
|
class ____:
def __call__(self, val, **kwargs):
return val
def a(self, val, **kwargs):
return val
a_signal = Signal(providing_args=['val'], use_caching=False)
|
Callable
|
python
|
pola-rs__polars
|
py-polars/src/polars/dataframe/group_by.py
|
{
"start": 948,
"end": 27435
}
|
class ____:
"""Starts a new GroupBy operation."""
def __init__(
self,
df: DataFrame,
*by: IntoExpr | Iterable[IntoExpr],
maintain_order: bool,
predicates: Iterable[Any] | None,
**named_by: IntoExpr,
) -> None:
"""
Utility class for performing a group by operation over the given DataFrame.
Generated by calling `df.group_by(...)`.
Parameters
----------
df
DataFrame to perform the group by operation over.
*by
Column or columns to group by. Accepts expression input. Strings are parsed
as column names.
maintain_order
Ensure that the order of the groups is consistent with the input data.
This is slower than a default group by.
predicates
Predicate expressions to filter groups after aggregation.
**named_by
Additional column(s) to group by, specified as keyword arguments.
The columns will be named as the keyword used.
"""
self.df = df
self.by = by
self.named_by = named_by
self.maintain_order = maintain_order
self.predicates = predicates
def _lgb(self) -> LazyGroupBy:
group_by = self.df.lazy().group_by(
*self.by, **self.named_by, maintain_order=self.maintain_order
)
if self.predicates:
return group_by.having(self.predicates)
return group_by
def __iter__(self) -> Self:
"""
Allows iteration over the groups of the group by operation.
Each group is represented by a tuple of `(name, data)`. The group names are
tuples of the distinct group values that identify each group.
Examples
--------
>>> df = pl.DataFrame({"foo": ["a", "a", "b"], "bar": [1, 2, 3]})
>>> for name, data in df.group_by("foo"): # doctest: +SKIP
... print(name)
... print(data)
(a,)
shape: (2, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a ┆ 1 │
│ a ┆ 2 │
└─────┴─────┘
(b,)
shape: (1, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ b ┆ 3 │
└─────┴─────┘
"""
# Every group gather can trigger a rechunk, so do early.
from polars.lazyframe.opt_flags import QueryOptFlags
self.df = self.df.rechunk()
temp_col = "__POLARS_GB_GROUP_INDICES"
groups_df = (
self.df.lazy()
.with_row_index("__POLARS_GB_ROW_INDEX")
.group_by(*self.by, **self.named_by, maintain_order=self.maintain_order)
.agg(F.first().alias(temp_col))
.collect(optimizations=QueryOptFlags.none())
)
self._group_names = groups_df.select(F.all().exclude(temp_col)).iter_rows()
self._group_indices = groups_df.select(temp_col).to_series()
self._current_index = 0
return self
def __next__(self) -> tuple[tuple[Any, ...], DataFrame]:
if self._current_index >= len(self._group_indices):
raise StopIteration
group_name = next(self._group_names)
group_data = self.df[self._group_indices[self._current_index], :]
self._current_index += 1
return group_name, group_data
def having(self, *predicates: IntoExpr | Iterable[IntoExpr]) -> GroupBy:
"""
Filter groups with a list of predicates after aggregation.
Using this method is equivalent to adding the predicates to the aggregation and
filtering afterwards.
This method can be chained and all conditions will be combined using `&`.
Parameters
----------
*predicates
Expressions that evaluate to a boolean value for each group. Typically, this
requires the use of an aggregation function. Multiple predicates are
combined using `&`.
Examples
--------
Only keep groups that contain more than one element.
>>> df = pl.DataFrame(
... {
... "a": ["a", "b", "a", "b", "c"],
... }
... )
>>> df.group_by("a").having(pl.len() > 1).agg() # doctest: +IGNORE_RESULT
shape: (2, 1)
┌─────┐
│ a │
│ --- │
│ str │
╞═════╡
│ b │
│ a │
└─────┘
"""
return GroupBy(
self.df,
*self.by,
maintain_order=self.maintain_order,
predicates=_chain_predicates(self.predicates, predicates),
**self.named_by,
)
def agg(
self,
*aggs: IntoExpr | Iterable[IntoExpr],
**named_aggs: IntoExpr,
) -> DataFrame:
"""
Compute aggregations for each group of a group by operation.
Parameters
----------
*aggs
Aggregations to compute for each group of the group by operation,
specified as positional arguments.
Accepts expression input. Strings are parsed as column names.
**named_aggs
Additional aggregations, specified as keyword arguments.
The resulting columns will be renamed to the keyword used.
Examples
--------
Compute the aggregation of the columns for each group.
>>> df = pl.DataFrame(
... {
... "a": ["a", "b", "a", "b", "c"],
... "b": [1, 2, 1, 3, 3],
... "c": [5, 4, 3, 2, 1],
... }
... )
>>> df.group_by("a").agg(pl.col("b"), pl.col("c")) # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬───────────┬───────────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ list[i64] ┆ list[i64] │
╞═════╪═══════════╪═══════════╡
│ a ┆ [1, 1] ┆ [5, 3] │
│ b ┆ [2, 3] ┆ [4, 2] │
│ c ┆ [3] ┆ [1] │
└─────┴───────────┴───────────┘
Compute the sum of a column for each group.
>>> df.group_by("a").agg(pl.col("b").sum()) # doctest: +IGNORE_RESULT
shape: (3, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a ┆ 2 │
│ b ┆ 5 │
│ c ┆ 3 │
└─────┴─────┘
Compute multiple aggregates at once by passing a list of expressions.
>>> df.group_by("a").agg([pl.sum("b"), pl.mean("c")]) # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═════╪═════╡
│ c ┆ 3 ┆ 1.0 │
│ a ┆ 2 ┆ 4.0 │
│ b ┆ 5 ┆ 3.0 │
└─────┴─────┴─────┘
Or use positional arguments to compute multiple aggregations in the same way.
>>> df.group_by("a").agg(
... pl.sum("b").name.suffix("_sum"),
... (pl.col("c") ** 2).mean().name.suffix("_mean_squared"),
... ) # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬───────┬────────────────┐
│ a ┆ b_sum ┆ c_mean_squared │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═══════╪════════════════╡
│ a ┆ 2 ┆ 17.0 │
│ c ┆ 3 ┆ 1.0 │
│ b ┆ 5 ┆ 10.0 │
└─────┴───────┴────────────────┘
Use keyword arguments to easily name your expression inputs.
>>> df.group_by("a").agg(
... b_sum=pl.sum("b"),
... c_mean_squared=(pl.col("c") ** 2).mean(),
... ) # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬───────┬────────────────┐
│ a ┆ b_sum ┆ c_mean_squared │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═══════╪════════════════╡
│ a ┆ 2 ┆ 17.0 │
│ c ┆ 3 ┆ 1.0 │
│ b ┆ 5 ┆ 10.0 │
└─────┴───────┴────────────────┘
"""
from polars.lazyframe.opt_flags import QueryOptFlags
return (
self._lgb()
.agg(*aggs, **named_aggs)
.collect(optimizations=QueryOptFlags.none())
)
def map_groups(self, function: Callable[[DataFrame], DataFrame]) -> DataFrame:
"""
Apply a custom/user-defined function (UDF) over the groups as a sub-DataFrame.
.. warning::
This method is much slower than the native expressions API.
Only use it if you cannot implement your logic otherwise.
Implementing logic using a Python function is almost always *significantly*
slower and more memory intensive than implementing the same logic using
the native expression API because:
- The native expression engine runs in Rust; UDFs run in Python.
- Use of Python UDFs forces the DataFrame to be materialized in memory.
- Polars-native expressions can be parallelised (UDFs cannot).
- Polars-native expressions can be logically optimised (UDFs cannot).
Wherever possible you should strongly prefer the native expression API
to achieve the best performance.
Parameters
----------
function
Custom function that receives a DataFrame and returns a DataFrame.
Returns
-------
DataFrame
Examples
--------
For each color group sample two rows:
>>> df = pl.DataFrame(
... {
... "id": [0, 1, 2, 3, 4],
... "color": ["red", "green", "green", "red", "red"],
... "shape": ["square", "triangle", "square", "triangle", "square"],
... }
... )
>>> df.group_by("color").map_groups(
... lambda group_df: group_df.sample(2)
... ) # doctest: +IGNORE_RESULT
shape: (4, 3)
┌─────┬───────┬──────────┐
│ id ┆ color ┆ shape │
│ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ str │
╞═════╪═══════╪══════════╡
│ 1 ┆ green ┆ triangle │
│ 2 ┆ green ┆ square │
│ 4 ┆ red ┆ square │
│ 3 ┆ red ┆ triangle │
└─────┴───────┴──────────┘
It is better to implement this with an expression:
>>> df.filter(
... pl.int_range(pl.len()).shuffle().over("color") < 2
... ) # doctest: +IGNORE_RESULT
"""
if self.predicates:
msg = "cannot call `map_groups` when filtering groups with `having`"
raise TypeError(msg)
if self.named_by:
msg = "cannot call `map_groups` when grouping by named expressions"
raise TypeError(msg)
if not all(isinstance(c, str) for c in self.by):
msg = "cannot call `map_groups` when grouping by an expression"
raise TypeError(msg)
by_strs: list[str] = self.by # type: ignore[assignment]
return self.df.__class__._from_pydf(
self.df._df.group_by_map_groups(by_strs, function, self.maintain_order)
)
def head(self, n: int = 5) -> DataFrame:
"""
Get the first `n` rows of each group.
Parameters
----------
n
Number of rows to return.
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
│ c ┆ 2 │
│ a ┆ 3 │
│ c ┆ 4 │
│ a ┆ 5 │
│ b ┆ 6 │
└─────────┴─────┘
>>> df.group_by("letters").head(2).sort("letters")
shape: (5, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ a ┆ 3 │
│ a ┆ 5 │
│ b ┆ 6 │
│ c ┆ 1 │
│ c ┆ 2 │
└─────────┴─────┘
"""
from polars.lazyframe.opt_flags import QueryOptFlags
return self._lgb().head(n).collect(optimizations=QueryOptFlags._eager())
def tail(self, n: int = 5) -> DataFrame:
"""
Get the last `n` rows of each group.
Parameters
----------
n
Number of rows to return.
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
│ c ┆ 2 │
│ a ┆ 3 │
│ c ┆ 4 │
│ a ┆ 5 │
│ b ┆ 6 │
└─────────┴─────┘
>>> df.group_by("letters").tail(2).sort("letters")
shape: (5, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ a ┆ 3 │
│ a ┆ 5 │
│ b ┆ 6 │
│ c ┆ 2 │
│ c ┆ 4 │
└─────────┴─────┘
"""
from polars.lazyframe.opt_flags import QueryOptFlags
return self._lgb().tail(n).collect(optimizations=QueryOptFlags.none())
def all(self) -> DataFrame:
"""
Aggregate the groups into Series.
Examples
--------
>>> df = pl.DataFrame({"a": ["one", "two", "one", "two"], "b": [1, 2, 3, 4]})
>>> df.group_by("a", maintain_order=True).all()
shape: (2, 2)
┌─────┬───────────┐
│ a ┆ b │
│ --- ┆ --- │
│ str ┆ list[i64] │
╞═════╪═══════════╡
│ one ┆ [1, 3] │
│ two ┆ [2, 4] │
└─────┴───────────┘
"""
return self.agg(F.all())
def len(self, name: str | None = None) -> DataFrame:
"""
Return the number of rows in each group.
Parameters
----------
name
Assign a name to the resulting column; if unset, defaults to "len".
Examples
--------
>>> df = pl.DataFrame({"a": ["Apple", "Apple", "Orange"], "b": [1, None, 2]})
>>> df.group_by("a").len() # doctest: +IGNORE_RESULT
shape: (2, 2)
┌────────┬─────┐
│ a ┆ len │
│ --- ┆ --- │
│ str ┆ u32 │
╞════════╪═════╡
│ Apple ┆ 2 │
│ Orange ┆ 1 │
└────────┴─────┘
>>> df.group_by("a").len(name="n") # doctest: +IGNORE_RESULT
shape: (2, 2)
┌────────┬─────┐
│ a ┆ n │
│ --- ┆ --- │
│ str ┆ u32 │
╞════════╪═════╡
│ Apple ┆ 2 │
│ Orange ┆ 1 │
└────────┴─────┘
"""
len_expr = F.len()
if name is not None:
len_expr = len_expr.alias(name)
return self.agg(len_expr)
@deprecated("`GroupBy.count` was renamed; use `GroupBy.len` instead")
def count(self) -> DataFrame:
"""
Return the number of rows in each group.
.. deprecated:: 0.20.5
This method has been renamed to :func:`GroupBy.len`.
Rows containing null values count towards the total.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": ["Apple", "Apple", "Orange"],
... "b": [1, None, 2],
... }
... )
>>> df.group_by("a").count() # doctest: +SKIP
shape: (2, 2)
┌────────┬───────┐
│ a ┆ count │
│ --- ┆ --- │
│ str ┆ u32 │
╞════════╪═══════╡
│ Apple ┆ 2 │
│ Orange ┆ 1 │
└────────┴───────┘
"""
return self.agg(F.len().alias("count"))
def first(self, *, ignore_nulls: bool = False) -> DataFrame:
"""
Aggregate the first values in the group.
Parameters
----------
ignore_nulls
Ignore null values (default `False`).
If set to `True`, the first non-null value for each aggregation is returned,
otherwise `None` is returned if no non-null value exists.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [None, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).first()
shape: (3, 4)
┌────────┬─────┬──────┬───────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪═══════╡
│ Apple ┆ 1 ┆ 0.5 ┆ null │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 4 ┆ 13.0 ┆ false │
└────────┴─────┴──────┴───────┘
>>> df.group_by("d", maintain_order=True).first(ignore_nulls=True)
shape: (3, 4)
┌────────┬─────┬──────┬───────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪═══════╡
│ Apple ┆ 1 ┆ 0.5 ┆ true │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 4 ┆ 13.0 ┆ false │
└────────┴─────┴──────┴───────┘
"""
return self.agg(F.all().first(ignore_nulls=ignore_nulls))
def last(self, *, ignore_nulls: bool = False) -> DataFrame:
"""
Aggregate the last values in the group.
Parameters
----------
ignore_nulls
Ignore null values (default `False`).
If set to `True`, the last non-null value for each column is returned,
otherwise `None` is returned if no non-null value exists.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 14, None],
... "c": [True, True, True, None, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).last()
shape: (3, 4)
┌────────┬─────┬──────┬──────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪══════╡
│ Apple ┆ 3 ┆ 10.0 ┆ null │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 5 ┆ null ┆ true │
└────────┴─────┴──────┴──────┘
>>> df.group_by("d", maintain_order=True).last(ignore_nulls=True)
shape: (3, 4)
┌────────┬─────┬──────┬──────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪══════╡
│ Apple ┆ 3 ┆ 10.0 ┆ true │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 5 ┆ 14.0 ┆ true │
└────────┴─────┴──────┴──────┘
"""
return self.agg(F.all().last(ignore_nulls=ignore_nulls))
def max(self) -> DataFrame:
"""
Reduce the groups to the maximal value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).max()
shape: (3, 4)
┌────────┬─────┬──────┬──────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪══════╡
│ Apple ┆ 3 ┆ 10.0 ┆ true │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 5 ┆ 14.0 ┆ true │
└────────┴─────┴──────┴──────┘
"""
return self.agg(F.all().max())
def mean(self) -> DataFrame:
"""
Reduce the groups to the mean values.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).mean()
shape: (3, 4)
┌────────┬─────┬──────────┬──────────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 │
╞════════╪═════╪══════════╪══════════╡
│ Apple ┆ 2.0 ┆ 4.833333 ┆ 0.666667 │
│ Orange ┆ 2.0 ┆ 0.5 ┆ 1.0 │
│ Banana ┆ 4.5 ┆ 13.5 ┆ 0.5 │
└────────┴─────┴──────────┴──────────┘
"""
return self.agg(F.all().mean())
def median(self) -> DataFrame:
"""
Return the median per group.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "d": ["Apple", "Banana", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).median()
shape: (2, 3)
┌────────┬─────┬──────┐
│ d ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 │
╞════════╪═════╪══════╡
│ Apple ┆ 2.0 ┆ 4.0 │
│ Banana ┆ 4.0 ┆ 13.0 │
└────────┴─────┴──────┘
"""
return self.agg(F.all().median())
def min(self) -> DataFrame:
"""
Reduce the groups to the minimal value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).min()
shape: (3, 4)
┌────────┬─────┬──────┬───────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪═══════╡
│ Apple ┆ 1 ┆ 0.5 ┆ false │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 4 ┆ 13.0 ┆ false │
└────────┴─────┴──────┴───────┘
"""
return self.agg(F.all().min())
def n_unique(self) -> DataFrame:
"""
Count the unique values per group.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 1, 3, 4, 5],
... "b": [0.5, 0.5, 0.5, 10, 13, 14],
... "d": ["Apple", "Banana", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).n_unique()
shape: (2, 3)
┌────────┬─────┬─────┐
│ d ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ str ┆ u32 ┆ u32 │
╞════════╪═════╪═════╡
│ Apple ┆ 2 ┆ 2 │
│ Banana ┆ 3 ┆ 3 │
└────────┴─────┴─────┘
"""
return self.agg(F.all().n_unique())
def quantile(
self, quantile: float, interpolation: QuantileMethod = "nearest"
) -> DataFrame:
"""
Compute the quantile per group.
Parameters
----------
quantile
Quantile between 0.0 and 1.0.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
Interpolation method.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).quantile(1)
shape: (3, 3)
┌────────┬─────┬──────┐
│ d ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 │
╞════════╪═════╪══════╡
│ Apple ┆ 3.0 ┆ 10.0 │
│ Orange ┆ 2.0 ┆ 0.5 │
│ Banana ┆ 5.0 ┆ 14.0 │
└────────┴─────┴──────┘
""" # noqa: W505
return self.agg(F.all().quantile(quantile, interpolation=interpolation))
def sum(self) -> DataFrame:
"""
Reduce the groups to the sum.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... )
>>> df.group_by("d", maintain_order=True).sum()
shape: (3, 4)
┌────────┬─────┬──────┬─────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ u32 │
╞════════╪═════╪══════╪═════╡
│ Apple ┆ 6 ┆ 14.5 ┆ 2 │
│ Orange ┆ 2 ┆ 0.5 ┆ 1 │
│ Banana ┆ 9 ┆ 27.0 ┆ 1 │
└────────┴─────┴──────┴─────┘
"""
return self.agg(F.all().sum())
|
GroupBy
|
python
|
TheAlgorithms__Python
|
data_structures/binary_tree/treap.py
|
{
"start": 64,
"end": 4751
}
|
class ____:
"""
Treap's node
Treap is a binary tree by value and heap by priority
"""
def __init__(self, value: int | None = None):
self.value = value
self.prior = random()
self.left: Node | None = None
self.right: Node | None = None
def __repr__(self) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return f"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
)
def __str__(self) -> str:
value = str(self.value) + " "
left = str(self.left or "")
right = str(self.right or "")
return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
"""
We split current tree into 2 trees with value:
Left tree contains all values less than split value.
Right tree contains all values greater or equal, than split value
"""
if root is None or root.value is None: # None tree is split into 2 Nones
return None, None
elif value < root.value:
"""
Right tree's root will be current node.
Now we split(with the same value) current node's left son
Left tree: left part of that split
Right tree's left son: right part of that split
"""
left, root.left = split(root.left, value)
return left, root
else:
"""
Just symmetric to previous case
"""
root.right, right = split(root.right, value)
return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
"""
We merge 2 trees into one.
Note: all left tree's values must be less than all right tree's
"""
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
"""
Left will be root because it has more priority
Now we need to merge left's right son and right tree
"""
left.right = merge(left.right, right)
return left
else:
"""
Symmetric as well
"""
right.left = merge(left, right.left)
return right
def insert(root: Node | None, value: int) -> Node | None:
"""
Insert element
Split current tree with a value into left, right,
Insert new node into the middle
Merge left, node, right into root
"""
node = Node(value)
left, right = split(root, value)
return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
"""
Erase element
Split all nodes with values less into left,
Split all nodes with values greater into right.
Merge left, right
"""
left, right = split(root, value - 1)
_, right = split(right, value)
return merge(left, right)
def inorder(root: Node | None) -> None:
"""
Just recursive print of a tree
"""
if not root: # None
return
else:
inorder(root.left)
print(root.value, end=",")
inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
"""
Commands:
+ value to add value into treap
- value to erase all nodes with value
>>> root = interact_treap(None, "+1")
>>> inorder(root)
1,
>>> root = interact_treap(root, "+3 +5 +17 +19 +2 +16 +4 +0")
>>> inorder(root)
0,1,2,3,4,5,16,17,19,
>>> root = interact_treap(root, "+4 +4 +4")
>>> inorder(root)
0,1,2,3,4,4,4,4,5,16,17,19,
>>> root = interact_treap(root, "-0")
>>> inorder(root)
1,2,3,4,4,4,4,5,16,17,19,
>>> root = interact_treap(root, "-4")
>>> inorder(root)
1,2,3,5,16,17,19,
>>> root = interact_treap(root, "=0")
Unknown command
"""
for arg in args.split():
if arg[0] == "+":
root = insert(root, int(arg[1:]))
elif arg[0] == "-":
root = erase(root, int(arg[1:]))
else:
print("Unknown command")
return root
def main() -> None:
"""After each command, program prints treap"""
root = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. "
)
args = input()
while args != "q":
root = interact_treap(root, args)
print(root)
args = input()
print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
|
Node
|
python
|
has2k1__plotnine
|
plotnine/mapping/_atomic.py
|
{
"start": 2115,
"end": 2479
}
|
class ____(ae_value[str | tuple]):
"""
A single color value
"""
def __post_init__(self):
if isinstance(self.value, str):
return
elif is_color_tuple(self.value):
self.value = tuple(self.value)
return
raise ValueError(f"{self.value} is not a known color.")
colour = color
@dataclass
|
color
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 9143,
"end": 10204
}
|
class ____:
array_abstraction_level = 1
shape: tuple[int, ...]
dtype: np.dtype
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
@property
def ndim(self):
return len(self.shape)
_neg = staticmethod(neg)
_add = staticmethod(add)
_radd = staticmethod(swap(add))
_mul = staticmethod(mul)
_rmul = staticmethod(swap(mul))
_gt = staticmethod(greater)
_lt = staticmethod(less)
@staticmethod
def _bool(tracer):
raise Exception("ShapedArray can't be unambiguously converted to bool")
@staticmethod
def _nonzero(tracer):
raise Exception("ShapedArray can't be unambiguously converted to bool")
def str_short(self):
return f'{self.dtype.name}[{",".join(str(d) for d in self.shape)}]'
def __hash__(self):
return hash((self.shape, self.dtype))
def __eq__(self, other):
return (type(self) is type(other) and
self.shape == other.shape and self.dtype == other.dtype)
def __repr__(self):
return f"ShapedArray(shape={self.shape}, dtype={self.dtype})"
|
ShapedArray
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/snapshot.py
|
{
"start": 548,
"end": 1144
}
|
class ____(GrapheneIPipelineSnapshotMixin, graphene.ObjectType):
class Meta: # pyright: ignore[reportIncompatibleVariableOverride]
interfaces = (GrapheneSolidContainer, GrapheneIPipelineSnapshot, GraphenePipelineReference)
name = "PipelineSnapshot"
def __init__(self, represented_job: RepresentedJob):
super().__init__()
self._represented_job = check.inst_param(
represented_job, "represented_pipeline", RepresentedJob
)
def get_represented_job(self) -> RepresentedJob:
return self._represented_job
|
GraphenePipelineSnapshot
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py
|
{
"start": 3972,
"end": 4878
}
|
class ____(Benchmark):
"""
Univariate Problem06 objective function.
This class defines the Univariate Problem06 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Problem06}}(x) = - \\left[x + \\sin(x) \\right] e^{-x^2}
Bound constraints: :math:`x \\in [-10, 10]`
.. figure:: figures/Problem06.png
:alt: Univariate Problem06 function
:align: center
**Univariate Problem06 function**
*Global optimum*: :math:`f(x)=-0.824239` for :math:`x = 0.67956`
"""
def __init__(self, dimensions=1):
Benchmark.__init__(self, dimensions)
self._bounds = [(-10.0, 10.0)]
self.global_optimum = 0.67956
self.fglob = -0.824239
def fun(self, x, *args):
self.nfev += 1
x = x[0]
return -(x + sin(x)) * exp(-x ** 2.0)
|
Problem06
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-nvidia-triton/llama_index/llms/nvidia_triton/utils.py
|
{
"start": 9110,
"end": 13918
}
|
class ____(_BaseTritonClient):
"""GRPC connection to a triton inference server."""
@property
def _inference_server_client(
self,
) -> Type["grpcclient.InferenceServerClient"]:
"""Return the preferred InferenceServerClient class."""
import tritonclient.grpc as grpcclient
return grpcclient.InferenceServerClient # type: ignore
@property
def _infer_input(self) -> Type["grpcclient.InferInput"]:
"""Return the preferred InferInput."""
import tritonclient.grpc as grpcclient
return grpcclient.InferInput # type: ignore
@property
def _infer_output(
self,
) -> Type["grpcclient.InferRequestedOutput"]:
"""Return the preferred InferRequestedOutput."""
import tritonclient.grpc as grpcclient
return grpcclient.InferRequestedOutput # type: ignore
def _send_stop_signals(self, model_name: str, request_id: str) -> None:
"""Send the stop signal to the Triton Inference server."""
stop_inputs = self._generate_stop_signals()
self._client.async_stream_infer(
model_name,
stop_inputs,
request_id=request_id,
parameters={"Streaming": True},
)
@staticmethod
def _process_result(result: Dict[str, str]) -> str:
"""Post-process the result from the server."""
import google.protobuf.json_format
import tritonclient.grpc as grpcclient
from tritonclient.grpc.service_pb2 import ModelInferResponse
message = ModelInferResponse()
generated_text: str = ""
google.protobuf.json_format.Parse(json.dumps(result), message)
infer_result = grpcclient.InferResult(message)
np_res = infer_result.as_numpy("text_output")
generated_text = ""
if np_res is not None:
generated_text = "".join([token.decode() for token in np_res])
return generated_text
def _stream_callback(
self,
result_queue: Queue,
force_batch: bool,
result: Any,
error: str,
) -> None:
"""Add streamed result to queue."""
if error:
result_queue.put(error)
else:
response_raw = result.get_response(as_json=True)
if "outputs" in response_raw:
# the very last response might have no output, just the final flag
response = self._process_result(response_raw)
if force_batch:
response = self._trim_batch_response(response)
if response in STOP_WORDS:
result_queue.put(None)
else:
result_queue.put(response)
if response_raw["parameters"]["triton_final_response"]["bool_param"]:
# end of the generation
result_queue.put(None)
# pylint: disable-next=too-many-arguments
def _send_prompt_streaming(
self,
model_name: str,
request_inputs: Any,
request_outputs: Optional[Any],
request_id: str,
result_queue: StreamingResponseGenerator,
force_batch: bool = False,
) -> None:
"""Send the prompt and start streaming the result."""
self._client.start_stream(
callback=partial(self._stream_callback, result_queue, force_batch)
)
self._client.async_stream_infer(
model_name=model_name,
inputs=request_inputs,
outputs=request_outputs,
request_id=request_id,
)
def request_streaming(
self,
model_name: str,
request_id: Optional[str] = None,
force_batch: bool = False,
**params: Any,
) -> StreamingResponseGenerator:
"""Request a streaming connection."""
if not self._client.is_model_ready(model_name):
raise RuntimeError("Cannot request streaming, model is not loaded")
if not request_id:
request_id = str(random.randint(1, 9999999)) # nosec
inputs = self._generate_inputs(stream=not force_batch, **params)
result_queue = StreamingResponseGenerator(
self, request_id, force_batch, model_name, max_tokens=params["tokens"]
)
outputs = self._generate_outputs()
self._send_prompt_streaming(
model_name,
inputs,
outputs,
request_id,
result_queue,
force_batch,
)
return result_queue
def stop_stream(
self, model_name: str, request_id: str, signal: bool = True
) -> None:
"""Close the streaming connection."""
if signal:
self._send_stop_signals(model_name, request_id)
self._client.stop_stream()
|
GrpcTritonClient
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/select_slider_test.py
|
{
"start": 15158,
"end": 17014
}
|
class ____(DeltaGeneratorTestCase):
def test_select_slider_with_width_pixels(self):
"""Test that select_slider can be displayed with a specific width in pixels."""
st.select_slider("Label", options=["a", "b", "c"], width=500)
element = self.get_delta_from_queue().new_element
assert (
element.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert element.width_config.pixel_width == 500
def test_select_slider_with_width_stretch(self):
"""Test that select_slider can be displayed with a width of 'stretch'."""
st.select_slider("Label", options=["a", "b", "c"], width="stretch")
element = self.get_delta_from_queue().new_element
assert (
element.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert element.width_config.use_stretch is True
def test_select_slider_with_default_width(self):
"""Test that the default width is used when not specified."""
st.select_slider("Label", options=["a", "b", "c"])
element = self.get_delta_from_queue().new_element
assert (
element.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert element.width_config.use_stretch is True
@parameterized.expand(
[
("invalid_string", "invalid"),
("negative", -1),
("zero", 0),
("float", 100.5),
]
)
def test_width_config_invalid(self, name, invalid_width):
"""Test width config with various invalid values."""
with pytest.raises(StreamlitInvalidWidthError):
st.select_slider("the label", options=["a", "b", "c"], width=invalid_width)
|
SelectSliderWidthTest
|
python
|
openai__openai-python
|
src/openai/_module_client.py
|
{
"start": 1631,
"end": 1754
}
|
class ____(LazyProxy["Audio"]):
@override
def __load__(self) -> Audio:
return _load_client().audio
|
AudioProxy
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/element_tree.py
|
{
"start": 32310,
"end": 34408
}
|
class ____(Widget, Generic[T]):
"""A representation of ``st.select_slider``."""
_value: T | Sequence[T] | None
proto: SliderProto = field(repr=False)
label: str
data_type: SliderProto.DataType.ValueType
options: list[str]
help: str
form_id: str
def __init__(self, proto: SliderProto, root: ElementTree) -> None:
super().__init__(proto, root)
self.type = "select_slider"
self.options = list(proto.options)
def set_value(self, v: T | Sequence[T]) -> SelectSlider[T]:
"""Set the (single) selection by value."""
self._value = v
return self
@property
def _widget_state(self) -> WidgetState:
serde = SelectSliderSerde(self.options, [], False)
try:
v = serde.serialize(self.format_func(self.value))
except (ValueError, TypeError):
try:
v = serde.serialize([self.format_func(val) for val in self.value]) # type: ignore
except: # noqa: E722
raise ValueError(f"Could not find index for {self.value}")
ws = WidgetState()
ws.id = self.id
ws.double_array_value.data[:] = v
return ws
@property
def value(self) -> T | Sequence[T]:
"""The currently selected value or range. (Any or Sequence of Any)""" # noqa: D400
if self._value is not None:
return self._value
state = self.root.session_state
assert state
# Awkward to do this with `cast`
return state[self.id] # type: ignore
@property
def format_func(self) -> Callable[[Any], Any]:
"""The widget's formatting function for displaying options. (callable)""" # noqa: D400
ss = self.root.session_state
return cast("Callable[[Any], Any]", ss[TESTING_KEY][self.id])
def set_range(self, lower: T, upper: T) -> SelectSlider[T]:
"""Set the ranged selection by values."""
return self.set_value([lower, upper])
SliderValueT = TypeVar("SliderValueT", int, float, date, time, datetime)
@dataclass(repr=False)
|
SelectSlider
|
python
|
python__mypy
|
mypy/report.py
|
{
"start": 3030,
"end": 4466
}
|
class ____(metaclass=ABCMeta):
def __init__(self, reports: Reports, output_dir: str) -> None:
self.output_dir = output_dir
if output_dir != "<memory>":
os.makedirs(output_dir, exist_ok=True)
@abstractmethod
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
pass
@abstractmethod
def on_finish(self) -> None:
pass
def register_reporter(
report_name: str,
reporter: Callable[[Reports, str], AbstractReporter],
needs_lxml: bool = False,
) -> None:
reporter_classes[report_name] = (reporter, needs_lxml)
def alias_reporter(source_reporter: str, target_reporter: str) -> None:
reporter_classes[target_reporter] = reporter_classes[source_reporter]
def should_skip_path(path: str) -> bool:
if stats.is_special_module(path):
return True
if path.startswith(".."):
return True
if "stubs" in path.split("/") or "stubs" in path.split(os.sep):
return True
return False
def iterate_python_lines(path: str) -> Iterator[tuple[int, str]]:
"""Return an iterator over (line number, line text) from a Python file."""
if not os.path.isdir(path): # can happen with namespace packages
with tokenize.open(path) as input_file:
yield from enumerate(input_file, 1)
|
AbstractReporter
|
python
|
pytorch__pytorch
|
test/test_prims.py
|
{
"start": 11650,
"end": 13141
}
|
class ____(TestCase):
def test_torch_ops(self):
r = make_tensor((2,), device='cpu', dtype=torch.float)
self.assertEqual(torch.ops.prims.sin(r), torch.sin(r))
r = LoggingTensor(r)
with capture_logs() as logs:
log_input("input", r)
prims.sin(r)
self.assertExpectedInline('\n'.join(logs), """\
$0: f32[2] = input('input')
$1: f32[2] = torch._ops.prims.sin.default($0)""")
def test_mul_complex(self):
prims.mul(torch.randn(2), 1 + 1j)
def test_clone_complex(self):
with torch._dispatch.python.enable_python_dispatcher():
x = torch.randn(4, dtype=torch.complex64, device='meta').conj()
x + 1
def test_clone_meta_stride_preservation_dense(self):
tensor = torch.randn(1, 5).t()
meta_clone = prims._clone_meta(tensor, memory_format=torch.preserve_format)
self.assertEqual(tensor.stride(), meta_clone.stride())
def test_clone_meta_stride_preservation_sparse(self):
tensor = torch.arange(12).float().view(3, 4)[1:, ::2]
meta_clone = prims._clone_meta(tensor, memory_format=torch.preserve_format)
self.assertEqual(tensor.contiguous().stride(), meta_clone.stride())
def test_check_deprecation_warning(self):
with self.assertWarnsRegex(FutureWarning, 'will be removed in the future'):
torch._prims_common.check(True, lambda: 'message')
instantiate_device_type_tests(TestPrims, globals())
|
TestPrimsBasic
|
python
|
ansible__ansible
|
lib/ansible/plugins/action/uri.py
|
{
"start": 466,
"end": 3434
}
|
class ____(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
self._supports_async = True
self._supports_check_mode = False
if task_vars is None:
task_vars = dict()
super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
body_format = self._task.args.get('body_format', 'raw')
body = self._task.args.get('body')
src = self._task.args.get('src', None)
remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)
try:
if remote_src:
# everything is remote, so we just execute the module
# without changing any of the module arguments
# call with ansible.legacy prefix to prevent collections collisions while allowing local override
return self._execute_module(module_name='ansible.legacy.uri', task_vars=task_vars, wrap_async=self._task.async_val)
kwargs = {}
if src:
src = self._find_needle('files', src)
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
kwargs['src'] = tmp_src
self._transfer_file(src, tmp_src)
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
elif body_format == 'form-multipart':
if not isinstance(body, _c.Mapping):
raise AnsibleActionFail(
'body must be mapping, cannot be type %s' % body.__class__.__name__
)
new_body = deepcopy(body)
for field, value in new_body.items():
if not isinstance(value, _c.MutableMapping):
continue
content = value.get('content')
filename = value.get('filename')
if not filename or content:
continue
filename = self._find_needle('files', filename)
tmp_src = self._connection._shell.join_path(
self._connection._shell.tmpdir,
os.path.basename(filename)
)
value['filename'] = tmp_src
self._transfer_file(filename, tmp_src)
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
kwargs['body'] = new_body
new_module_args = self._task.args | kwargs
# call with ansible.legacy prefix to prevent collections collisions while allowing local override
return self._execute_module('ansible.legacy.uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
|
ActionModule
|
python
|
numpy__numpy
|
tools/swig/test/testVector.py
|
{
"start": 302,
"end": 10633
}
|
class ____(unittest.TestCase):
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test the (type IN_ARRAY1[ANY]) typemap
def testLength(self):
"Test length function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertEqual(length([5, 12, 0]), 13)
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthBadList(self):
"Test length function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(BadListError, length, [5, "twelve", 0])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthWrongSize(self):
"Test length function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, [5, 12])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthWrongDim(self):
"Test length function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, [[1, 2], [3, 4]])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthNonContainer(self):
"Test length function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, None)
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProd(self):
"Test prod function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertEqual(prod([1, 2, 3, 4]), 24)
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdBadList(self):
"Test prod function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(BadListError, prod, [[1, "two"], ["e", "pi"]])
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdWrongDim(self):
"Test prod function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(TypeError, prod, [[1, 2], [8, 9]])
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdNonContainer(self):
"Test prod function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(TypeError, prod, None)
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSum(self):
"Test sum function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertEqual(sum([5, 6, 7, 8]), 26)
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumBadList(self):
"Test sum function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(BadListError, sum, [3, 4, 5, "pi"])
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumWrongDim(self):
"Test sum function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(TypeError, sum, [[3, 4], [5, 6]])
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumNonContainer(self):
"Test sum function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(TypeError, sum, True)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverse(self):
"Test reverse function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([1, 2, 4], self.typeCode)
reverse(vector)
self.assertEqual((vector == [4, 2, 1]).all(), True)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongDim(self):
"Test reverse function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([[1, 2], [3, 4]], self.typeCode)
self.assertRaises(TypeError, reverse, vector)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongSize(self):
"Test reverse function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([9, 8, 7, 6, 5, 4], self.typeCode)
self.assertRaises(TypeError, reverse, vector)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongType(self):
"Test reverse function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([1, 2, 4], 'c')
self.assertRaises(TypeError, reverse, vector)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseNonArray(self):
"Test reverse function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
self.assertRaises(TypeError, reverse, [2, 4, 6])
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnes(self):
"Test ones function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros(5, self.typeCode)
ones(vector)
np.testing.assert_array_equal(vector, np.array([1, 1, 1, 1, 1]))
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesWrongDim(self):
"Test ones function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros((5, 5), self.typeCode)
self.assertRaises(TypeError, ones, vector)
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesWrongType(self):
"Test ones function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros((5, 5), 'c')
self.assertRaises(TypeError, ones, vector)
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesNonArray(self):
"Test ones function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
self.assertRaises(TypeError, ones, [2, 4, 6, 8])
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZeros(self):
"Test zeros function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones(5, self.typeCode)
zeros(vector)
np.testing.assert_array_equal(vector, np.array([0, 0, 0, 0, 0]))
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosWrongDim(self):
"Test zeros function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones((5, 5), self.typeCode)
self.assertRaises(TypeError, zeros, vector)
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosWrongType(self):
"Test zeros function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones(6, 'c')
self.assertRaises(TypeError, zeros, vector)
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosNonArray(self):
"Test zeros function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
self.assertRaises(TypeError, zeros, [1, 3, 5, 7, 9])
# Test the (type ARGOUT_ARRAY1[ANY]) typemap
def testEOSplit(self):
"Test eoSplit function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
eoSplit = Vector.__dict__[self.typeStr + "EOSplit"]
even, odd = eoSplit([1, 2, 3])
self.assertEqual((even == [1, 0, 3]).all(), True)
self.assertEqual((odd == [0, 2, 0]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwos(self):
"Test twos function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
twos = Vector.__dict__[self.typeStr + "Twos"]
vector = twos(5)
self.assertEqual((vector == [2, 2, 2, 2, 2]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwosNonInt(self):
"Test twos function with non-integer dimension"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
twos = Vector.__dict__[self.typeStr + "Twos"]
self.assertRaises(TypeError, twos, 5.0)
# Test the (int DIM1, type* ARGOUT_ARRAY1) typemap
def testThrees(self):
"Test threes function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
threes = Vector.__dict__[self.typeStr + "Threes"]
vector = threes(6)
self.assertEqual((vector == [3, 3, 3, 3, 3, 3]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testThreesNonInt(self):
"Test threes function with non-integer dimension"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
threes = Vector.__dict__[self.typeStr + "Threes"]
self.assertRaises(TypeError, threes, "threes")
######################################################################
|
VectorTestCase
|
python
|
pypa__warehouse
|
tests/common/db/observations.py
|
{
"start": 128,
"end": 210
}
|
class ____(WarehouseFactory):
class Meta:
model = Observer
|
ObserverFactory
|
python
|
django__django
|
tests/managers_regress/models.py
|
{
"start": 2135,
"end": 2315
}
|
class ____(AbstractBase3):
name = models.CharField(max_length=25)
default = OnlyFred()
objects = models.Manager()
def __str__(self):
return self.name
|
Child5
|
python
|
huggingface__transformers
|
src/transformers/models/patchtst/modeling_patchtst.py
|
{
"start": 57784,
"end": 59578
}
|
class ____(nn.Module):
def __init__(self, config: PatchTSTConfig):
super().__init__()
self.use_cls_token = config.use_cls_token
self.pooling_type = config.pooling_type
self.flatten = nn.Flatten(start_dim=1)
self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()
self.linear = nn.Linear(config.num_input_channels * config.d_model, config.num_targets)
def forward(self, embedding: torch.Tensor):
"""
Parameters:
embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or
`(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*):
Embedding from the model
Returns:
`torch.Tensor` of shape `(bs, num_targets)`
"""
if self.use_cls_token:
# use the first output token, pooled_embedding: bs x num_channels x d_model
pooled_embedding = embedding[:, :, 0, :]
elif self.pooling_type == "mean":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.mean(dim=2)
elif self.pooling_type == "max":
# pooled_embedding: [bs x num_channels x d_model]
pooled_embedding = embedding.max(dim=2).values
else:
raise ValueError(f"pooling operator {self.pooling_type} is not implemented yet")
# pooled_embedding: bs x num_channels * d_model
pooled_embedding = self.flatten(pooled_embedding)
# output: bs x n_classes
output = self.linear(self.dropout(pooled_embedding))
return output
@auto_docstring(
custom_intro="""
The PatchTST for classification model.
"""
)
|
PatchTSTClassificationHead
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/fix/fixers.py
|
{
"start": 910,
"end": 2400
}
|
class ____(BaseFixer):
"""Fix the project config"""
identifier = "project-config"
def get_message(self) -> str:
return (
"[success]python.path[/] config needs to be moved to [info].pdm-python[/] and "
"[info].pdm.toml[/] needs to be renamed to [info]pdm.toml[/]"
)
def _fix_gitignore(self) -> None:
gitignore = self.project.root.joinpath(".gitignore")
if not gitignore.exists():
return
content = gitignore.read_text("utf8")
if ".pdm-python" not in content:
content = re.sub(r"^\.pdm\.toml$", ".pdm-python", content, flags=re.M)
gitignore.write_text(content, "utf8")
def fix(self) -> None:
old_file = self.project.root.joinpath(".pdm.toml")
config = Config(old_file).self_data
if not self.project.root.joinpath(".pdm-python").exists() and config.get("python.path"):
self.log("Creating .pdm-python...", verbosity=Verbosity.DETAIL)
self.project.root.joinpath(".pdm-python").write_text(config["python.path"])
self.project.project_config # access the project config to move the config items
self.log("Moving .pdm.toml to pdm.toml...", verbosity=Verbosity.DETAIL)
old_file.unlink()
self.log("Fixing .gitignore...", verbosity=Verbosity.DETAIL)
self._fix_gitignore()
def check(self) -> bool:
return self.project.root.joinpath(".pdm.toml").exists()
|
ProjectConfigFixer
|
python
|
walkccc__LeetCode
|
solutions/3527. Find the Most Common Response/3527.py
|
{
"start": 0,
"end": 366
}
|
class ____:
def findCommonResponse(self, responses: list[list[str]]) -> str:
count = collections.Counter()
for response in responses:
for response in set(response):
count[response] += 1
maxFreq = max(count.values())
return min([response
for response, count in count.items()
if count == maxFreq])
|
Solution
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/colors/test_color__colors.py
|
{
"start": 5693,
"end": 11505
}
|
class ____:
def test_init(self) -> None:
c = bcc.RGB(10, 20, 30)
assert c
assert c.a == 1.0
assert c.r == 10
assert c.g == 20
assert c.b == 30
c = bcc.RGB(10, 20, 30, 0.3)
assert c
assert c.a == 0.3
assert c.r == 10
assert c.g == 20
assert c.b == 30
def test_repr(self) -> None:
c = bcc.RGB(10, 20, 30)
assert repr(c) == c.to_css()
c = bcc.RGB(10, 20, 30, 0.3)
assert repr(c) == c.to_css()
def test_copy(self) -> None:
c = bcc.RGB(10, 20, 30)
c2 = c.copy()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
def test_from_hex_string(self) -> None:
# '#rrggbb'
c = bcc.RGB.from_hex_string("#A3B20F")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 1.0)
c = bcc.RGB.from_hex_string("#a3b20f")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 1.0)
# '#rrggbbaa'
c = bcc.RGB.from_hex_string("#A3B20FC0")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 192/255.0)
c = bcc.RGB.from_hex_string("#a3b20fc0")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 192/255.0)
# '#rgb'
c = bcc.RGB.from_hex_string("#7A3")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 1.0)
c = bcc.RGB.from_hex_string("#7a3")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 1.0)
# '#rgba'
c = bcc.RGB.from_hex_string("#7A3B")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 187/255.0)
c = bcc.RGB.from_hex_string("#7a3b")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 187/255.0)
# Invalid hex string
with pytest.raises(ValueError):
bcc.RGB.from_hex_string("#")
with pytest.raises(ValueError):
bcc.RGB.from_hex_string("#1")
with pytest.raises(ValueError):
bcc.RGB.from_hex_string("#12")
with pytest.raises(ValueError):
bcc.RGB.from_hex_string("#12345")
with pytest.raises(ValueError):
bcc.RGB.from_hex_string("#1234567")
with pytest.raises(ValueError):
bcc.RGB.from_hex_string("#123456789")
with pytest.raises(ValueError):
bcc.RGB.from_hex_string(" #abc")
def test_from_hsl(self) -> None:
c = bcc.HSL(10, 0.1, 0.2)
c2 = bcc.RGB.from_hsl(c)
assert c2 is not c
assert c2.a == 1.0
assert c2.r == 56
assert c2.g == 48
assert c2.b == 46
c = bcc.HSL(10, 0.1, 0.2, 0.3)
c2 = bcc.RGB.from_hsl(c)
assert c2 is not c
assert c2.a == 0.3
assert c2.r == 56
assert c2.g == 48
assert c2.b == 46
def test_from_rgb(self) -> None:
c = bcc.RGB(10, 20, 30)
c2 = bcc.RGB.from_rgb(c)
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
c = bcc.RGB(10, 20, 30, 0.1)
c2 = bcc.RGB.from_rgb(c)
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
def test_to_css(self) -> None:
c = bcc.RGB(10, 20, 30)
assert c.to_css() == "rgb(10, 20, 30)"
c = bcc.RGB(10, 20, 30, 0.3)
assert c.to_css() == "rgba(10, 20, 30, 0.3)"
def test_to_hex(self) -> None:
c = bcc.RGB(10, 20, 30)
assert c.to_hex(), f"#{c.r:02x}{c.g:02x}{c.b:02x}"
assert bcc.RGB(10, 20, 30, 0.0).to_hex() == "#0a141e00"
assert bcc.RGB(10, 20, 30, 0.5).to_hex() == "#0a141e80"
assert bcc.RGB(10, 20, 30, 0.996).to_hex() == "#0a141efe"
assert bcc.RGB(10, 20, 30, 1.0).to_hex() == "#0a141e"
def test_to_hsl(self) -> None:
c = bcc.RGB(255, 100, 0)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
c = bcc.RGB(255, 100, 0, 0.1)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
def test_to_rgb(self) -> None:
c = bcc.RGB(10, 20, 30)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
c = bcc.RGB(10, 20, 30, 0.1)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
def test_brightness(self) -> None:
assert round(bcc.RGB( 0, 0, 0).brightness, 2) == 0.0
assert round(bcc.RGB(127, 127, 127).brightness, 2) == 0.5
assert round(bcc.RGB(128, 128, 128).brightness, 2) == 0.5
assert round(bcc.RGB(255, 255, 255).brightness, 2) == 1.0
def test_luminance(self) -> None:
assert round(bcc.RGB( 0, 0, 0).luminance, 3) == 0.000
assert round(bcc.RGB(190, 0, 190).luminance, 3) == 0.149
assert round(bcc.RGB(130, 130, 90).luminance, 3) == 0.218
assert round(bcc.RGB(255, 255, 255).luminance, 3) == 1.000
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
Test_RGB
|
python
|
django__django
|
tests/admin_inlines/admin.py
|
{
"start": 9208,
"end": 9324
}
|
class ____(admin.TabularInline):
model = Class
extra = 1
filter_vertical = ["person"]
|
ClassTabularVertical
|
python
|
pandas-dev__pandas
|
pandas/core/computation/pytables.py
|
{
"start": 13453,
"end": 17016
}
|
class ____(BaseExprVisitor):
const_type: ClassVar[type[ops.Term]] = Constant
term_type: ClassVar[type[Term]] = Term
def __init__(self, env, engine, parser, **kwargs) -> None:
super().__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(
self,
f"visit_{bin_node}",
lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
)
def visit_UnaryOp(self, node, **kwargs) -> ops.Term | UnaryOp | None:
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp("~", self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError("Unary addition not supported")
# TODO: return None might never be reached
return None
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(
ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
)
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs) -> ops.Term:
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
if isinstance(slobj, Term):
# In py39 np.ndarray lookups with Term containing int raise
slobj = slobj.value
try:
return self.const_type(value[slobj], self.env)
except TypeError as err:
raise ValueError(f"cannot subscript {value!r} with {slobj!r}") from err
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = type(node.ctx)
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except AttributeError:
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError(f"Invalid Attribute context {ctx.__name__}")
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
raise TypeError(
"where must be passed as a string, PyTablesExpr, "
"or list-like of PyTablesExpr"
)
return w
|
PyTablesExprVisitor
|
python
|
getsentry__sentry
|
src/sentry/integrations/msteams/webhook.py
|
{
"start": 6532,
"end": 26463
}
|
class ____(Endpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = ()
permission_classes = ()
provider = IntegrationProviderSlug.MSTEAMS.value
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._event_handlers: dict[MsTeamsEvents, Callable[[Request], Response]] = {
MsTeamsEvents.MESSAGE: self._handle_message_event,
MsTeamsEvents.CONVERSATION_UPDATE: self._handle_conversation_update_event,
MsTeamsEvents.INSTALLATION_UPDATE: self._handle_installation_update_event,
MsTeamsEvents.UNKNOWN: self._handle_unknown_event,
}
@csrf_exempt
def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
return super().dispatch(request, *args, **kwargs)
def post(self, request: Request) -> Response:
"""
POST webhook handler for MSTeams bot.
The events are broadcast to MSTeams from Microsoft, and are documented at https://learn.microsoft.com/en-us/microsoftteams/platform/resources/bot-v3/bots-notifications
"""
# verify_signature will raise the exception corresponding to the error
self._verify_webhook_request(request)
data = request.data
raw_event_type = data["type"]
event_type = MsTeamsEvents.get_from_value(value=raw_event_type)
event_handler_func = self._event_handlers[event_type]
response = event_handler_func(request)
logger.info("sentry.integrations.msteams.webhook", extra={"request_data": data})
return response if response else self.respond(status=204)
@classmethod
def _get_team_installation_request_data(cls, data: dict[str, Any]) -> dict[str, Any]:
"""
Helper method that will construct the installation request for a MsTeams team channel.
We want the KeyError exception to be raised if the key does not exist.
"""
channel_data = data["channelData"]
new_team_info = channel_data["team"]
team_id = new_team_info["id"]
team_name = new_team_info["name"]
service_url = data["serviceUrl"]
from_data = data["from"]
user_id = from_data["id"]
tenant_info = channel_data["tenant"]
tenant_id = tenant_info["id"]
params = {
"service_url": service_url,
"user_id": user_id,
"tenant_id": tenant_id,
"conversation_id": team_id,
"external_id": team_id,
"external_name": team_name,
"installation_type": "team",
}
return params
def _handle_installation_update_event(self, request: Request) -> Response:
data = request.data
action = data.get("action", None)
if action is None or action != "add":
logger.info(
"sentry.integrations.msteams.webhooks: Action not supported",
extra={"request_data": data},
)
return self.respond({"details": f"{action} is currently not supported"}, status=204)
try:
installation_params = self._get_team_installation_request_data(data=data)
except Exception as err:
logger.info(
"sentry.integrations.msteams.webhooks: Installation param error",
exc_info=err,
extra={"request_data": data},
)
return self.respond(
{"details": "required request format or keys are missing"}, status=400
)
# sign the params so this can't be forged
signed_params = sign(salt=SALT, **installation_params)
# send welcome message to the team
preinstall_client = get_preinstall_client(installation_params["service_url"])
card = build_team_installation_message(signed_params)
preinstall_client.send_card(installation_params["conversation_id"], card)
return self.respond(status=201)
def _handle_message_event(self, request: Request) -> Response:
data = request.data
conversation = data.get("conversation", {})
conversation_type = conversation.get("conversationType")
# the only message events we care about are those which
# are from a user submitting an option on a card, which
# will always contain an "payload.actionType" in the data.
if data.get("value", {}).get("payload", {}).get("actionType"):
# Processing card actions can only occur in the Region silo.
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return self.respond(status=400)
return self._handle_action_submitted(request)
elif conversation_type == "channel":
return self._handle_channel_message(request)
return self._handle_personal_message(request)
def _handle_conversation_update_event(self, request: Request) -> Response:
data = request.data
conversation = data.get("conversation", {})
conversation_type = conversation.get("conversationType")
channel_data = data["channelData"]
event = channel_data.get("eventType")
if event == "teamMemberAdded":
return self._handle_team_member_added(request)
elif event == "teamMemberRemoved":
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return self.respond(status=400)
return self._handle_team_member_removed(request)
elif (
data.get("membersAdded") and conversation_type == "personal"
): # no explicit event for user adding app unfortunately
return self._handle_personal_member_add(request)
return self.respond(status=204)
def _handle_unknown_event(self, request: Request) -> Response:
return self.respond(status=204)
def _verify_webhook_request(self, request: Request) -> bool:
return verify_signature(request)
def _handle_personal_member_add(self, request: Request):
data = request.data
data["conversation_id"] = data["conversation"]["id"]
tenant_id = data["conversation"]["tenantId"]
params = {
"external_id": tenant_id,
"external_name": f"{tenant_id} (Microsoft Tenant)",
"installation_type": "tenant",
}
return self._handle_member_add(data, params, build_personal_installation_message)
def _handle_team_member_added(self, request: Request) -> Response:
data = request.data
team = data["channelData"]["team"]
data["conversation_id"] = team["id"]
params = {
"external_id": team["id"],
"external_name": team["name"],
"installation_type": "team",
}
return self._handle_member_add(data, params, build_team_installation_message)
def _handle_member_add(
self,
data: Mapping[str, Any],
params: dict[str, str],
build_installation_card: Callable[[str], AdaptiveCard],
) -> Response:
# only care if our bot is the new member added
matches = list(filter(lambda x: x["id"] == data["recipient"]["id"], data["membersAdded"]))
if not matches:
return self.respond(status=204)
# need to keep track of the service url since we won't get it later
params.update(
{
"service_url": data["serviceUrl"],
"user_id": data["from"]["id"],
"conversation_id": data["conversation_id"],
"tenant_id": data["channelData"]["tenant"]["id"],
}
)
logger.info(
"sentry.integrations.msteams.webhook.handle_member_add",
extra={
**params,
"member_type_handler": build_installation_card.__name__,
},
)
# sign the params so this can't be forged
signed_params = sign(salt=SALT, **params)
# send welcome message to the team
client = get_preinstall_client(data["serviceUrl"])
conversation_id = data["conversation_id"]
card = build_installation_card(signed_params)
client.send_card(conversation_id, card)
return self.respond(status=201)
def _handle_team_member_removed(self, request: Request) -> Response:
data = request.data
channel_data = data["channelData"]
# only care if our bot is the new member removed
matches = list(filter(lambda x: x["id"] == data["recipient"]["id"], data["membersRemoved"]))
if not matches:
return self.respond(status=204)
team_id = channel_data["team"]["id"]
integration = parsing.get_integration_from_channel_data(data=data)
if integration is None:
logger.info(
"msteams.uninstall.missing-integration",
extra={"team_id": team_id},
)
return self.respond(status=404)
# no matter how many orgs are using the integration
# we have to delete the integration because the auth has been revoked
# an app can only be installed once for a team (unless it's deleted and re-installed)
# this is different than Vercel, for example, which can have multiple installations
# for the same team in Vercel with different auth tokens
org_integrations = integration_service.get_organization_integrations(
integration_id=integration.id
)
if len(org_integrations) > 0:
for org_integration in org_integrations:
create_audit_entry(
request=request,
organization_id=org_integration.organization_id,
target_object=integration.id,
event=audit_log.get_event_id("INTEGRATION_REMOVE"),
actor_label="Teams User",
data={
"provider": integration.provider,
"name": integration.name,
"team_id": team_id,
},
)
integration_service.delete_integration(integration_id=integration.id)
return self.respond(status=204)
def _make_action_data(self, data: Mapping[str, Any], user_id: int) -> dict[str, Any]:
action_data: dict[str, Any] = {}
action_type = data["payload"]["actionType"]
if action_type == ACTION_TYPE.UNRESOLVE:
action_data = {"status": "unresolved"}
elif action_type == ACTION_TYPE.RESOLVE:
status = data.get("resolveInput")
if status:
# status might look something like "resolved:inCurrentRelease" or just "resolved"
status_data = status.split(":", 1)
resolve_type = status_data[-1]
action_data = {"status": "resolved"}
if resolve_type == "inNextRelease":
action_data.update({"statusDetails": {"inNextRelease": True}})
elif resolve_type == "inCurrentRelease":
action_data.update({"statusDetails": {"inRelease": "latest"}})
# ignore has been renamed to archive, but ignore is still used in the payload
elif action_type == ACTION_TYPE.ARCHIVE:
ignore_count = data.get("archiveInput")
if ignore_count:
action_data = {"status": "ignored"}
if int(ignore_count) > 0:
action_data.update({"statusDetails": {"ignoreCount": int(ignore_count)}})
elif action_type == ACTION_TYPE.ASSIGN:
assignee = data["assignInput"]
if assignee == "ME":
assignee = f"user:{user_id}"
action_data = {"assignedTo": assignee, "integration": ActivityIntegration.MSTEAMS.value}
elif action_type == ACTION_TYPE.UNASSIGN:
action_data = {"assignedTo": ""}
return action_data
_ACTION_TYPES = {
ACTION_TYPE.RESOLVE: ("resolve", MessagingInteractionType.RESOLVE),
ACTION_TYPE.ARCHIVE: ("archive", MessagingInteractionType.ARCHIVE),
ACTION_TYPE.ASSIGN: ("assign", MessagingInteractionType.ASSIGN),
ACTION_TYPE.UNRESOLVE: ("unresolve", MessagingInteractionType.UNRESOLVE),
ACTION_TYPE.UNASSIGN: ("unassign", MessagingInteractionType.UNASSIGN),
}
_EVENT_TYPES: dict[str, type[MsTeamsIntegrationAnalytics]] = {
"assign": MsTeamsIntegrationAssign,
"resolve": MsTeamsIntegrationResolve,
"archive": MsTeamsIntegrationArchive,
"unresolve": MsTeamsIntegrationUnresolve,
"unassign": MsTeamsIntegrationUnassign,
}
def _issue_state_change(self, group: Group, identity: RpcIdentity, data) -> Response:
event_write_key = ApiKey(
organization_id=group.project.organization_id, scope_list=["event:write"]
)
action_data = self._make_action_data(data, identity.user_id)
status, interaction_type = self._ACTION_TYPES[data["payload"]["actionType"]]
try:
analytics.record(
self._EVENT_TYPES[status](
actor_id=identity.user_id,
organization_id=group.project.organization.id,
),
)
except Exception as e:
sentry_sdk.capture_exception(e)
with MessagingInteractionEvent(
interaction_type, MsTeamsMessagingSpec()
).capture() as lifecycle:
try:
response = client.put(
path=f"/projects/{group.project.organization.slug}/{group.project.slug}/issues/",
params={"id": group.id},
data=action_data,
user=user_service.get_user(user_id=identity.user_id),
auth=event_write_key,
)
except client.ApiError as e:
if e.status_code == 403:
lifecycle.record_halt(e)
# If the user hasn't configured their releases properly, we recieve errors like:
# sentry.api.client.ApiError: status=400 body={'statusDetails': {'inNextRelease': [xxx])]}}"
# We can mark these as halt
elif e.status_code == 400 and e.body.get("statusDetails", {}).get("inNextRelease"):
lifecycle.record_halt(e)
elif e.status_code >= 400:
lifecycle.record_failure(e)
return response
def _handle_action_submitted(self, request: Request) -> Response:
# pull out parameters
data = request.data
channel_data = data["channelData"]
tenant_id = channel_data["tenant"]["id"]
payload = data["value"]["payload"]
group_id = payload["groupId"]
integration_id = payload["integrationId"]
user_id = data["from"]["id"]
activity_id = data["replyToId"]
conversation = data["conversation"]
if conversation["conversationType"] == "personal":
conversation_id = conversation["id"]
else:
conversation_id = channel_data["channel"]["id"]
integration = parsing.get_integration_from_card_action(data=data)
if integration is None:
logger.info(
"msteams.action.missing-integration", extra={"integration_id": integration_id}
)
return self.respond(status=404)
team_id = integration.external_id
client = MsTeamsClient(integration)
group = Group.objects.select_related("project__organization").filter(id=group_id).first()
if group:
integration = integration_service.get_integration(
integration_id=integration.id, status=ObjectStatus.ACTIVE
)
if integration is None:
group = None
if integration is None or group is None:
logger.info(
"msteams.action.invalid-issue",
extra={
"team_id": team_id,
"integration_id": (integration.id if integration else None),
},
)
return self.respond(status=404)
idp = identity_service.get_provider(
provider_type=IntegrationProviderSlug.MSTEAMS.value, provider_ext_id=team_id
)
if idp is None:
logger.info(
"msteams.action.invalid-team-id",
extra={
"team_id": team_id,
"integration_id": integration.id,
"organization_id": group.organization.id,
},
)
return self.respond(status=404)
identity = identity_service.get_identity(
filter={"provider_id": idp.id, "identity_ext_id": user_id}
)
if identity is None:
associate_url = build_linking_url(
integration, group.organization, user_id, team_id, tenant_id
)
card = build_linking_card(associate_url)
user_conversation_id = client.get_user_conversation_id(user_id, tenant_id)
client.send_card(user_conversation_id, card)
return self.respond(status=201)
# update the state of the issue
issue_change_response = self._issue_state_change(group, identity, data["value"])
# get the rules from the payload
rules = tuple(Rule.objects.filter(id__in=payload["rules"]))
# pull the event based off our payload
event = eventstore.backend.get_event_by_id(group.project_id, payload["eventId"])
if event is None:
logger.info(
"msteams.action.event-missing",
extra={
"team_id": team_id,
"integration_id": integration.id,
"organization_id": group.organization.id,
"event_id": payload["eventId"],
"project_id": group.project_id,
},
)
return self.respond(status=404)
# refresh issue and update card
group.refresh_from_db()
card = MSTeamsIssueMessageBuilder(group, event, rules, integration).build_group_card()
client.update_card(conversation_id, activity_id, card)
return issue_change_response
def _handle_channel_message(self, request: Request) -> Response:
data = request.data
# check to see if we are mentioned
recipient_id = data.get("recipient", {}).get("id")
if recipient_id:
# check the ids of the mentions in the entities
mentioned = (
len(
list(
filter(
lambda x: x.get("mentioned", {}).get("id") == recipient_id,
data.get("entities", []),
)
)
)
> 0
)
if mentioned:
client = get_preinstall_client(data["serviceUrl"])
card = build_mentioned_card()
conversation_id = data["conversation"]["id"]
client.send_card(conversation_id, card)
return self.respond(status=204)
def _handle_personal_message(self, request: Request) -> Response:
data = request.data
command_text = data.get("text", "").strip()
dispatcher = MsTeamsCommandDispatcher(data)
try:
card = dispatcher.dispatch(CommandInput(command_text))
except CommandNotMatchedError:
card = build_unrecognized_command_card(command_text)
client = get_preinstall_client(data["serviceUrl"])
client.send_card(dispatcher.conversation_id, card)
return self.respond(status=204)
@dataclass(frozen=True)
|
MsTeamsWebhookEndpoint
|
python
|
jschneier__django-storages
|
tests/test_sftp.py
|
{
"start": 338,
"end": 7943
}
|
class ____(TestCase):
def setUp(self):
self.storage = sftpstorage.SFTPStorage(host="foo", root_path="root")
def test_init(self):
pass
@patch("paramiko.SSHClient")
def test_no_known_hosts_file(self, mock_ssh):
self.storage.known_host_file = "not_existed_file"
self.storage._connect()
self.assertEqual("foo", mock_ssh.return_value.connect.call_args[0][0])
@patch.object(os.path, "expanduser", return_value="/path/to/known_hosts")
@patch.object(os.path, "exists", return_value=True)
@patch("paramiko.SSHClient")
def test_error_when_known_hosts_file_not_defined(self, mock_ssh, *a):
self.storage._connect()
self.storage._ssh.load_host_keys.assert_called_once_with("/path/to/known_hosts")
@patch("paramiko.SSHClient")
def test_connect(self, mock_ssh):
self.storage._connect()
self.assertEqual("foo", mock_ssh.return_value.connect.call_args[0][0])
@patch("paramiko.SSHClient")
def test_close_unopened(self, mock_ssh):
with self.storage:
pass
mock_ssh.return_value.close.assert_not_called()
@patch("paramiko.SSHClient")
def test_close_opened(self, mock_ssh):
with self.storage as storage:
storage._connect()
mock_ssh.return_value.close.assert_called_once_with()
def test_open(self):
file_ = self.storage._open("foo")
self.assertIsInstance(file_, sftpstorage.SFTPStorageFile)
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_read(self, mock_sftp):
self.storage._read("foo")
self.assertTrue(mock_sftp.open.called)
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_chown(self, mock_sftp):
self.storage._chown("foo", 1, 1)
self.assertEqual(mock_sftp.chown.call_args[0], ("foo", 1, 1))
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_mkdir(self, mock_sftp):
self.storage._mkdir("foo")
self.assertEqual(mock_sftp.mkdir.call_args[0], ("foo",))
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{"stat.side_effect": (FileNotFoundError(), True)},
)
def test_mkdir_parent(self, mock_sftp):
self.storage._mkdir("bar/foo")
self.assertEqual(mock_sftp.mkdir.call_args_list[0][0], ("bar",))
self.assertEqual(mock_sftp.mkdir.call_args_list[1][0], ("bar/foo",))
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_save(self, mock_sftp):
self.storage._save("foo", File(io.BytesIO(b"foo"), "foo"))
self.assertTrue(mock_sftp.putfo.called)
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_save_non_seekable(self, mock_sftp):
self.storage._save("foo", NonSeekableContentFile("foo"))
self.assertTrue(mock_sftp.putfo.called)
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{"stat.side_effect": (FileNotFoundError(), True)},
)
def test_save_in_subdir(self, mock_sftp):
self.storage._save("bar/foo", File(io.BytesIO(b"foo"), "foo"))
self.assertEqual(mock_sftp.stat.call_args_list[0][0], ("root/bar",))
self.assertEqual(mock_sftp.mkdir.call_args_list[0][0], ("root/bar",))
self.assertTrue(mock_sftp.putfo.called)
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_delete(self, mock_sftp):
self.storage.delete("foo")
self.assertEqual(mock_sftp.remove.call_args_list[0][0], ("root/foo",))
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_path_exists(self, mock_sftp):
self.assertTrue(self.storage._path_exists("root/foo"))
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_exists(self, mock_sftp):
self.assertTrue(self.storage.exists("foo"))
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{"stat.side_effect": FileNotFoundError()},
)
def test_not_exists(self, mock_sftp):
self.assertFalse(self.storage.exists("foo"))
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{"stat.side_effect": FileNotFoundError()},
)
def test_not_path_exists(self, mock_sftp):
self.assertFalse(self.storage._path_exists("root/foo"))
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{"stat.side_effect": socket.timeout()},
)
def test_not_exists_timeout(self, mock_sftp):
with self.assertRaises(socket.timeout):
self.storage.exists("foo")
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{
"listdir_attr.return_value": [
MagicMock(filename="foo", st_mode=stat.S_IFDIR),
MagicMock(filename="bar", st_mode=None),
]
},
)
def test_listdir(self, mock_sftp):
dirs, files = self.storage.listdir("/")
self.assertTrue(dirs)
self.assertTrue(files)
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{
"stat.return_value.st_size": 42,
},
)
def test_size(self, mock_sftp):
self.assertEqual(self.storage.size("foo"), 42)
def test_url(self):
self.assertEqual(self.storage.url("foo"), "/media/foo")
# Test custom
self.storage.base_url = "http://bar.pt/"
self.assertEqual(self.storage.url("foo"), "http://bar.pt/foo")
# Test error
with self.assertRaises(ValueError):
self.storage.base_url = None
self.storage.url("foo")
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{
"stat.return_value.st_mtime": 1720287559,
"stat.return_value.st_atime": 1720287559,
},
)
def test_times(self, mock_sftp):
self.storage.get_modified_time("foo")
self.storage.get_accessed_time("foo")
@patch("paramiko.transport.Transport", **{"is_active.side_effect": (True, False)})
@patch("storages.backends.sftpstorage.SFTPStorage._connect")
def test_sftp(self, connect, transport):
self.assertIsNone(self.storage.sftp)
self.assertTrue(connect.called)
connect.reset_mock()
self.storage._ssh = paramiko.SSHClient()
self.storage._ssh._transport = transport
self.storage._sftp = True
self.assertTrue(self.storage.sftp)
self.assertFalse(connect.called)
self.assertTrue(self.storage.sftp)
self.assertTrue(connect.called)
def test_override_settings(self):
with override_settings(SFTP_STORAGE_ROOT="foo1"):
storage = sftpstorage.SFTPStorage()
self.assertEqual(storage.root_path, "foo1")
with override_settings(SFTP_STORAGE_ROOT="foo2"):
storage = sftpstorage.SFTPStorage()
self.assertEqual(storage.root_path, "foo2")
def test_override_class_variable(self):
class MyStorage1(sftpstorage.SFTPStorage):
root_path = "foo1"
storage = MyStorage1()
self.assertEqual(storage.root_path, "foo1")
class MyStorage2(sftpstorage.SFTPStorage):
root_path = "foo2"
storage = MyStorage2()
self.assertEqual(storage.root_path, "foo2")
def test_override_init_argument(self):
storage = sftpstorage.SFTPStorage(root_path="foo1")
self.assertEqual(storage.root_path, "foo1")
storage = sftpstorage.SFTPStorage(root_path="foo2")
self.assertEqual(storage.root_path, "foo2")
|
SFTPStorageTest
|
python
|
pytorch__pytorch
|
test/profiler/test_python_tracer.py
|
{
"start": 281,
"end": 3018
}
|
class ____(TestCase):
@skipIfPythonVersionMismatch(lambda major, minor, micro: major == 3 and minor == 12)
def test_method_with_c_function(self):
class A:
method_with_c_function = classmethod(repr)
def get_key(x):
A().method_with_c_function()
time.sleep(1.2)
return len(x)
names = ["Alice", "Bob"]
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], with_stack=True
) as prof:
sorted(names, key=get_key)
with TemporaryFileName(mode="w+") as fname:
prof.export_chrome_trace(fname)
with open(fname) as f:
events = json.load(f)["traceEvents"]
found = False
for event in events:
if (
event.get("cat", "") == "python_function"
and event.get("name", "") == "<built-in function sorted>"
):
duration = event.get("dur", 0)
if duration >= 2000000:
found = True
break
self.assertTrue(found)
@skipIfPythonVersionMismatch(lambda major, minor, micro: major == 3 and minor == 12)
def test_monitoring_callback(self):
vi = sys.version_info
from sys import monitoring
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], with_stack=True
):
name = monitoring.get_tool(2)
if vi.micro < 5:
self.assertEqual(name, "PyTorch Profiler")
else:
self.assertEqual(name, None)
name = monitoring.get_tool(2)
self.assertEqual(name, None)
def test_unexpected_c_return_events(self):
code = """
import threading
import time
import torch
from threading import Event, Lock
lock = Lock()
lock.acquire()
event1 = Event()
event2 = Event()
event3 = Event()
def run():
event1.set()
event2.wait()
lock.acquire()
event3.set()
threading.Thread(target=run).start()
with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU], with_stack=True):
event1.wait()
event2.set()
time.sleep(1)
with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU], with_stack=True):
lock.release()
event3.wait()
"""
result = subprocess.run(
[sys.executable, "-c", code], capture_output=True, text=True, check=True
)
self.assertFalse(
"Python replay stack is empty during pop operation" in result.stderr
)
if __name__ == "__main__":
run_tests()
|
TestPythonTracer
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 045. 二叉树最底层最左边的值/Solution.py
|
{
"start": 192,
"end": 638
}
|
class ____:
def findBottomLeftValue(self, root: TreeNode) -> int:
q = deque([root])
ans = -1
while q:
n = len(q)
for i in range(n):
node = q.popleft()
if i == 0:
ans = node.val
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
return ans
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol26.py
|
{
"start": 561,
"end": 884
}
|
class ____(Protocol[_T_co]):
@overload
def __getitem__(self, index: int, /) -> "_T_co | NestedSequence[_T_co]": ...
@overload
def __getitem__(self, index: slice, /) -> "NestedSequence[_T_co]": ...
def func(t: TupleLike[int]):
x: int | NestedSequence[int] = t
y: NestedSequence[int] = t
|
NestedSequence
|
python
|
modin-project__modin
|
modin/config/envvars.py
|
{
"start": 43351,
"end": 44252
}
|
class ____(EnvironmentVariable, type=bool):
"""
Whether to cast a DataFrame in-place when performing a merge when using hybrid mode.
This flag modifies the behavior of a cast performed on operations involving more
than one type of query compiler. If enabled the actual cast will be performed in-place
and the input DataFrame will have a new backend. If disabled the original DataFrame
will remain on the same underlying engine.
"""
varname = "MODIN_BACKEND_MERGE_CAST_IN_PLACE"
default = True
@classmethod
def enable(cls) -> None:
"""Enable casting in place when performing a merge operation betwen two different compilers."""
cls.put(True)
@classmethod
def disable(cls) -> None:
"""Disable casting in place when performing a merge operation betwen two different compilers."""
cls.put(False)
|
BackendMergeCastInPlace
|
python
|
ray-project__ray
|
release/llm_tests/serve/benchmark/firehose_utils.py
|
{
"start": 630,
"end": 2697
}
|
class ____(BaseModel):
record_name: RecordName
record_metrics: Dict[str, Any]
@field_validator("record_name", mode="before")
def validate_record_name(cls, v):
if isinstance(v, str):
return RecordName(v)
return v
def write(self, verbose: bool = False):
final_result = {
"_table": DEFAULT_TABLE_NAME,
"name": str(self.record_name.value),
"branch": os.environ.get("BUILDKITE_BRANCH", ""),
"commit": ray.__commit__,
"report_timestamp_ms": int(time.time() * 1000),
"results": {**self.record_metrics},
}
if verbose:
print(
"Writing final result to AWS Firehose:",
json.dumps(final_result, indent=4, sort_keys=True),
sep="\n",
)
# Add newline character to separate records
data = json.dumps(final_result) + "\n"
# Need to assume the role in order to share access to the Firehose
sts_client = boto3.client("sts")
assumed_role = sts_client.assume_role(
RoleArn="arn:aws:iam::830883877497:role/service-role/KinesisFirehoseServiceRole-rayllm-ci-res-us-west-2-1728664186256",
RoleSessionName="FirehosePutRecordSession",
)
credentials = assumed_role["Credentials"]
# Use the assumed credentials to create a Firehose client
firehose_client = boto3.client(
"firehose",
region_name="us-west-2",
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
)
response = firehose_client.put_record(
DeliveryStreamName=STREAM_NAME, Record={"Data": data}
)
if verbose:
print("PutRecord response:")
print(response)
# Add some delay to make sure timestamps are unique ints.
time.sleep(SLEEP_BETWEEN_FIREHOSE_WRITES_MS / 1000)
|
FirehoseRecord
|
python
|
hynek__structlog
|
src/structlog/tracebacks.py
|
{
"start": 10907,
"end": 16573
}
|
class ____:
"""
Return a list of exception stack dictionaries for an exception.
These dictionaries are based on :class:`Stack` instances generated by
:func:`extract()` and can be dumped to JSON.
Args:
show_locals:
Whether or not to include the values of a stack frame's local
variables.
locals_max_length:
Maximum length of containers before abbreviating, or ``None`` for
no abbreviation.
locals_max_string:
Maximum length of string before truncating, or ``None`` to disable
truncating.
locals_hide_dunder:
Hide locals prefixed with double underscore.
Defaults to True.
locals_hide_sunder:
Hide locals prefixed with single underscore.
This implies hiding *locals_hide_dunder*.
Defaults to False.
suppress:
Optional sequence of modules or paths for which to suppress the
display of locals even if *show_locals* is ``True``.
max_frames:
Maximum number of frames in each stack. Frames are removed from
the inside out. The idea is, that the first frames represent your
code responsible for the exception and last frames the code where
the exception actually happened. With larger web frameworks, this
does not always work, so you should stick with the default.
use_rich: If ``True`` (the default), use rich_ to compute the repr of
locals. If ``False`` or if rich_ is not installed, fall back to
a simpler algorithm.
.. seealso::
:doc:`exceptions` for a broader explanation of *structlog*'s exception
features.
.. versionchanged:: 24.3.0
Added *locals_max_length*, *locals_hide_sunder*, *locals_hide_dunder*,
*suppress* and *use_rich* arguments.
.. versionchanged:: 25.1.0
*locals_max_length* and *locals_max_string* may be None to disable
truncation.
.. versionchanged:: 25.4.0
Handle exception groups.
"""
def __init__(
self,
*,
show_locals: bool = SHOW_LOCALS,
locals_max_length: int = LOCALS_MAX_LENGTH,
locals_max_string: int = LOCALS_MAX_STRING,
locals_hide_dunder: bool = True,
locals_hide_sunder: bool = False,
suppress: Iterable[str | ModuleType] = (),
max_frames: int = MAX_FRAMES,
use_rich: bool = True,
) -> None:
if locals_max_length is not None and locals_max_length < 0:
msg = f'"locals_max_length" must be >= 0: {locals_max_length}'
raise ValueError(msg)
if locals_max_string is not None and locals_max_string < 0:
msg = f'"locals_max_string" must be >= 0: {locals_max_string}'
raise ValueError(msg)
if max_frames < 2:
msg = f'"max_frames" must be >= 2: {max_frames}'
raise ValueError(msg)
self.show_locals = show_locals
self.locals_max_length = locals_max_length
self.locals_max_string = locals_max_string
self.locals_hide_dunder = locals_hide_dunder
self.locals_hide_sunder = locals_hide_sunder
self.suppress: Sequence[str] = []
for suppress_entity in suppress:
if not isinstance(suppress_entity, str):
if suppress_entity.__file__ is None:
msg = (
f'"suppress" item {suppress_entity!r} must be a '
f"module with '__file__' attribute"
)
raise ValueError(msg)
path = os.path.dirname(suppress_entity.__file__)
else:
path = suppress_entity
path = os.path.normpath(os.path.abspath(path))
self.suppress.append(path)
self.max_frames = max_frames
self.use_rich = use_rich
def __call__(self, exc_info: ExcInfo) -> list[dict[str, Any]]:
trace = extract(
*exc_info,
show_locals=self.show_locals,
locals_max_length=self.locals_max_length,
locals_max_string=self.locals_max_string,
locals_hide_dunder=self.locals_hide_dunder,
locals_hide_sunder=self.locals_hide_sunder,
use_rich=self.use_rich,
)
for stack in trace.stacks:
if len(stack.frames) <= self.max_frames:
continue
half = (
self.max_frames // 2
) # Force int division to handle odd numbers correctly
fake_frame = Frame(
filename="",
lineno=-1,
name=f"Skipped frames: {len(stack.frames) - (2 * half)}",
)
stack.frames[:] = [
*stack.frames[:half],
fake_frame,
*stack.frames[-half:],
]
return self._as_dict(trace)
def _as_dict(self, trace: Trace) -> list[dict[str, Any]]:
stack_dicts = []
for stack in trace.stacks:
stack_dict = asdict(stack)
for frame_dict in stack_dict["frames"]:
if frame_dict["locals"] is None or any(
frame_dict["filename"].startswith(path)
for path in self.suppress
):
del frame_dict["locals"]
if stack.is_group:
stack_dict["exceptions"] = [
self._as_dict(t) for t in stack.exceptions
]
stack_dicts.append(stack_dict)
return stack_dicts
|
ExceptionDictTransformer
|
python
|
rq__rq
|
tests/__init__.py
|
{
"start": 1342,
"end": 2127
}
|
class ____(unittest.TestCase):
    """Base class to inherit test cases from for RQ.

    It sets up the Redis connection (available via self.connection), turns off
    logging to the terminal and flushes the Redis database before and after
    running each test.
    """

    @classmethod
    def setUpClass(cls):
        # Set up connection to Redis (shared by all tests in the class).
        cls.connection = find_empty_redis_database()

        # Shut up logging
        logging.disable(logging.ERROR)

    def setUp(self):
        # Flush beforehand (we like our hygiene)
        self.connection.flushdb()

    def tearDown(self):
        # Flush afterwards
        self.connection.flushdb()

    @classmethod
    def tearDownClass(cls):
        # Restore normal logging and release the shared connection.
        logging.disable(logging.NOTSET)
        cls.connection.close()
|
RQTestCase
|
python
|
django__django
|
django/tasks/base.py
|
{
"start": 4721,
"end": 5272
}
|
class ____:
    """Record of a task failure: the dotted import path of the raised
    exception class plus its formatted traceback text."""

    exception_class_path: str
    traceback: str

    @property
    def exception_class(self):
        """Resolve the exception class lazily from its dotted path.

        Raises ValueError when the path does not point at a subclass of
        BaseException.
        """
        resolved = import_string(self.exception_class_path)
        # `isclass` is checked first so `issubclass` never sees a non-class.
        is_valid = isclass(resolved) and issubclass(resolved, BaseException)
        if not is_valid:
            raise ValueError(
                f"{self.exception_class_path!r} does not reference a valid exception."
            )
        return resolved
@dataclass(frozen=True, slots=True, kw_only=True)
|
TaskError
|
python
|
sympy__sympy
|
sympy/codegen/fnodes.py
|
{
"start": 17022,
"end": 17784
}
|
class ____(Token):
    """ AST node explicitly mapped to a fortran "return".

    Explanation
    ===========

    Because a return statement in fortran is different from C, and
    in order to aid reuse of our codegen ASTs the ordinary
    ``.codegen.ast.Return`` is interpreted as assignment to
    the result variable of the function. If one for some reason needs
    to generate a fortran RETURN statement, this node should be used.

    Examples
    ========

    >>> from sympy.codegen.fnodes import FortranReturn
    >>> from sympy import fcode
    >>> fcode(FortranReturn('x'))
    ' return x'

    """
    # Single field: the value carried by the RETURN statement.
    __slots__ = _fields = ('return_value',)
    # Defaults to the `none` sentinel, i.e. a bare RETURN with no value.
    defaults = {'return_value': none}
    # Sympify the constructor argument so plain strings/numbers become SymPy objects.
    _construct_return_value = staticmethod(sympify)
|
FortranReturn
|
python
|
ApeWorX__ape
|
src/ape/utils/abi.py
|
{
"start": 10577,
"end": 14702
}
|
class ____:
    """
    A class for contract return values using the struct data-structure.
    """

    # NOTE(review): these are placeholder implementations. Concrete structs
    # built by ``create_struct`` supply real ``items``/``__setitem__`` through
    # the dataclass namespace; this base exists mainly for isinstance checks.

    def items(self) -> dict:
        """Override"""
        return {}

    def __setitem__(self, key, value):
        """Override"""
def create_struct(name: str, types: Sequence[ABIType], output_values: Sequence) -> Any:
    """
    Create a dataclass representing an ABI struct that can be used as inputs or outputs.
    The struct properties can be accessed via ``.`` notation, as keys in a dictionary, or
    numeric tuple access.

    **NOTE**: This method assumes you already know the values to give to the struct
    properties.

    Args:
        name (str): The name of the struct.
        types (list[ABIType]): The types of values in the struct.
        output_values (list[Any]): The struct property values.

    Returns:
        Any: The struct dataclass.
    """

    def get_item(struct, key) -> Any:
        # NOTE: Allow struct to function as a tuple and dict as well
        struct_values = tuple(getattr(struct, field) for field in struct.__dataclass_fields__)
        if isinstance(key, str):
            return dict(zip(struct.__dataclass_fields__, struct_values))[key]

        return struct_values[key]

    def set_item(struct, key, value):
        if isinstance(key, str):
            setattr(struct, key, value)
        else:
            # BUGFIX: resolve the field *name* at the given index. Previously
            # this indexed into the tuple of field *values* and passed that
            # value to setattr as the attribute name, so integer-key
            # assignment silently created a bogus attribute instead of
            # updating the intended field.
            field_to_set = list(struct.__dataclass_fields__)[key]
            setattr(struct, field_to_set, value)

    def contains(struct, key):
        return key in struct.__dataclass_fields__

    def is_equal(struct, other) -> bool:
        # Structs compare equal to dict-likes with matching keys/values and
        # to sequences with matching values in field order.
        if not hasattr(other, "__len__"):
            return NotImplemented

        _len = len(other)
        if _len != len(struct):
            return False

        if hasattr(other, "items"):
            # Struct or dictionary.
            for key, value in other.items():
                if key not in struct:
                    # Different object.
                    return False

                if struct[key] != value:
                    # Mismatched properties.
                    return False

            # Both objects represent the same struct.
            return True

        elif isinstance(other, (list, tuple)):
            # Allows comparing structs with sequence types.
            # NOTE: The order of the expected sequence matters!
            for itm1, itm2 in zip(struct.values(), other):
                if itm1 != itm2:
                    return False

            return True

        else:
            return NotImplemented

    def length(struct) -> int:
        return len(struct.__dataclass_fields__)

    def items(struct) -> list[tuple]:
        # Iterate keys only; the values were unused (PERF102).
        return [(k, struct[k]) for k in struct.__dataclass_fields__]

    def values(struct) -> list[Any]:
        return [x[1] for x in struct.items()]

    def reduce(struct) -> tuple:
        # NOTE: pickling reconstructs from the *original* output_values; any
        # in-place mutation of the struct is not preserved across a round-trip.
        return (create_struct, (name, types, output_values))

    # NOTE: Should never be "_{i}", but mypy complains and we need a unique value
    properties = [m.name or f"_{i}" for i, m in enumerate(types)]
    methods = {
        "__eq__": is_equal,
        "__getitem__": get_item,
        "__setitem__": set_item,
        "__contains__": contains,
        "__len__": length,
        "__reduce__": reduce,
        "items": items,
        "values": values,
    }

    if conflicts := [p for p in properties if p in methods]:
        # A field named e.g. "items" would shadow our helper; drop the helper
        # so the field wins, and log which conveniences were lost.
        conflicts_str = ", ".join(conflicts)
        logger.debug(
            "The following methods are unavailable on the struct "
            f"due to having the same name as a field: {conflicts_str}"
        )
        for conflict in conflicts:
            del methods[conflict]

    struct_def = make_dataclass(
        name,
        properties,
        namespace=methods,
        bases=(Struct,),  # We set a base class for subclass checking elsewhere.
    )

    return struct_def(*output_values)
def is_dynamic_sized_type(abi_type: Union[ABIType, str]) -> bool:
    """Return whether the given ABI type is dynamically sized.

    The type is stringified and run through the module-level ``grammar``
    parser; the parsed node's ``is_dynamic`` flag is returned as-is.
    """
    parsed = grammar.parse(str(abi_type))
    return parsed.is_dynamic
|
Struct
|
python
|
facebook__pyre-check
|
tools/generate_taint_models/model.py
|
{
"start": 11628,
"end": 12229
}
|
class ____(Model):
    """Model emitting a ``target: annotation = ...`` assignment stub."""

    annotation: str
    target: str

    def __init__(self, annotation: str, target: str) -> None:
        # A hyphen cannot appear in a valid Python assignment target.
        if "-" in target:
            raise ValueError("The target is not supported")
        self.annotation = annotation
        self.target = target

    def __str__(self) -> str:
        return f"{self.target}: {self.annotation} = ..."

    def __eq__(self, other: object) -> bool:
        # Equality (like the hash below) deliberately keys on target only.
        return isinstance(other, AssignmentModel) and self.target == other.target

    def __hash__(self) -> int:
        return hash(self.target)
|
AssignmentModel
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_ufunc.py
|
{
"start": 7330,
"end": 7840
}
|
class ____(Benchmark):
    """ Benchmark for the methods which take an argument
    """

    # Benchmark grid: each dunder method is timed against every TYPES1 dtype
    # except the complex ones (which are filtered out here).
    params = [['__floordiv__', '__mod__'],
              [dt for dt in TYPES1 if not dt.startswith('complex')]]
    param_names = ['methods', 'npdtypes']
    timeout = 10

    def setup(self, methname, npdtypes):
        # Two same-dtype operands taken from the shared squares fixture.
        values = get_squares_().get(npdtypes)
        self.xargs = [values[0], values[1]]

    def time_ndarray_meth(self, methname, npdtypes):
        # Dispatch through operator so the dunder call itself is measured.
        getattr(operator, methname)(*self.xargs)
|
MethodsV1NoComplex
|
python
|
gevent__gevent
|
src/greentest/3.12/test_ftplib.py
|
{
"start": 34335,
"end": 39268
}
|
class ____(TestCase):
    """Specific TLS_FTP class tests."""

    def setUp(self, encoding=DEFAULT_ENCODING):
        # Start a dummy FTPS server and connect a fresh FTP_TLS client to it.
        self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()
        # Explicitly clear the attribute to prevent dangling thread
        self.server = None
        asyncore.close_all(ignore_all=True)

    def test_control_connection(self):
        # AUTH upgrades the plain control socket to TLS.
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)

    def test_data_connection(self):
        # Data channel security follows PROT P / PROT C state.
        # clear text
        with self.client.transfercmd('list') as sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
            self.assertEqual(sock.recv(1024),
                             LIST_DATA.encode(self.client.encoding))
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

        # secured, after PROT P
        self.client.prot_p()
        with self.client.transfercmd('list') as sock:
            self.assertIsInstance(sock, ssl.SSLSocket)
            # consume from SSL socket to finalize handshake and avoid
            # "SSLError [SSL] shutdown while in init"
            self.assertEqual(sock.recv(1024),
                             LIST_DATA.encode(self.client.encoding))
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

        # PROT C is issued, the connection must be in cleartext again
        self.client.prot_c()
        with self.client.transfercmd('list') as sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
            self.assertEqual(sock.recv(1024),
                             LIST_DATA.encode(self.client.encoding))
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

    def test_login(self):
        # login() is supposed to implicitly secure the control connection
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.login()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        # make sure that AUTH TLS doesn't get issued again
        self.client.login()

    def test_auth_issued_twice(self):
        self.client.auth()
        self.assertRaises(ValueError, self.client.auth)

    def test_context(self):
        # FTP_TLS rejects the legacy keyfile/certfile args when a context is
        # given, and uses the supplied context for both control and data.
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        self.assertRaises(TypeError, ftplib.FTP_TLS, keyfile=CERTFILE,
                          context=ctx)
        self.assertRaises(TypeError, ftplib.FTP_TLS, certfile=CERTFILE,
                          context=ctx)
        self.assertRaises(TypeError, ftplib.FTP_TLS, certfile=CERTFILE,
                          keyfile=CERTFILE, context=ctx)

        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIs(self.client.sock.context, ctx)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)

        self.client.prot_p()
        with self.client.transfercmd('list') as sock:
            self.assertIs(sock.context, ctx)
            self.assertIsInstance(sock, ssl.SSLSocket)

    def test_ccc(self):
        # CCC (clear command channel) is only valid after a secure login and
        # leaves the socket unwrapped afterwards.
        self.assertRaises(ValueError, self.client.ccc)
        self.client.login(secure=True)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.ccc()
        self.assertRaises(ValueError, self.client.sock.unwrap)

    @skipUnless(False, "FIXME: bpo-32706")
    def test_check_hostname(self):
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertEqual(ctx.check_hostname, True)
        ctx.load_verify_locations(CAFILE)
        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)

        # 127.0.0.1 doesn't match SAN
        self.client.connect(self.server.host, self.server.port)
        with self.assertRaises(ssl.CertificateError):
            self.client.auth()
        # exception quits connection

        self.client.connect(self.server.host, self.server.port)
        self.client.prot_p()
        with self.assertRaises(ssl.CertificateError):
            with self.client.transfercmd("list") as sock:
                pass
        self.client.quit()

        self.client.connect("localhost", self.server.port)
        self.client.auth()
        self.client.quit()

        self.client.connect("localhost", self.server.port)
        self.client.prot_p()
        with self.client.transfercmd("list") as sock:
            pass
|
TestTLS_FTPClass
|
python
|
nedbat__coveragepy
|
coverage/cmdline.py
|
{
"start": 1217,
"end": 10323
}
|
class ____:
    """A namespace class for individual options we'll build parsers from."""

    # Keep these entries alphabetized (roughly) by the option name as it
    # appears on the command line.

    append = optparse.make_option(
        "-a",
        "--append",
        action="store_true",
        help="Append data to the data file. Otherwise it starts clean each time.",
    )
    branch = optparse.make_option(
        "",
        "--branch",
        action="store_true",
        help="Measure branch coverage in addition to statement coverage.",
    )
    concurrency = optparse.make_option(
        "",
        "--concurrency",
        action="store",
        metavar="LIBS",
        help=oneline(
            """
            Properly measure code using a concurrency library.
            Valid values are: {}, or a comma-list of them.
            """
        ).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))),
    )
    context = optparse.make_option(
        "",
        "--context",
        action="store",
        metavar="LABEL",
        help="The context label to record for this coverage run.",
    )
    contexts = optparse.make_option(
        "",
        "--contexts",
        action="store",
        metavar="REGEX1,REGEX2,...",
        help=oneline(
            """
            Only display data from lines covered in the given contexts.
            Accepts Python regexes, which must be quoted.
            """
        ),
    )
    # Three variants of --data-file exist because the metavar/help text differ
    # by sub-command (generic, report input, run output).
    datafile = optparse.make_option(
        "",
        "--data-file",
        action="store",
        metavar="DATAFILE",
        help=oneline(
            """
            Base name of the data files to operate on.
            Defaults to '.coverage'. [env: COVERAGE_FILE]
            """
        ),
    )
    # NOTE(review): "datafle_input" is misspelled but kept as-is — other
    # modules may reference Opts.datafle_input; renaming would break them.
    datafle_input = optparse.make_option(
        "",
        "--data-file",
        action="store",
        metavar="INFILE",
        help=oneline(
            """
            Read coverage data for report generation from this file.
            Defaults to '.coverage'. [env: COVERAGE_FILE]
            """
        ),
    )
    datafile_output = optparse.make_option(
        "",
        "--data-file",
        action="store",
        metavar="OUTFILE",
        help=oneline(
            """
            Write the recorded coverage data to this file.
            Defaults to '.coverage'. [env: COVERAGE_FILE]
            """
        ),
    )
    debug = optparse.make_option(
        "",
        "--debug",
        action="store",
        metavar="OPTS",
        help="Debug options, separated by commas. [env: COVERAGE_DEBUG]",
    )
    directory = optparse.make_option(
        "-d",
        "--directory",
        action="store",
        metavar="DIR",
        help="Write the output files to DIR.",
    )
    fail_under = optparse.make_option(
        "",
        "--fail-under",
        action="store",
        metavar="MIN",
        type="float",
        help="Exit with a status of 2 if the total coverage is less than MIN.",
    )
    format = optparse.make_option(
        "",
        "--format",
        action="store",
        metavar="FORMAT",
        help="Output format, either text (default), markdown, or total.",
    )
    help = optparse.make_option(
        "-h",
        "--help",
        action="store_true",
        help="Get help on this command.",
    )
    ignore_errors = optparse.make_option(
        "-i",
        "--ignore-errors",
        action="store_true",
        help="Ignore errors while reading source files.",
    )
    include = optparse.make_option(
        "",
        "--include",
        action="store",
        metavar="PAT1,PAT2,...",
        help=oneline(
            """
            Include only files whose paths match one of these patterns.
            Accepts shell-style wildcards, which must be quoted.
            """
        ),
    )
    keep = optparse.make_option(
        "",
        "--keep",
        action="store_true",
        help="Keep original coverage files, otherwise they are deleted.",
    )
    pylib = optparse.make_option(
        "-L",
        "--pylib",
        action="store_true",
        help=oneline(
            """
            Measure coverage even inside the Python installed library,
            which isn't done by default.
            """
        ),
    )
    # "-m" is reused by two different long options (per sub-command):
    # --show-missing for reports, --module for "run".
    show_missing = optparse.make_option(
        "-m",
        "--show-missing",
        action="store_true",
        help="Show line numbers of statements in each module that weren't executed.",
    )
    module = optparse.make_option(
        "-m",
        "--module",
        action="store_true",
        help=oneline(
            """
            <pyfile> is an importable Python module, not a script path,
            to be run as 'python -m' would run it.
            """
        ),
    )
    omit = optparse.make_option(
        "",
        "--omit",
        action="store",
        metavar="PAT1,PAT2,...",
        help=oneline(
            """
            Omit files whose paths match one of these patterns.
            Accepts shell-style wildcards, which must be quoted.
            """
        ),
    )
    output_xml = optparse.make_option(
        "-o",
        "",
        action="store",
        dest="outfile",
        metavar="OUTFILE",
        help="Write the XML report to this file. Defaults to 'coverage.xml'",
    )
    output_json = optparse.make_option(
        "-o",
        "",
        action="store",
        dest="outfile",
        metavar="OUTFILE",
        help="Write the JSON report to this file. Defaults to 'coverage.json'",
    )
    output_lcov = optparse.make_option(
        "-o",
        "",
        action="store",
        dest="outfile",
        metavar="OUTFILE",
        help="Write the LCOV report to this file. Defaults to 'coverage.lcov'",
    )
    json_pretty_print = optparse.make_option(
        "",
        "--pretty-print",
        action="store_true",
        help="Format the JSON for human readers.",
    )
    parallel_mode = optparse.make_option(
        "-p",
        "--parallel-mode",
        action="store_true",
        help=oneline(
            """
            Append a unique suffix to the data file name to collect separate
            data from multiple processes.
            """
        ),
    )
    precision = optparse.make_option(
        "",
        "--precision",
        action="store",
        metavar="N",
        type=int,
        help=oneline(
            """
            Number of digits after the decimal point to display for
            reported coverage percentages.
            """
        ),
    )
    quiet = optparse.make_option(
        "-q",
        "--quiet",
        action="store_true",
        help="Don't print messages about what is happening.",
    )
    rcfile = optparse.make_option(
        "",
        "--rcfile",
        action="store",
        help=oneline(
            """
            Specify configuration file.
            By default '.coveragerc', 'setup.cfg', 'tox.ini', and
            'pyproject.toml' are tried. [env: COVERAGE_RCFILE]
            """
        ),
    )
    save_signal = optparse.make_option(
        "",
        "--save-signal",
        action="store",
        metavar="SIGNAL",
        choices=["USR1", "USR2"],
        help=oneline(
            """
            Specify a signal that will trigger coverage to write its collected data.
            Supported values are: USR1, USR2. Not available on Windows.
            """
        ),
    )
    show_contexts = optparse.make_option(
        "--show-contexts",
        action="store_true",
        help="Show contexts for covered lines.",
    )
    skip_covered = optparse.make_option(
        "--skip-covered",
        action="store_true",
        help="Skip files with 100% coverage.",
    )
    no_skip_covered = optparse.make_option(
        "--no-skip-covered",
        action="store_false",
        dest="skip_covered",
        help="Disable --skip-covered.",
    )
    skip_empty = optparse.make_option(
        "--skip-empty",
        action="store_true",
        help="Skip files with no code.",
    )
    sort = optparse.make_option(
        "--sort",
        action="store",
        metavar="COLUMN",
        help=oneline(
            """
            Sort the report by the named column: name, stmts, miss, branch, brpart, or cover.
            Default is name.
            """
        ),
    )
    source = optparse.make_option(
        "",
        "--source",
        action="store",
        metavar="SRC1,SRC2,...",
        help="A list of directories or importable names of code to measure.",
    )
    timid = optparse.make_option(
        "",
        "--timid",
        action="store_true",
        help="Use the slower Python trace function core.",
    )
    title = optparse.make_option(
        "",
        "--title",
        action="store",
        metavar="TITLE",
        help="A text string to use as the title on the HTML.",
    )
    version = optparse.make_option(
        "",
        "--version",
        action="store_true",
        help="Display version information and exit.",
    )
Opts
|
python
|
huggingface__transformers
|
src/transformers/models/oneformer/processing_oneformer.py
|
{
"start": 801,
"end": 9013
}
|
class ____(ProcessorMixin):
    r"""
    Constructs an OneFormer processor which wraps [`OneFormerImageProcessor`] and
    [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and
    tokenizer functionalities.

    Args:
        image_processor ([`OneFormerImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]):
            The tokenizer is a required input.
        max_seq_length (`int`, *optional*, defaults to 77):
            Sequence length for input text list.
        task_seq_length (`int`, *optional*, defaults to 77):
            Sequence length for input task token.
    """

    def __init__(
        self, image_processor=None, tokenizer=None, max_seq_length: int = 77, task_seq_length: int = 77, **kwargs
    ):
        self.max_seq_length = max_seq_length
        self.task_seq_length = task_seq_length

        super().__init__(image_processor, tokenizer)

    @staticmethod
    def _validate_inputs(images, task_inputs):
        """Raise if images/task_inputs are missing or a task name is unknown."""
        if task_inputs is None:
            raise ValueError("You have to specify the task_input. Found None.")
        elif images is None:
            raise ValueError("You have to specify the image. Found None.")

        if not all(task in ["semantic", "instance", "panoptic"] for task in task_inputs):
            raise ValueError("task_inputs must be semantic, instance, or panoptic.")

    def _preprocess_text(self, text_list=None, max_length=77):
        """Tokenize a list of strings, masking padded positions to id 0.

        Returns a tensor of shape ``(len(text_list), max_length)``.
        """
        if text_list is None:
            raise ValueError("tokens cannot be None.")
        tokens = self.tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True)

        attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"]

        token_inputs = []
        for attn_mask, input_id in zip(attention_masks, input_ids):
            # Elementwise product zeroes out token ids at padded positions.
            token = torch.tensor(attn_mask) * torch.tensor(input_id)
            token_inputs.append(token.unsqueeze(0))

        token_inputs = torch.cat(token_inputs, dim=0)
        return token_inputs

    def _add_task_and_text_tokens(self, encoded_inputs, task_inputs):
        """Tokenize task prompts (and any ``text_inputs`` the image processor
        produced) into ``encoded_inputs``.

        Extracted from ``__call__``/``encode_inputs``, which previously
        duplicated this logic line-for-line; both now share one implementation.
        """
        if isinstance(task_inputs, str):
            task_inputs = [task_inputs]

        if isinstance(task_inputs, list) and all(isinstance(task_input, str) for task_input in task_inputs):
            task_token_inputs = []
            for task in task_inputs:
                # Tasks are embedded in the fixed prompt template.
                task_input = f"the task is {task}"
                task_token_inputs.append(task_input)
            encoded_inputs["task_inputs"] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length)
        else:
            raise TypeError("Task Inputs should be a string or a list of strings.")

        if hasattr(encoded_inputs, "text_inputs"):
            texts_list = encoded_inputs.text_inputs

            text_inputs = []
            for texts in texts_list:
                text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length)
                text_inputs.append(text_input_list.unsqueeze(0))

            encoded_inputs["text_inputs"] = torch.cat(text_inputs, dim=0)

        return encoded_inputs

    def __call__(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
        """
        Main method to prepare for the model one or several task input(s) and image(s). This method forwards the
        `task_inputs` and `kwargs` arguments to CLIPTokenizer's [`~CLIPTokenizer.__call__`] if `task_inputs` is not
        `None` to encode. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        OneFormerImageProcessor's [`~OneFormerImageProcessor.__call__`] if `images` is not `None`. Please refer to the
        docstring of the above two methods for more information.

        Args:
            task_inputs (`str`, `list[str]`):
                The sequence or batch of task_inputs sequences to be encoded. Each sequence can be a string or a list
                of strings of the template "the task is {task}".
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`,
            `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            segmentation_maps (`ImageInput`, *optional*):
                The corresponding semantic segmentation maps with the pixel-wise annotations.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **task_inputs** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        self._validate_inputs(images, task_inputs)
        encoded_inputs = self.image_processor(images, task_inputs, segmentation_maps, **kwargs)
        return self._add_task_and_text_tokens(encoded_inputs, task_inputs)

    def encode_inputs(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.encode_inputs`] and then tokenizes the
        task_inputs. Please refer to the docstring of this method for more information.
        """
        self._validate_inputs(images, task_inputs)
        encoded_inputs = self.image_processor.encode_inputs(images, task_inputs, segmentation_maps, **kwargs)
        return self._add_task_and_text_tokens(encoded_inputs, task_inputs)

    def post_process_semantic_segmentation(self, *args, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_semantic_segmentation`].
        Please refer to the docstring of this method for more information.
        """
        return self.image_processor.post_process_semantic_segmentation(*args, **kwargs)

    def post_process_instance_segmentation(self, *args, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_instance_segmentation`].
        Please refer to the docstring of this method for more information.
        """
        return self.image_processor.post_process_instance_segmentation(*args, **kwargs)

    def post_process_panoptic_segmentation(self, *args, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_panoptic_segmentation`].
        Please refer to the docstring of this method for more information.
        """
        return self.image_processor.post_process_panoptic_segmentation(*args, **kwargs)
__all__ = ["OneFormerProcessor"]
|
OneFormerProcessor
|
python
|
pytorch__pytorch
|
test/distributed/elastic/rendezvous/api_test.py
|
{
"start": 464,
"end": 7155
}
|
class ____(TestCase):
    """Unit tests for RendezvousParameters construction and typed accessors."""

    def setUp(self) -> None:
        # Baseline constructor arguments; individual tests mutate these
        # before calling _create_params().
        self._backend = "dummy_backend"
        self._endpoint = "dummy_endpoint"
        self._run_id = "dummy_run_id"
        self._min_nodes = 3
        self._max_nodes = 6
        self._kwargs: dict[str, Any] = {}

    def _create_params(self) -> RendezvousParameters:
        # Build a RendezvousParameters from the current test attributes.
        return RendezvousParameters(
            backend=self._backend,
            endpoint=self._endpoint,
            run_id=self._run_id,
            min_nodes=self._min_nodes,
            max_nodes=self._max_nodes,
            **self._kwargs,
        )

    def test_init_initializes_params(self) -> None:
        self._kwargs["dummy_param"] = "x"

        params = self._create_params()

        self.assertEqual(params.backend, self._backend)
        self.assertEqual(params.endpoint, self._endpoint)
        self.assertEqual(params.run_id, self._run_id)
        self.assertEqual(params.min_nodes, self._min_nodes)
        self.assertEqual(params.max_nodes, self._max_nodes)

        self.assertEqual(params.get("dummy_param"), "x")

    def test_init_initializes_params_if_min_nodes_equals_to_1(self) -> None:
        self._min_nodes = 1

        params = self._create_params()

        self.assertEqual(params.min_nodes, self._min_nodes)
        self.assertEqual(params.max_nodes, self._max_nodes)

    def test_init_initializes_params_if_min_and_max_nodes_are_equal(self) -> None:
        self._max_nodes = 3

        params = self._create_params()

        self.assertEqual(params.min_nodes, self._min_nodes)
        self.assertEqual(params.max_nodes, self._max_nodes)

    def test_init_raises_error_if_backend_is_none_or_empty(self) -> None:
        for backend in [None, ""]:
            with self.subTest(backend=backend):
                self._backend = backend  # type: ignore[assignment]

                with self.assertRaisesRegex(
                    ValueError,
                    r"^The rendezvous backend name must be a non-empty string.$",
                ):
                    self._create_params()

    def test_init_raises_error_if_min_nodes_is_less_than_1(self) -> None:
        for min_nodes in [0, -1, -5]:
            with self.subTest(min_nodes=min_nodes):
                self._min_nodes = min_nodes

                with self.assertRaisesRegex(
                    ValueError,
                    rf"^The minimum number of rendezvous nodes \({min_nodes}\) must be greater "
                    rf"than zero.$",
                ):
                    self._create_params()

    def test_init_raises_error_if_max_nodes_is_less_than_min_nodes(self) -> None:
        for max_nodes in [2, 1, -2]:
            with self.subTest(max_nodes=max_nodes):
                self._max_nodes = max_nodes

                with self.assertRaisesRegex(
                    ValueError,
                    rf"^The maximum number of rendezvous nodes \({max_nodes}\) must be greater "
                    "than or equal to the minimum number of rendezvous nodes "
                    rf"\({self._min_nodes}\).$",
                ):
                    self._create_params()

    def test_get_returns_none_if_key_does_not_exist(self) -> None:
        params = self._create_params()

        self.assertIsNone(params.get("dummy_param"))

    def test_get_returns_default_if_key_does_not_exist(self) -> None:
        params = self._create_params()

        self.assertEqual(params.get("dummy_param", default="x"), "x")

    def test_get_as_bool_returns_none_if_key_does_not_exist(self) -> None:
        params = self._create_params()

        self.assertIsNone(params.get_as_bool("dummy_param"))

    def test_get_as_bool_returns_default_if_key_does_not_exist(self) -> None:
        params = self._create_params()

        self.assertTrue(params.get_as_bool("dummy_param", default=True))

    def test_get_as_bool_returns_true_if_value_represents_true(self) -> None:
        # Truthy spellings are case-insensitive.
        for value in ["1", "True", "tRue", "T", "t", "yEs", "Y", 1, True]:
            with self.subTest(value=value):
                self._kwargs["dummy_param"] = value

                params = self._create_params()

                self.assertTrue(params.get_as_bool("dummy_param"))

    def test_get_as_bool_returns_false_if_value_represents_false(self) -> None:
        for value in ["0", "False", "faLse", "F", "f", "nO", "N", 0, False]:
            with self.subTest(value=value):
                self._kwargs["dummy_param"] = value

                params = self._create_params()

                self.assertFalse(params.get_as_bool("dummy_param"))

    def test_get_as_bool_raises_error_if_value_is_invalid(self) -> None:
        for value in [
            "01",
            "Flse",  # codespell:ignore
            "Ture",  # codespell:ignore
            "g",
            "4",
            "_",
            "truefalse",
            2,
            -1,
        ]:
            with self.subTest(value=value):
                self._kwargs["dummy_param"] = value

                params = self._create_params()

                with self.assertRaisesRegex(
                    ValueError,
                    r"^The rendezvous configuration option 'dummy_param' does not represent a "
                    r"valid boolean value.$",
                ):
                    params.get_as_bool("dummy_param")

    def test_get_as_int_returns_none_if_key_does_not_exist(self) -> None:
        params = self._create_params()

        self.assertIsNone(params.get_as_int("dummy_param"))

    def test_get_as_int_returns_default_if_key_does_not_exist(self) -> None:
        params = self._create_params()

        self.assertEqual(params.get_as_int("dummy_param", default=5), 5)

    def test_get_as_int_returns_integer_if_value_represents_integer(self) -> None:
        # Surrounding whitespace is accepted for string values.
        for value in ["0", "-10", "5", " 4", "4 ", " 4 ", 0, -4, 3]:
            with self.subTest(value=value):
                self._kwargs["dummy_param"] = value

                params = self._create_params()

                self.assertEqual(
                    params.get_as_int("dummy_param"), int(cast(SupportsInt, value))
                )

    def test_get_as_int_raises_error_if_value_is_invalid(self) -> None:
        for value in ["a", "0a", "3b", "abc"]:
            with self.subTest(value=value):
                self._kwargs["dummy_param"] = value

                params = self._create_params()

                with self.assertRaisesRegex(
                    ValueError,
                    r"^The rendezvous configuration option 'dummy_param' does not represent a "
                    r"valid integer value.$",
                ):
                    params.get_as_int("dummy_param")
|
RendezvousParametersTest
|
python
|
ray-project__ray
|
python/ray/train/tests/test_trainer_restore.py
|
{
"start": 1543,
"end": 12494
}
|
class ____(Callback):
    """Inject failure at the configured iteration number.

    Raises ``_TestSpecificError`` the first time a trial reports at least
    ``num_iters`` results while the marker file exists; the marker is
    removed so the failure fires only once.
    """

    def __init__(self, fail_marker_path: Path, num_iters: int = 2):
        self.num_iters = num_iters
        self.fail_marker_path = fail_marker_path

    def on_trial_result(
        self, iteration: int, trials: List[Trial], trial: Trial, result: Dict, **info
    ):
        marker_present = self.fail_marker_path.exists()
        reached_limit = (
            trial.last_result.get("training_iteration", -1) >= self.num_iters
        )
        if marker_present and reached_limit:
            print(f"Failing after {self.num_iters} iters...")
            self.fail_marker_path.unlink()
            raise _TestSpecificError
def test_data_parallel_trainer_restore(ray_start_4_cpus, tmpdir):
    """Restoring a DataParallelTrainer with object refs captured in the train fn
    or config works by re-specifying them.

    Success criteria:
    - Restored to the correct iteration. (1 iteration before crash, 1 after restore.)
    - Results are being logged to the same directory as before.
    """
    dataset_size = 10
    num_workers = 2

    def create_train_fn_and_config():
        # The object ref is captured both by closure and via the config dict,
        # so restore must re-supply both.
        obj_ref = ray.put({"test": 1})

        def train_fn(config):
            assert ray.get(obj_ref)["test"] == 1
            assert ray.get(config["obj_ref"])["test"] == 1
            ds = train.get_dataset_shard("train")
            # Each worker sees an even split of the dataset.
            assert (
                sum([len(batch["feature"]) for batch in ds.iter_batches()])
                == dataset_size // num_workers
            )
            _failing_train_fn(config)

        train_loop_config = {"obj_ref": obj_ref}
        return train_fn, train_loop_config

    datasets = {"train": ray.data.from_items([{"feature": i} for i in range(10)])}

    train_fn, train_loop_config = create_train_fn_and_config()
    trainer = DataParallelTrainer(
        train_loop_per_worker=train_fn,
        train_loop_config=train_loop_config,
        datasets=datasets,
        scaling_config=ScalingConfig(num_workers=num_workers),
        run_config=RunConfig(
            name="data_parallel_restore_test",
            storage_path=str(tmpdir),
            checkpoint_config=CheckpointConfig(num_to_keep=1),
        ),
    )
    with pytest.raises(TrainingFailedError) as exc_info:
        result = trainer.fit()
    assert isinstance(exc_info.value.__cause__, _TestSpecificError)

    # Include an explicit cluster shutdown.
    # Otherwise, the previously registered object references will still exist,
    # and the test may trivially pass.
    ray.shutdown()
    ray.init(num_cpus=4)

    train_fn, train_loop_config = create_train_fn_and_config()
    datasets = {"train": ray.data.from_items([{"feature": i} for i in range(10)])}
    trainer = DataParallelTrainer.restore(
        str(tmpdir / "data_parallel_restore_test"),
        train_loop_per_worker=train_fn,
        train_loop_config=train_loop_config,
        datasets=datasets,
    )
    result = trainer.fit()

    # One more iteration after the restore, logged under the same experiment dir.
    assert not result.error
    assert result.metrics["training_iteration"] == 2
    assert result.metrics["iterations_since_restore"] == 1
    assert tmpdir / "data_parallel_restore_test" in Path(result.path).parents
@pytest.mark.parametrize("trainer_cls", [XGBoostTrainer, LightGBMTrainer])
def test_gbdt_trainer_restore(ray_start_6_cpus, tmp_path, trainer_cls, monkeypatch):
"""Tests restoring gradient boosted decision tree trainers.
Success criteria:
- Picks up at the right iteration. 2 before crash. 3 after. 5 total trees.
- Results are being logged to the same directory as before.
"""
monkeypatch.setenv("TUNE_GLOBAL_CHECKPOINT_S", "0")
exp_name = f"{trainer_cls.__name__}_restore_test"
datasets = {
"train": ray.data.from_items([{"x": x, "y": x + 1} for x in range(100)])
}
fail_marker_path = tmp_path / "fail_marker"
fail_marker_path.touch()
trainer = trainer_cls(
label_column="y",
params={
"objective": (
"reg:squarederror" if trainer_cls == XGBoostTrainer else "regression"
)
},
datasets=datasets,
scaling_config=ScalingConfig(
num_workers=2, trainer_resources={"CPU": 0}, resources_per_worker={"CPU": 1}
),
run_config=RunConfig(
storage_path=str(tmp_path),
name=exp_name,
checkpoint_config=CheckpointConfig(
num_to_keep=1, checkpoint_frequency=1, checkpoint_at_end=False
),
callbacks=[FailureInjectionCallback(fail_marker_path, num_iters=2)],
),
num_boost_round=5,
)
with pytest.raises(TrainingFailedError):
result = trainer.fit()
trainer = trainer_cls.restore(str(tmp_path / exp_name), datasets=datasets)
result = trainer.fit()
assert not result.error
assert result.metrics["training_iteration"] == 5
assert result.metrics["iterations_since_restore"] == 3
assert tmp_path / exp_name in Path(result.path).parents
@pytest.mark.parametrize("name", [None, "restore_from_uri"])
def test_restore_from_uri_s3(
ray_start_4_cpus, tmp_path, monkeypatch, mock_s3_bucket_uri, name
):
"""Restoration from S3 should work."""
trainer = DataParallelTrainer(
train_loop_per_worker=lambda config: train.report({"score": 1}),
scaling_config=ScalingConfig(num_workers=2),
run_config=RunConfig(name=name, storage_path=mock_s3_bucket_uri),
)
result = trainer.fit()
if name is None:
name = Path(result.path).parent.name
# Restore from S3
assert DataParallelTrainer.can_restore(str(URI(mock_s3_bucket_uri) / name))
DataParallelTrainer.restore(str(URI(mock_s3_bucket_uri) / name))
def test_restore_with_datasets(ray_start_4_cpus, tmpdir):
"""Datasets are required to re-specify if they were originally provided."""
datasets = {
"train": ray.data.from_items([{"x": x, "y": x + 1} for x in range(8)]),
"valid": ray.data.from_items([{"x": x, "y": x + 1} for x in range(8)]),
}
trainer = DataParallelTrainer(
train_loop_per_worker=lambda config: train.report({"score": 1}),
datasets=datasets,
scaling_config=ScalingConfig(num_workers=2),
run_config=RunConfig(name="datasets_respecify_test"),
)
trainer._save(pyarrow.fs.LocalFileSystem(), str(tmpdir))
# Restore should complain, if all the datasets don't get passed in again
with pytest.raises(ValueError):
DataParallelTrainer.restore(str(tmpdir))
with pytest.raises(ValueError):
DataParallelTrainer.restore(str(tmpdir), datasets={"train": datasets["train"]})
with pytest.raises(ValueError):
DataParallelTrainer.restore(
str(tmpdir),
datasets={"train": datasets["train"], "invalid_key": datasets["valid"]},
)
trainer = DataParallelTrainer.restore(str(tmpdir), datasets=datasets)
def test_restore_from_invalid_dir(tmpdir):
"""Should raise an error if the restore directory doesn't exist or is invalid."""
with pytest.raises(ValueError):
BaseTrainer.restore(str(tmpdir))
with pytest.raises(ValueError):
BaseTrainer.restore("mock:///not/found")
def test_trainer_can_restore_utility(tmp_path):
"""Make sure that `can_restore` detects an existing experiment at a
local/remote path and only returns True if it's at the Train experiment dir root.
"""
name = "exp_name"
path = tmp_path / name
assert not DataParallelTrainer.can_restore(path)
trainer = DataParallelTrainer(
train_loop_per_worker=lambda config: train.report({"score": 1}),
scaling_config=ScalingConfig(num_workers=1),
)
(tmp_path / name).mkdir(exist_ok=True)
trainer._save(pyarrow.fs.LocalFileSystem(), str(tmp_path / name))
assert DataParallelTrainer.can_restore(path)
@pytest.mark.parametrize("eventual_success", [True, False])
def test_retry_with_max_failures(ray_start_4_cpus, eventual_success):
"""Test auto-resume of a Train run when setting max_failures > 0."""
num_failures = 2 if eventual_success else 3
max_retries = 2
final_iter = 10
def train_func():
ckpt = train.get_checkpoint()
itr = 1
restore_count = 0
if ckpt:
ckpt = load_dict_checkpoint(ckpt)
itr = ckpt["iter"] + 1
restore_count = ckpt["restore_count"] + 1
for i in range(itr, final_iter + 1):
with create_dict_checkpoint(
dict(iter=i, restore_count=restore_count)
) as checkpoint:
train.report(dict(test=i, training_iteration=i), checkpoint=checkpoint)
if restore_count < num_failures:
raise RuntimeError("try to fail me")
trainer = DataParallelTrainer(
train_func,
scaling_config=ScalingConfig(num_workers=2),
run_config=RunConfig(
failure_config=train.FailureConfig(max_failures=max_retries)
),
)
if not eventual_success:
# If we gave up due to hitting our max retry attempts,
# then `trainer.fit` should raise the last error we encountered.
with pytest.raises(TrainingFailedError):
trainer.fit()
else:
# If we encounter errors but eventually succeed, `trainer.fit` should NOT
# raise any of those errors.
result = trainer.fit()
assert not result.error
checkpoint = load_dict_checkpoint(result.checkpoint)
assert checkpoint["iter"] == final_iter
def test_restoration_after_termination(tmp_path):
"""Test that the train loop can be run again if restoring the trainer
after the run finished running successfully."""
def train_func_per_worker(config, num_epochs=5):
ckpt = train.get_checkpoint()
start_iter = 1
if ckpt:
ckpt = load_dict_checkpoint(ckpt)
start_iter = ckpt["iter"] + 1
for i in range(start_iter, num_epochs + 1):
with create_dict_checkpoint(dict(iter=i)) as checkpoint:
train.report(dict(iter=i), checkpoint=checkpoint)
name = "exp_name"
path = tmp_path / name
trainer = DataParallelTrainer(
train_loop_per_worker=train_func_per_worker,
scaling_config=ScalingConfig(num_workers=1),
run_config=RunConfig(
name=name,
storage_path=tmp_path,
checkpoint_config=CheckpointConfig(num_to_keep=2),
),
)
result = trainer.fit()
assert result.metrics["iter"] == 5
restored_trainer = DataParallelTrainer.restore(
str(path), train_loop_per_worker=partial(train_func_per_worker, num_epochs=10)
)
new_result = restored_trainer.fit()
assert new_result.metrics["iter"] == 10
assert new_result.path == result.path
assert len(list(Path(new_result.path).glob("checkpoint*"))) == 2
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
|
FailureInjectionCallback
|
python
|
astropy__astropy
|
astropy/cosmology/_src/traits/scale_factor.py
|
{
"start": 298,
"end": 1635
}
|
class ____:
"""The trait for computing the cosmological scale factor.
The scale factor is defined as :math:`a = a_0 / (1 + z)`.
"""
@property
def scale_factor0(self) -> u.Quantity:
r"""Scale factor at redshift 0.
The scale factor is defined as :math:`a = a_0 / (1 + z)`. The common convention
is to set :math:`a_0 = 1`. However, in some cases, like in some old CMB papers,
:math:`a_0` is used to normalize `a` to be a convenient number at the redshift
of interest for that paper. Explicitly using :math:`a_0` in both calculation and
code avoids ambiguity.
"""
return 1 << u.one
def scale_factor(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
"""Compute the scale factor at redshift ``z``.
The scale factor is defined as :math:`a = a_0 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'] | array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
|Quantity|
Scale factor at each input redshift.
"""
return self.scale_factor0 / (aszarr(z) + 1)
|
ScaleFactor
|
python
|
jina-ai__jina
|
tests/integration/dynamic_batching/test_dynamic_batching_config.py
|
{
"start": 160,
"end": 1315
}
|
class ____(Executor):
@requests(on=['/cat', '/kitten'])
@dynamic_batching(preferred_batch_size=4, timeout=2000)
def cat_fun(self, docs, **kwargs):
return DocumentArray([Document(text='cat')])
@requests()
@dynamic_batching(preferred_batch_size=10, timeout=10000)
def default_fun(self, docs, **kwargs):
return DocumentArray([Document(text='bar')])
def test_save_dynamic_batching_config(tmpdir):
TMPDIR: Path = Path(tmpdir)
f = Flow(port=12345).add(
uses=MyExecutor,
name='exec0',
uses_dynamic_batching={'/foo': {'preferred_batch_size': 2, 'timeout': 4000}},
)
f.save_config(str(TMPDIR / 'flow0.yaml'))
f1 = Flow.load_config(str(TMPDIR / 'flow0.yaml'))
assert (
f._deployment_nodes['exec0'].args.uses_dynamic_batching
== f1._deployment_nodes['exec0'].args.uses_dynamic_batching
)
def test_load_dynamic_batching_config():
f = Flow.load_config(os.path.join(cur_dir, 'flow-dynamic-batching.yaml'))
assert f._deployment_nodes['exec0'].args.uses_dynamic_batching == {
'/foo': {'preferred_batch_size': 2, 'timeout': 4000}
}
|
MyExecutor
|
python
|
docker__docker-py
|
tests/unit/dockertypes_test.py
|
{
"start": 10877,
"end": 11848
}
|
class ____(unittest.TestCase):
def test_create_host_config_dict_logconfig(self):
dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=dct
)
assert 'LogConfig' in config
assert isinstance(config['LogConfig'], LogConfig)
assert dct['type'] == config['LogConfig'].type
def test_create_host_config_obj_logconfig(self):
obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=obj
)
assert 'LogConfig' in config
assert isinstance(config['LogConfig'], LogConfig)
assert obj == config['LogConfig']
def test_logconfig_invalid_config_type(self):
with pytest.raises(ValueError):
LogConfig(type=LogConfig.types.JSON, config='helloworld')
|
LogConfigTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/tracing_utils_test.py
|
{
"start": 1063,
"end": 1448
}
|
class ____(base.Trackable):
def __init__(self):
self.a = variables.Variable(0)
self.b = variables.Variable(1)
def _serialize_to_tensors(self):
return {"a": self.a, "b": self.b}
def _restore_from_tensors(self, restored_tensors):
return control_flow_ops.group(
self.a.assign(restored_tensors["a"]),
self.b.assign(restored_tensors["b"]))
|
MyTrackable
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/group_hashes.py
|
{
"start": 1006,
"end": 5557
}
|
class ____(GroupEndpoint):
publish_status = {
"PUT": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, group: Group) -> Response:
"""
List an Issue's Hashes
``````````````````````
This endpoint lists an issue's hashes, which are the generated
checksums used to aggregate individual events.
:pparam string issue_id: the ID of the issue to retrieve.
:pparam bool full: If this is set to true, the event payload will include the full event body, including the stacktrace.
:auth: required
"""
full = request.GET.get("full", True)
data_fn = partial(
lambda *args, **kwargs: raw_query(*args, **kwargs)["data"],
aggregations=[
("argMax(event_id, timestamp)", None, "event_id"),
("max", "timestamp", "latest_event_timestamp"),
],
filter_keys={"project_id": [group.project_id], "group_id": [group.id]},
groupby=["primary_hash"],
referrer="api.group-hashes",
orderby=["-latest_event_timestamp"],
tenant_ids={"organization_id": group.project.organization_id},
)
handle_results = partial(
self.__handle_results, group.project_id, group.id, request.user, full
)
return self.paginate(
request=request,
on_results=handle_results,
paginator=GenericOffsetPaginator(data_fn=data_fn),
)
def put(self, request: Request, group: Group) -> Response:
"""
Perform an unmerge by reassigning events with hash values corresponding to the given
grouphash ids from being part of the given group to being part of a new group.
Note that if multiple grouphash ids are given, all their corresponding events will end up in
a single new group together, rather than each hash's events ending in their own new group.
"""
grouphash_ids = request.GET.getlist("id")
if not grouphash_ids:
return Response()
grouphashes = list(
GroupHash.objects.filter(
project_id=group.project_id, group=group.id, hash__in=grouphash_ids
)
.exclude(state=GroupHash.State.LOCKED_IN_MIGRATION)
.values_list("hash", flat=True)
)
if not grouphashes:
return Response({"detail": "Already being unmerged"}, status=409)
metrics.incr(
"grouping.unmerge_issues",
sample_rate=1.0,
# We assume that if someone's merged groups, they were all from the same platform
tags={"platform": group.platform or "unknown", "sdk": group.sdk or "unknown"},
)
unmerge.delay(
group.project_id, group.id, None, grouphashes, request.user.id if request.user else None
)
return Response(status=202)
def __handle_results(
self,
project_id: int,
group_id: int,
user: User | RpcUser | AnonymousUser | None,
full: str | bool,
results: Sequence[dict[str, str]],
) -> list[GroupHashesResult]:
primary_hashes = [result["primary_hash"] for result in results]
grouphashes = {
grouphash.hash: grouphash
for grouphash in GroupHash.objects.filter(
project_id=project_id, group_id=group_id, hash__in=primary_hashes
).select_related("_metadata")
}
return [
self.__handle_result(
user, project_id, group_id, full, result, grouphashes.get(result["primary_hash"])
)
for result in results
]
def __handle_result(
self,
user: User | RpcUser | AnonymousUser | None,
project_id: int,
group_id: int,
full: str | bool,
result: dict[str, str],
grouphash: GroupHash | None = None,
) -> GroupHashesResult:
event = eventstore.backend.get_event_by_id(project_id, result["event_id"])
merged_by_seer = bool(
grouphash and grouphash.metadata and grouphash.metadata.seer_matched_grouphash
)
serializer = EventSerializer if full else SimpleEventSerializer
response: GroupHashesResult = {
"id": result["primary_hash"],
"latestEvent": serialize(event, user, serializer()),
"mergedBySeer": merged_by_seer,
}
return response
|
GroupHashesEndpoint
|
python
|
huggingface__transformers
|
src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
|
{
"start": 77460,
"end": 85025
}
|
class ____(SeamlessM4Tv2PreTrainedModel):
def __init__(
self,
config: SeamlessM4Tv2Config,
embed_tokens: Optional[nn.Embedding] = None,
):
r"""
embed_tokens (`nn.Embedding`, *optional*):
Input embedding
"""
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
if embed_tokens is not None:
# if embed_tokens defined, use its shape instead
self.embed_tokens = SeamlessM4Tv2ScaledWordEmbedding(
embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx, embed_scale=embed_scale
)
self.embed_tokens.weight = embed_tokens.weight
else:
self.embed_tokens = SeamlessM4Tv2ScaledWordEmbedding(
self.vocab_size, config.hidden_size, self.padding_idx, embed_scale=embed_scale
)
self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(
self.max_target_positions,
config.hidden_size,
padding_idx=self.padding_idx,
)
layers = []
for i in range(config.decoder_layers):
layers.append(
SeamlessM4Tv2DecoderLayer(
config,
decoder_attention_heads=config.decoder_attention_heads,
decoder_ffn_dim=config.decoder_ffn_dim,
layer_idx=i,
)
)
self.layers = nn.ModuleList(layers)
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input = input_ids
input_shape = input.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
)
use_cache = False
# initialize `past_key_values`
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
attention_mask = _prepare_4d_causal_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask(
encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
# embed positions
positions = self.embed_positions(input, past_key_values_length=past_key_values_length)
hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@auto_docstring(
custom_intro="""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4Tv2DecoderLayer`].
"""
)
|
SeamlessM4Tv2Decoder
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/strategies/_internal/strings.py
|
{
"start": 13823,
"end": 14655
}
|
class ____(SearchStrategy):
def __init__(self, min_size: int, max_size: int | None):
super().__init__()
self.min_size = min_size
self.max_size = (
max_size if max_size is not None else COLLECTION_DEFAULT_MAX_SIZE
)
def do_draw(self, data: ConjectureData) -> bytes:
return data.draw_bytes(self.min_size, self.max_size)
_nonempty_filters = (
*ListStrategy._nonempty_filters,
bytes,
*(getattr(bytes, n) for n in _nonempty_names),
)
_nonempty_and_content_filters = (
*(getattr(bytes, n) for n in _nonempty_and_content_names),
)
def filter(self, condition):
if (new := _string_filter_rewrite(self, bytes, condition)) is not None:
return new
return ListStrategy.filter(self, condition)
|
BytesStrategy
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.