language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/gis_tests/distapp/tests.py | {
"start": 15601,
"end": 31346
} | class ____(FuncTestMixin, TestCase):
fixtures = ["initial"]
@skipUnlessDBFeature("has_Area_function")
def test_area(self):
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [
5437908.90234375,
10183031.4389648,
11254471.0073242,
9881708.91772461,
]
# Tolerance has to be lower for Oracle
tol = 2
for i, z in enumerate(
SouthTexasZipcode.objects.annotate(area=Area("poly")).order_by("name")
):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_simple(self):
"""
Test a simple distance query, with projected coordinates and without
transformation.
"""
lagrange = GEOSGeometry("POINT(805066.295722839 4231496.29461335)", 32140)
houston = (
SouthTexasCity.objects.annotate(dist=Distance("point", lagrange))
.order_by("id")
.first()
)
tol = 2 if connection.ops.oracle else 5
self.assertAlmostEqual(houston.dist.m, 147075.069813, tol)
@skipUnlessDBFeature("has_Distance_function", "has_Transform_function")
def test_distance_projected(self):
"""
Test the `Distance` function on projected coordinate systems.
"""
# The point for La Grange, TX
lagrange = GEOSGeometry("POINT(-96.876369 29.905320)", 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(
# point,
# ST_Transform(
# ST_GeomFromText('POINT(-96.876369 29.905320)', 4326),
# 32140
# )
# )
# FROM distapp_southtexascity;
m_distances = [
147075.069813,
139630.198056,
140888.552826,
138809.684197,
158309.246259,
212183.594374,
70870.188967,
165337.758878,
139196.085105,
]
# SELECT ST_Distance(
# point,
# ST_Transform(
# ST_GeomFromText('POINT(-96.876369 29.905320)', 4326),
# 2278
# )
# )
# FROM distapp_southtexascityft;
ft_distances = [
482528.79154625,
458103.408123001,
462231.860397575,
455411.438904354,
519386.252102563,
696139.009211594,
232513.278304279,
542445.630586414,
456679.155883207,
]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.annotate(
distance=Distance("point", lagrange)
).order_by("id")
dist2 = SouthTexasCityFt.objects.annotate(
distance=Distance("point", lagrange)
).order_by("id")
dist_qs = [dist1, dist2]
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
with self.subTest(c=c):
self.assertAlmostEqual(m_distances[i], c.distance.m, -1)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, -1)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_geodetic(self):
"""
Test the `Distance` function on geodetic coordinate systems.
"""
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString(((150.902, -34.4245), (150.87, -34.5789)), srid=4326)
# Reference query:
# SELECT ST_distance_sphere(
# point,
# ST_GeomFromText(
# 'LINESTRING(150.9020 -34.4245,150.8700 -34.5789)',
# 4326
# )
# )
# FROM distapp_australiacity ORDER BY name;
distances = [
1120954.92533513,
140575.720018241,
640396.662906304,
60580.9693849269,
972807.955955075,
568451.8357838,
40435.4335201384,
0,
68272.3896586844,
12375.0643697706,
0,
]
qs = AustraliaCity.objects.annotate(distance=Distance("point", ls)).order_by(
"name"
)
for city, distance in zip(qs, distances):
with self.subTest(city=city, distance=distance):
# Testing equivalence to within a meter (kilometer on
# SpatiaLite).
tol = -3 if connection.ops.spatialite else 0
self.assertAlmostEqual(distance, city.distance.m, tol)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_geodetic_spheroid(self):
tol = 2 if connection.ops.oracle else 4
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(
# point,
# ST_GeomFromText('POINT(151.231341 -33.952685)', 4326),
# 'SPHEROID["WGS 84",6378137.0,298.257223563]'
# )
# FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(
# point,
# ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)
# )
# FROM distapp_australiacity
# WHERE (NOT (id = 11)); st_distance_sphere
spheroid_distances = [
60504.0628957201,
77023.9489850262,
49154.8867574404,
90847.4358768573,
217402.811919332,
709599.234564757,
640011.483550888,
7772.00667991925,
1047861.78619339,
1165126.55236034,
]
sphere_distances = [
60580.9693849267,
77144.0435286473,
49199.4415344719,
90804.7533823494,
217713.384600405,
709134.127242793,
639828.157159169,
7786.82949717788,
1049204.06569028,
1162623.7238134,
]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name="Hillsdale")
qs = (
AustraliaCity.objects.exclude(id=hillsdale.id)
.annotate(distance=Distance("point", hillsdale.point, spheroid=True))
.order_by("id")
)
for i, c in enumerate(qs):
with self.subTest(c=c):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if connection.ops.postgis or connection.ops.spatialite:
# PostGIS uses sphere-only distances by default, testing these as
# well.
qs = (
AustraliaCity.objects.exclude(id=hillsdale.id)
.annotate(distance=Distance("point", hillsdale.point))
.order_by("id")
)
for i, c in enumerate(qs):
with self.subTest(c=c):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@skipIfDBFeature("supports_distance_geodetic")
@skipUnlessDBFeature("has_Distance_function")
def test_distance_function_raw_result(self):
distance = (
Interstate.objects.annotate(
d=Distance(Point(0, 0, srid=4326), Point(0, 1, srid=4326)),
)
.first()
.d
)
self.assertEqual(distance, 1)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_function_d_lookup(self):
qs = Interstate.objects.annotate(
d=Distance(Point(0, 0, srid=3857), Point(0, 1, srid=3857)),
).filter(d=D(m=1))
self.assertTrue(qs.exists())
@skipUnlessDBFeature("supports_tolerance_parameter")
def test_distance_function_tolerance_escaping(self):
qs = (
Interstate.objects.annotate(
d=Distance(
Point(500, 500, srid=3857),
Point(0, 0, srid=3857),
tolerance="0.05) = 1 OR 1=1 OR (1+1",
),
)
.filter(d=D(m=1))
.values("pk")
)
msg = "The tolerance parameter has the wrong type"
with self.assertRaisesMessage(TypeError, msg):
qs.exists()
@skipUnlessDBFeature("supports_tolerance_parameter")
def test_distance_function_tolerance(self):
# Tolerance is greater than distance.
qs = (
Interstate.objects.annotate(
d=Distance(
Point(0, 0, srid=3857),
Point(1, 1, srid=3857),
tolerance=1.5,
),
)
.filter(d=0)
.values("pk")
)
self.assertIs(qs.exists(), True)
@skipIfDBFeature("supports_distance_geodetic")
@skipUnlessDBFeature("has_Distance_function")
def test_distance_function_raw_result_d_lookup(self):
qs = Interstate.objects.annotate(
d=Distance(Point(0, 0, srid=4326), Point(0, 1, srid=4326)),
).filter(d=D(m=1))
msg = "Distance measure is supplied, but units are unknown for result."
with self.assertRaisesMessage(ValueError, msg):
list(qs)
@skipUnlessDBFeature("has_Distance_function", "has_Transform_function")
def test_distance_transform(self):
"""
Test the `Distance` function used with `Transform` on a geographic
field.
"""
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which aren't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name="77005")
# Reference query:
# SELECT ST_Distance(
# ST_Transform("distapp_censuszipcode"."poly", 32140),
# ST_GeomFromText('<buffer_wkt>', 32140))
# FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ["77002", "77025", "77401"]
for buf in [buf1, buf2]:
qs = (
CensusZipcode.objects.exclude(name="77005")
.annotate(distance=Distance(Transform("poly", 32140), buf))
.order_by("name")
)
self.assertEqual(ref_zips, sorted(c.name for c in qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
@skipUnlessDBFeature("has_Distance_function")
def test_distance_order_by(self):
qs = (
SouthTexasCity.objects.annotate(
distance=Distance("point", Point(3, 3, srid=32140))
)
.order_by("distance")
.values_list("name", flat=True)
.filter(name__in=("San Antonio", "Pearland"))
)
self.assertSequenceEqual(qs, ["San Antonio", "Pearland"])
@skipUnlessDBFeature("has_Length_function")
def test_length(self):
"""
Test the `Length` function.
"""
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(
# ST_GeomFromText('<wkt>', 4326)
# 'SPHEROID["WGS 84",6378137,298.257223563,
# AUTHORITY["EPSG","7030"]]'
# );
len_m1 = 473504.769553813
len_m2 = 4617.668
if connection.features.supports_length_geodetic:
qs = Interstate.objects.annotate(length=Length("path"))
tol = 2 if connection.ops.oracle else 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# TODO: test with spheroid argument (True and False)
else:
# Does not support geodetic coordinate systems.
with self.assertRaises(NotSupportedError):
list(Interstate.objects.annotate(length=Length("path")))
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.annotate(length=Length("path")).get(
name="I-10"
)
self.assertAlmostEqual(len_m2, i10.length.m, 2)
self.assertTrue(
SouthTexasInterstate.objects.annotate(length=Length("path"))
.filter(length__gt=4000)
.exists()
)
# Length with an explicit geometry value.
qs = Interstate.objects.annotate(length=Length(i10.path))
self.assertAlmostEqual(qs.first().length.m, len_m2, 2)
@skipUnlessDBFeature("has_Perimeter_function")
def test_perimeter(self):
"""
Test the `Perimeter` function.
"""
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly)
# FROM distapp_southtexaszipcode;
perim_m = [
18404.3550889361,
15627.2108551001,
20632.5588368978,
17094.5996143697,
]
tol = 2 if connection.ops.oracle else 7
qs = SouthTexasZipcode.objects.annotate(perimeter=Perimeter("poly")).order_by(
"name"
)
for i, z in enumerate(qs):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
qs = SouthTexasCity.objects.annotate(perim=Perimeter("point"))
for city in qs:
self.assertEqual(0, city.perim.m)
@skipUnlessDBFeature("has_Perimeter_function")
def test_perimeter_geodetic(self):
# Currently only Oracle supports calculating the perimeter on geodetic
# geometries (without being transformed).
qs1 = CensusZipcode.objects.annotate(perim=Perimeter("poly"))
if connection.features.supports_perimeter_geodetic:
self.assertAlmostEqual(qs1[0].perim.m, 18406.3818954314, 3)
else:
with self.assertRaises(NotSupportedError):
list(qs1)
# But should work fine when transformed to projected coordinates
qs2 = CensusZipcode.objects.annotate(
perim=Perimeter(Transform("poly", 32140))
).filter(name="77002")
self.assertAlmostEqual(qs2[0].perim.m, 18404.355, 3)
@skipUnlessDBFeature(
"supports_null_geometries", "has_Area_function", "has_Distance_function"
)
def test_measurement_null_fields(self):
"""
Test the measurement functions on fields with NULL values.
"""
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name="78212")
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name="Downtown Houston")
z = SouthTexasZipcode.objects.annotate(
distance=Distance("poly", htown.point), area=Area("poly")
).get(name="78212")
self.assertIsNone(z.distance)
self.assertIsNone(z.area)
| DistanceFunctionsTests |
python | allegroai__clearml | clearml/backend_api/services/v2_13/queues.py | {
"start": 65078,
"end": 67441
} | class ____(Request):
"""
Moves a task entry one step forward towards the top of the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
:param count: Number of positions in the queue to move the task forward
relative to the current position. Optional, the default value is 1.
:type count: int
"""
_service = "queues"
_action = "move_task_forward"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"count": {
"description": "Number of positions in the queue to move the task forward relative to the current position. Optional, the default value is 1.",
"type": "integer",
},
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, count: Optional[int] = None, **kwargs: Any) -> None:
super(MoveTaskForwardRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
self.count = count
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("count")
def count(self) -> Optional[int]:
return self._property_count
@count.setter
def count(self, value: Optional[int]) -> None:
if value is None:
self._property_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "count", six.integer_types)
self._property_count = value
| MoveTaskForwardRequest |
python | getsentry__sentry | tests/sentry/notifications/api/endpoints/test_user_notification_details.py | {
"start": 1259,
"end": 1818
} | class ____(UserNotificationDetailsTestBase):
method = "put"
def test_saves_and_returns_values(self) -> None:
org = self.create_organization()
self.create_member(user=self.user, organization=org)
data = {
"personalActivityNotifications": True,
"selfAssignOnResolve": True,
}
self.get_success_response("me", **data)
def test_reject_invalid_values(self) -> None:
self.get_error_response("me", status_code=400, **{"personalActivityNotifications": 6})
| UserNotificationDetailsPutTest |
python | getsentry__sentry | src/sentry/models/deletedteam.py | {
"start": 185,
"end": 1055
} | class ____(DeletedEntry):
"""
This model tracks an intent to delete. If an org is marked pending_delete
through the UI, a deletedteam is created to log this deletion.
This model does not account for aborted or failed deletions and is currently
unable to log deletions that occur implicitly (i.e. when the sole parent object
is deleted, the child is also marked for deletion as well).
"""
name = models.CharField(max_length=64, null=True)
slug = models.CharField(max_length=50, null=True)
organization_id = BoundedBigIntegerField(null=True)
organization_name = models.CharField(max_length=64, null=True)
organization_slug = models.CharField(max_length=50, null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_deletedteam"
__repr__ = sane_repr("date_deleted", "slug", "reason")
| DeletedTeam |
python | getsentry__sentry | tests/sentry_plugins/github/test_provider.py | {
"start": 6300,
"end": 9549
} | class ____(TestCase):
@cached_property
def provider(self) -> GitHubAppsRepositoryProvider:
return GitHubAppsRepositoryProvider("github_apps")
@patch.object(
GithubPluginAppsClient,
"get_repositories",
return_value=orjson.loads(INSTALLATION_REPOSITORIES_API_RESPONSE),
)
@patch.object(
GithubPluginClient,
"get_installations",
return_value=orjson.loads(LIST_INSTALLATION_API_RESPONSE),
)
def test_link_auth(self, *args: MagicMock) -> None:
user = self.create_user()
organization = self.create_organization()
self.create_usersocialauth(
user=user, provider="github_apps", extra_data={"access_token": "abcdefg"}
)
integration = self.create_integration(
organization=organization, provider="github_apps", external_id="1"
)
self.provider.link_auth(user, organization, {"integration_id": integration.id})
with assume_test_silo_mode(SiloMode.CONTROL):
assert OrganizationIntegration.objects.filter(
organization_id=organization.id, integration=integration
).exists()
def test_delete_repository(self) -> None:
user = self.create_user()
organization = self.create_organization()
integration = self.create_integration(
organization=organization, provider="github_apps", external_id="1"
)
repo = Repository.objects.create(
name="example-repo",
provider="github_apps",
organization_id=organization.id,
integration_id=integration.id,
)
# just check that it doesn't throw / try to delete a webhook
assert self.provider.delete_repository(repo=repo, actor=user) is None
@patch.object(GithubPluginAppsClient, "get_last_commits", return_value=[])
def test_compare_commits_no_start(self, mock_get_last_commits: MagicMock) -> None:
organization = self.create_organization()
integration = self.create_integration(
organization=organization, provider="github_apps", external_id="1"
)
repo = Repository.objects.create(
name="example-repo",
provider="github_apps",
organization_id=organization.id,
integration_id=integration.id,
config={"name": "example-repo"},
)
self.provider.compare_commits(repo, None, "a" * 40)
assert mock_get_last_commits.called
@patch.object(GithubPluginAppsClient, "compare_commits", return_value={"commits": []})
def test_compare_commits(self, mock_compare_commits: MagicMock) -> None:
organization = self.create_organization()
integration = self.create_integration(
organization=organization, provider="github_apps", external_id="1"
)
repo = Repository.objects.create(
name="example-repo",
provider="github_apps",
organization_id=organization.id,
integration_id=integration.id,
config={"name": "example-repo"},
)
self.provider.compare_commits(repo, "b" * 40, "a" * 40)
assert mock_compare_commits.called
| GitHubAppsProviderTest |
python | pytorch__pytorch | torch/utils/_pytree.py | {
"start": 23760,
"end": 23988
} | class ____(Generic[K, T]):
key: K
def __str__(self) -> str:
return f"[{self.key!r}]"
def get(self, mapping: Mapping[K, T]) -> T:
return mapping[self.key]
@dataclasses.dataclass(frozen=True)
| MappingKey |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI059.py | {
"start": 1585,
"end": 1647
} | class ____(Generic[T], str, metaclass=type): # PYI059
...
| C2 |
python | pandas-dev__pandas | pandas/tests/indexing/test_iloc.py | {
"start": 45367,
"end": 47198
} | class ____:
# NB: this test should work for _any_ Series we can pass as
# series_with_simple_index
def test_iloc_float_raises(self, series_with_simple_index, frame_or_series):
# GH#4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
# but is specifically testing for the error
# message
obj = series_with_simple_index
if frame_or_series is DataFrame:
obj = obj.to_frame()
msg = "Cannot index by location index with a non-integer key"
with pytest.raises(TypeError, match=msg):
obj.iloc[3.0]
with pytest.raises(IndexError, match=_slice_iloc_msg):
obj.iloc[3.0] = 0
@pytest.mark.parametrize("has_ref", [True, False])
def test_iloc_getitem_setitem_fancy_exceptions(self, float_frame, has_ref):
with pytest.raises(IndexingError, match="Too many indexers"):
float_frame.iloc[:, :, :]
if has_ref:
view = float_frame[:] # noqa: F841
with pytest.raises(IndexError, match="too many indices for array"):
# GH#32257 we let numpy do validation, get their exception
float_frame.iloc[:, :, :] = 1
def test_iloc_frame_indexer(self):
# GH#39004
df = DataFrame({"a": [1, 2, 3]})
indexer = DataFrame({"a": [True, False, True]})
msg = "DataFrame indexer for .iloc is not supported. Consider using .loc"
with pytest.raises(TypeError, match=msg):
df.iloc[indexer] = 1
msg = (
"DataFrame indexer is not allowed for .iloc\n"
"Consider using .loc for automatic alignment."
)
with pytest.raises(IndexError, match=msg):
df.iloc[indexer]
| TestILocErrors |
python | kamyu104__LeetCode-Solutions | Python/number-of-flowers-in-full-bloom.py | {
"start": 85,
"end": 674
} | class ____(object):
def fullBloomFlowers(self, flowers, persons):
"""
:type flowers: List[List[int]]
:type persons: List[int]
:rtype: List[int]
"""
cnt = collections.Counter()
for s, e in flowers:
cnt[s] += 1
cnt[e+1] -= 1
events = sorted(cnt.iterkeys())
prefix = [0]
for x in events:
prefix.append(prefix[-1]+cnt[x])
return [prefix[bisect.bisect_right(events, t)] for t in persons]
# Time: O(nlogn + mlogn)
# Space: O(n)
import bisect
# binary search
| Solution |
python | Lightning-AI__lightning | tests/tests_pytorch/loops/test_loops.py | {
"start": 45954,
"end": 46213
} | class ____:
def __init__(self, start=0):
self.index = start
def __iter__(self):
for i in range(self.index, len(self)):
self.index = i
yield self.index
def __len__(self):
return 10
| NotStatefulIterable |
python | getsentry__sentry | tests/sentry/models/test_projecttemplate.py | {
"start": 104,
"end": 666
} | class ____(TestCase):
def setUp(self) -> None:
self.org = self.create_organization()
def tearDown(self) -> None:
self.org.delete()
def test_create_simple_project_template(self) -> None:
project_template = ProjectTemplate.objects.create(
name="test_project_template", organization=self.org
)
project_template.save()
project_template.refresh_from_db()
assert project_template.name == "test_project_template"
assert project_template.organization == self.org
| ProjectTemplateTest |
python | Pylons__pyramid | tests/test_scripts/test_pdistreport.py | {
"start": 1930,
"end": 2155
} | class ____:
def __init__(self, name):
self.version = '1'
self.metadata = email.message.Message()
self.metadata['Name'] = name
self.metadata['Summary'] = f'summary for {name=}'
| DummyDistribution |
python | allegroai__clearml | clearml/utilities/pigar/reqs.py | {
"start": 3619,
"end": 19663
} | class ____(object):
def __init__(self, fpath: str, lineno: int) -> None:
self._fpath = fpath
self._lineno = lineno - 1
self._modules = ImportedModules()
self._str_codes = collections.deque()
self._try_imports = set()
def visit_Import(self, node: ast.Import, try_: bool = False) -> None:
"""As we know: `import a [as b]`."""
lineno = node.lineno + self._lineno
for alias in node.names:
self._modules.add(alias.name, self._fpath, lineno)
if try_:
self._try_imports.add(alias.name)
def visit_ImportFrom(self, node: ast.ImportFrom, try_: bool = False) -> None:
"""
As we know: `from a import b [as c]`. If node.level is not 0,
import statement like this `from .a import b`.
"""
mod_name = node.module
level = node.level
if mod_name is None:
level -= 1
mod_name = ""
for alias in node.names:
name = level * "." + mod_name + "." + alias.name
self._modules.add(name, self._fpath, node.lineno + self._lineno)
if try_:
self._try_imports.add(name)
def visit_TryExcept(self, node: ast.Try) -> None:
"""
If modules which imported by `try except` and not found,
maybe them come from other Python version.
"""
for ipt in node.body:
if ipt.__class__.__name__.startswith("Import"):
method = "visit_" + ipt.__class__.__name__
getattr(self, method)(ipt, True)
for handler in node.handlers:
for ipt in handler.body:
if ipt.__class__.__name__.startswith("Import"):
method = "visit_" + ipt.__class__.__name__
getattr(self, method)(ipt, True)
# For Python 3.3+
visit_Try = visit_TryExcept
def visit_Exec(self, node: Any) -> None:
"""
Check `expression` of `exec(expression[, globals[, locals]])`.
**Just available in python 2.**
"""
if hasattr(node.body, "s"):
self._str_codes.append((node.body.s, node.lineno + self._lineno))
# PR#13: https://github.com/damnever/pigar/pull/13
# Sometimes exec statement may be called with tuple in Py2.7.6
elif hasattr(node.body, "elts") and len(node.body.elts) >= 1:
self._str_codes.append((node.body.elts[0].s, node.lineno + self._lineno))
def visit_Expr(self, node: ast.Expr) -> None:
"""
Check `expression` of `eval(expression[, globals[, locals]])`.
Check `expression` of `exec(expression[, globals[, locals]])`
in python 3.
Check `name` of `__import__(name[, globals[, locals[,
fromlist[, level]]]])`.
Check `name` or `package` of `importlib.import_module(name,
package=None)`.
"""
# Built-in functions
value = node.value
if isinstance(value, ast.Call):
if hasattr(value.func, "id"):
if value.func.id == "eval" and hasattr(node.value.args[0], "s"):
self._str_codes.append((node.value.args[0].s, node.lineno + self._lineno))
# **`exec` function in Python 3.**
elif value.func.id == "exec" and hasattr(node.value.args[0], "s"):
self._str_codes.append((node.value.args[0].s, node.lineno + self._lineno))
# `__import__` function.
elif value.func.id == "__import__" and len(node.value.args) > 0 and hasattr(node.value.args[0], "s"):
self._modules.add(node.value.args[0].s, self._fpath, node.lineno + self._lineno)
# `import_module` function.
elif getattr(value.func, "attr", "") == "import_module":
module = getattr(value.func, "value", None)
if module is not None and getattr(module, "id", "") == "importlib":
args = node.value.args
arg_len = len(args)
if arg_len > 0 and hasattr(args[0], "s"):
name = args[0].s
if not name.startswith("."):
self._modules.add(name, self._fpath, node.lineno + self._lineno)
elif arg_len == 2 and hasattr(args[1], "s"):
self._modules.add(args[1].s, self._fpath, node.lineno + self._lineno)
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
"""
Check docstring of function, if docstring is used for doctest.
"""
docstring = self._parse_docstring(node)
if docstring:
self._str_codes.append((docstring, node.lineno + self._lineno + 2))
def visit_ClassDef(self, node: ast.ClassDef) -> None:
"""
Check docstring of class, if docstring is used for doctest.
"""
docstring = self._parse_docstring(node)
if docstring:
self._str_codes.append((docstring, node.lineno + self._lineno + 2))
def visit(self, node: ast.AST) -> None:
"""Visit a node, no recursively."""
for node in ast.walk(node):
method = "visit_" + node.__class__.__name__
getattr(self, method, lambda x: x)(node)
@staticmethod
def _parse_docstring(node: ast.AST) -> Optional[str]:
"""Extract code from docstring."""
docstring = ast.get_docstring(node)
if docstring:
parser = doctest.DocTestParser()
try:
dt = parser.get_doctest(docstring, {}, None, None, None)
except ValueError:
# >>> 'abc'
pass
else:
examples = dt.examples
return "\n".join([example.source for example in examples])
return None
@property
def modules(self) -> ImportedModules:
return self._modules
@property
def str_codes(self) -> collections.deque:
return self._str_codes
@property
def try_imports(self) -> set:
return set((name.split(".")[0] if name and "." in name else name) for name in self._try_imports)
def _checked_cache(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
checked = dict()
@functools.wraps(func)
def _wrapper(name: str) -> Any:
if name not in checked:
checked[name] = func(name)
return checked[name]
return _wrapper
@_checked_cache
def is_std_or_local_lib(name: str) -> Union[bool, str]:
"""Check whether it is stdlib module.
True if std lib
False if installed package
str if local library
"""
# check if one of the builtin modules first
if name in sys.builtin_module_names:
return True
exist = True
if six.PY2:
import imp # noqa
from types import FileType # noqa
module_info = ("", "", "")
try:
module_info = imp.find_module(name)
except ImportError:
try:
# __import__(name)
importlib.import_module(name)
module_info = imp.find_module(name)
sys.modules.pop(name)
except ImportError:
exist = False
# Testcase: ResourceWarning
if isinstance(module_info[0], FileType):
module_info[0].close() # noqa
mpath = module_info[1] # noqa
# make sure we remove built-in modules
if mpath and not os.path.exists(mpath):
mpath = None
else:
module_info = None
try:
module_info = importlib.util.find_spec(name) # noqa
except ImportError:
return False
except ValueError:
# if we got here, the loader failed on us, meaning this is definitely a module and not std
return False
if not module_info:
if name == "__builtin__":
return True
return False
mpath = module_info.origin
# this is a subpackage
if not mpath and module_info.loader is not None:
return False
# this is std
if mpath == "built-in":
mpath = None
if exist and mpath is not None:
if "site-packages" in mpath or "dist-packages" in mpath or "bin/" in mpath and mpath.endswith(".py"):
exist = False
elif (
(sys.prefix not in mpath)
and (six.PY2 or (sys.base_exec_prefix not in mpath))
and (six.PY2 or (sys.base_prefix not in mpath))
):
exist = mpath
return exist
def get_installed_pkgs_detail() -> Dict[str, Union[Tuple[str, str], Dict[str, Tuple[str, str]]]]:
"""
HACK: bugfix of the original pigar get_installed_pkgs_detail
Get mapping for import top level name
and install package name with version.
"""
mapping = dict()
for path in sys.path:
if os.path.isdir(path) and path.rstrip("/").endswith(("site-packages", "dist-packages")):
new_mapping = _search_path(path)
# BUGFIX:
# override with previous, just like python resolves imports, the first match is the one used.
# unlike the original implementation, where the last one is used.
new_mapping.update(mapping)
mapping = new_mapping
# HACK: prefer tensorflow_gpu over tensorflow
if "tensorflow_gpu" in mapping:
mapping["tensorflow"] = mapping["tensorflow_gpu"]
# HACK: prefer tensorflow_macos over tensorflow
if "tensorflow_macos" in mapping:
p = mapping.pop("tensorflow_macos", None)
if p and isinstance(p, (tuple, list)) and len(p) == 2:
mapping["tensorflow"] = ("tensorflow", p[1])
return mapping
def is_base_module(module_path: str) -> bool:
python_base = "{}python{}.{}".format(os.sep, sys.version_info.major, sys.version_info.minor)
for path in sys.path:
if os.path.isdir(path) and path.rstrip("/").endswith((python_base,)):
if not path[-1] == os.sep:
path += os.sep
if module_path.startswith(path):
return True
return False
def _search_path(path: str) -> dict:
mapping = dict()
for file in os.listdir(path):
# Install from PYPI.
# broken pip packages might start with '~' - ignore it
if str(file).startswith("~"):
continue
if fnmatch.fnmatch(file, "*-info"):
pkg_name, version = file.split("-")[:2]
if version.endswith("dist"):
version = version.rsplit(".", 1)[0]
# Issue for ubuntu: sudo pip install xxx
elif version.endswith("egg"):
version = version.rsplit(".", 1)[0]
mapping_pkg_name = pkg_name
# pep610 support. add support for new pip>=20.1 git reference feature
git_direct_json = os.path.join(path, file, "direct_url.json")
if os.path.isfile(git_direct_json):
# noinspection PyBroadException
try:
with open(git_direct_json, "r") as f:
direct_json = json.load(f)
if "vcs_info" in direct_json:
vcs_info = direct_json["vcs_info"]
git_url = "{vcs}+{url}@{commit}#egg={package}".format(
vcs=vcs_info["vcs"],
url=direct_json["url"],
commit=vcs_info["commit_id"],
package=pkg_name,
)
# If subdirectory is present, append this to the git_url
if "subdirectory" in direct_json:
git_url = "{git_url}&subdirectory={subdirectory}".format(
git_url=git_url,
subdirectory=direct_json["subdirectory"],
)
# Bugfix: package name should be the URL link, because we need it unique
# mapping[pkg_name] = ('-e', git_url)
pkg_name, version = "-e {}".format(git_url), ""
elif "url" in direct_json:
url_link = direct_json.get("url", "").strip().lower()
if url_link and not url_link.startswith("file://"):
git_url = direct_json["url"]
# If subdirectory is present, append this to the git_url
if "subdirectory" in direct_json:
git_url = "{git_url}#subdirectory={subdirectory}".format(
git_url=direct_json["url"],
subdirectory=direct_json["subdirectory"],
)
pkg_name, version = git_url, ""
except Exception:
pass
# default
mapping[mapping_pkg_name] = (pkg_name, version)
# analyze 'top_level.txt' if it exists
top_level = os.path.join(path, file, "top_level.txt")
if not os.path.isfile(top_level):
continue
with open(top_level, "r") as f:
for line in f:
top_package = line.strip()
# NOTICE: this is a namespace package
if top_package and mapping_pkg_name.startswith("{}_".format(top_package)):
top = mapping.get(top_package, dict())
if not isinstance(top, dict):
top = {top_package: top}
top[mapping_pkg_name] = (pkg_name, version)
mapping[top_package] = top
else:
mapping[top_package] = (pkg_name, version)
# Install from local and available in GitHub.
elif fnmatch.fnmatch(file, "*-link"):
link = os.path.join(path, file)
if not os.path.isfile(link):
continue
# Link path.
with open(link, "r") as f:
for line in f:
line = line.strip()
if line != ".":
dev_dir = line
if not dev_dir:
continue
if not os.path.exists(dev_dir):
continue
# Egg info path.
info_dir = [_file for _file in os.listdir(dev_dir) if _file.endswith("egg-info")]
if not info_dir:
continue
info_dir = info_dir[0]
top_level = os.path.join(dev_dir, info_dir, "top_level.txt")
# Check whether it can be imported.
if not os.path.isfile(top_level):
continue
# Check .git dir.
git_path = os.path.join(dev_dir, ".git")
if os.path.isdir(git_path):
config = parse_git_config(git_path)
url = config.get('remote "origin"', {}).get("url")
if not url:
continue
branch = 'branch "master"'
if branch not in config:
for section in config:
if "branch" in section:
branch = section
break
if not branch:
continue
branch = branch.split()[1][1:-1]
pkg_name = info_dir.split(".egg")[0]
git_url = "git+{0}@{1}#egg={2}".format(url, branch, pkg_name)
with open(top_level, "r") as f:
for line in f:
# Bugfix: package name should be the URL link, because we need it unique
# mapping[line.strip()] = ('-e', git_url)
mapping[line.strip()] = ("-e {}".format(git_url), "")
return mapping
| ImportChecker |
python | huggingface__transformers | src/transformers/models/nystromformer/modeling_nystromformer.py | {
"start": 16333,
"end": 17055
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = NystromformerPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Nystromformer
| NystromformerLMPredictionHead |
python | protocolbuffers__protobuf | objectivec/DevTools/pddm.py | {
"start": 4365,
"end": 11565
} | class ____(object):
"""Hold a set of macros and can resolve/expand them."""
def __init__(self, a_file=None):
"""Initializes the collection.
Args:
a_file: The file like stream to parse.
Raises:
PDDMError if there are any issues.
"""
self._macros = dict()
if a_file:
self.ParseInput(a_file)
class MacroDefinition(object):
"""Holds a macro definition."""
def __init__(self, name, arg_names):
self._name = name
self._args = tuple(arg_names)
self._body = ''
self._needNewLine = False
def AppendLine(self, line):
if self._needNewLine:
self._body += '\n'
self._body += line
self._needNewLine = not line.endswith('\n')
@property
def name(self):
return self._name
@property
def args(self):
return self._args
@property
def body(self):
return self._body
def ParseInput(self, a_file):
"""Consumes input extracting definitions.
Args:
a_file: The file like stream to parse.
Raises:
PDDMError if there are any issues.
"""
input_lines = a_file.read().splitlines()
self.ParseLines(input_lines)
def ParseLines(self, input_lines):
"""Parses list of lines.
Args:
input_lines: A list of strings of input to parse (no newlines on the
strings).
Raises:
PDDMError if there are any issues.
"""
current_macro = None
for line in input_lines:
if line.startswith('PDDM-'):
directive = line.split(' ', 1)[0]
if directive == 'PDDM-DEFINE':
name, args = self._ParseDefineLine(line)
if self._macros.get(name):
raise PDDMError('Attempt to redefine macro: "%s"' % line)
current_macro = self.MacroDefinition(name, args)
self._macros[name] = current_macro
continue
if directive == 'PDDM-DEFINE-END':
if not current_macro:
raise PDDMError('Got DEFINE-END directive without an active macro:'
' "%s"' % line)
current_macro = None
continue
raise PDDMError('Hit a line with an unknown directive: "%s"' % line)
if current_macro:
current_macro.AppendLine(line)
continue
# Allow blank lines between macro definitions.
if line.strip() == '':
continue
raise PDDMError('Hit a line that wasn\'t a directive and no open macro'
' definition: "%s"' % line)
def _ParseDefineLine(self, input_line):
assert input_line.startswith('PDDM-DEFINE')
line = input_line[12:].strip()
match = _MACRO_RE.match(line)
# Must match full line
if match is None or match.group(0) != line:
raise PDDMError('Failed to parse macro definition: "%s"' % input_line)
name = match.group('name')
args_str = match.group('args').strip()
args = []
if args_str:
for part in args_str.split(','):
arg = part.strip()
if arg == '':
raise PDDMError('Empty arg name in macro definition: "%s"'
% input_line)
if not _MACRO_ARG_NAME_RE.match(arg):
raise PDDMError('Invalid arg name "%s" in macro definition: "%s"'
% (arg, input_line))
if arg in args:
raise PDDMError('Arg name "%s" used more than once in macro'
' definition: "%s"' % (arg, input_line))
args.append(arg)
return (name, tuple(args))
def Expand(self, macro_ref_str):
"""Expands the macro reference.
Args:
macro_ref_str: String of a macro reference (i.e. foo(a, b)).
Returns:
The text from the expansion.
Raises:
PDDMError if there are any issues.
"""
match = _MACRO_RE.match(macro_ref_str)
if match is None or match.group(0) != macro_ref_str:
raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str)
if match.group('name') not in self._macros:
raise PDDMError('No macro named "%s".' % match.group('name'))
return self._Expand(match, [], macro_ref_str)
def _FormatStack(self, macro_ref_stack):
result = ''
for _, macro_ref in reversed(macro_ref_stack):
result += '\n...while expanding "%s".' % macro_ref
return result
def _Expand(self, macro_ref_match, macro_stack, macro_ref_str=None):
if macro_ref_str is None:
macro_ref_str = macro_ref_match.group('macro_ref')
name = macro_ref_match.group('name')
for prev_name, prev_macro_ref in macro_stack:
if name == prev_name:
raise PDDMError('Found macro recursion, invoking "%s":%s' %
(macro_ref_str, self._FormatStack(macro_stack)))
macro = self._macros[name]
args_str = macro_ref_match.group('args').strip()
args = []
if args_str or len(macro.args):
args = [x.strip() for x in args_str.split(',')]
if len(args) != len(macro.args):
raise PDDMError('Expected %d args, got: "%s".%s' %
(len(macro.args), macro_ref_str,
self._FormatStack(macro_stack)))
# Replace args usages.
result = self._ReplaceArgValues(macro, args, macro_ref_str, macro_stack)
# Expand any macro invokes.
new_macro_stack = macro_stack + [(name, macro_ref_str)]
while True:
eval_result = self._EvalMacrosRefs(result, new_macro_stack)
# Consume all ## directives to glue things together.
eval_result = eval_result.replace('##', '')
if eval_result == result:
break
result = eval_result
return result
def _ReplaceArgValues(self,
macro, arg_values, macro_ref_to_report, macro_stack):
if len(arg_values) == 0:
# Nothing to do
return macro.body
assert len(arg_values) == len(macro.args)
args = dict(list(zip(macro.args, arg_values)))
def _lookupArg(match):
val = args[match.group('name')]
opt = match.group('option')
if opt:
if opt == 'S': # Spaces for the length
return ' ' * len(val)
elif opt == 'l': # Lowercase first character
if val:
return val[0].lower() + val[1:]
else:
return val
elif opt == 'L': # All Lowercase
return val.lower()
elif opt == 'u': # Uppercase first character
if val:
return val[0].upper() + val[1:]
else:
return val
elif opt == 'U': # All Uppercase
return val.upper()
else:
raise PDDMError('Unknown arg option "%s$%s" while expanding "%s".%s'
% (match.group('name'), match.group('option'),
macro_ref_to_report,
self._FormatStack(macro_stack)))
return val
# Let the regex do the work!
macro_arg_ref_re = _MacroArgRefRe(macro.args)
return macro_arg_ref_re.sub(_lookupArg, macro.body)
def _EvalMacrosRefs(self, text, macro_stack):
macro_ref_re = _MacroRefRe(list(self._macros.keys()))
def _resolveMacro(match):
return self._Expand(match, macro_stack)
return macro_ref_re.sub(_resolveMacro, text)
| MacroCollection |
python | allegroai__clearml | clearml/backend_api/services/v2_9/projects.py | {
"start": 53338,
"end": 54213
} | class ____(Request):
"""
:param project: Project id
:type project: str
"""
_service = "projects"
_action = "get_by_id"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"project": {"description": "Project id", "type": "string"}},
"required": ["project"],
"type": "object",
}
def __init__(self, project: str, **kwargs: Any) -> None:
super(GetByIdRequest, self).__init__(**kwargs)
self.project = project
@schema_property("project")
def project(self) -> str:
return self._property_project
@project.setter
def project(self, value: str) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
| GetByIdRequest |
python | davidhalter__jedi | jedi/api/__init__.py | {
"start": 28824,
"end": 32428
} | class ____(Script):
"""
Jedi's API for Python REPLs.
Implements all of the methods that are present in :class:`.Script` as well.
In addition to completions that normal REPL completion does like
``str.upper``, Jedi also supports code completion based on static code
analysis. For example Jedi will complete ``str().upper``.
>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.complete()[0].name)
upper
All keyword arguments are same as the arguments for :class:`.Script`.
:param str code: Code to parse.
:type namespaces: typing.List[dict]
:param namespaces: A list of namespace dictionaries such as the one
returned by :func:`globals` and :func:`locals`.
"""
def __init__(self, code, namespaces, *, project=None, **kwds):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
environment = kwds.get('environment', None)
if environment is None:
environment = InterpreterEnvironment()
else:
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
if project is None:
project = Project(Path.cwd())
super().__init__(code, environment=environment, project=project, **kwds)
self.namespaces = namespaces
self._inference_state.allow_unsafe_executions = \
settings.allow_unsafe_interpreter_executions
# Dynamic params search is important when we work on functions that are
# called by other pieces of code. However for interpreter completions
# this is not important at all, because the current code is always new
# and will never be called by something.
# Also sometimes this logic goes a bit too far like in
# https://github.com/ipython/ipython/issues/13866, where it takes
# seconds to do a simple completion.
self._inference_state.do_dynamic_params_search = False
@cache.memoize_method
def _get_module_context(self):
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(self.path, self._code)
tree_module_value = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=('__main__',),
code_lines=self._code_lines,
)
return interpreter.MixedModuleContext(
tree_module_value,
self.namespaces,
)
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of lazy parsing
of modules. This can be useful for IDEs, to control which modules to load
on startup.
:param modules: different module names, list of string.
"""
for m in modules:
s = "import %s as x; x." % m
Script(s).complete(1, len(s))
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
notices=True, speed=True):
"""
Define a callback debug function to get all the debug messages.
If you don't specify any arguments, debug messages will be printed to stdout.
:param func_cb: The callback function for debug messages.
"""
debug.debug_function = func_cb
debug.enable_warning = warnings
debug.enable_notice = notices
debug.enable_speed = speed
| Interpreter |
python | keon__algorithms | tests/test_maths.py | {
"start": 12099,
"end": 12500
} | class ____(unittest.TestCase):
"""[summary]
Test for the file find_order_simple.py
Arguments:
unittest {[type]} -- [description]
"""
def test_find_order_simple(self):
self.assertEqual(1, find_order(1, 1))
self.assertEqual(6, find_order(3, 7))
self.assertEqual(-1, find_order(128, 256))
self.assertEqual(352, find_order(3, 353))
| TestFindOrder |
python | pytorch__pytorch | test/distributed/test_c10d_common.py | {
"start": 81852,
"end": 82995
} | class ____(MultiProcessTestCase):
@property
def world_size(self):
return 4
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def testWithoutEnv(self):
with self.assertRaisesRegex(RuntimeError, "LOCAL_RANK"):
dist.get_node_local_rank()
def testWithoutEnvWithFallback(self):
self.assertEqual(dist.get_node_local_rank(fallback_rank=2), 2)
def testNodeLocalRankOverridesFallback(self):
os.environ["LOCAL_RANK"] = str(self.rank)
self.assertEqual(dist.get_node_local_rank(fallback_rank=123), self.rank)
def testNodeLocalRank(self):
os.environ["LOCAL_RANK"] = str(self.rank)
self.assertEqual(dist.get_node_local_rank(), self.rank)
if __name__ == "__main__":
if device_type != "cpu":
assert not torch.get_device_module()._initialized, (
f"test_distributed must not have initialized {device_type} context on main process"
)
run_tests()
| LocalRankTest |
python | walkccc__LeetCode | solutions/137. Single Number II/137.py | {
"start": 0,
"end": 183
} | class ____:
def singleNumber(self, nums: list[int]) -> int:
ones = 0
twos = 0
for num in nums:
ones ^= num & ~twos
twos ^= num & ~ones
return ones
| Solution |
python | django__django | tests/forms_tests/tests/test_formsets.py | {
"start": 72088,
"end": 72214
} | class ____(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
| ArticleForm |
python | pypa__pip | src/pip/_vendor/urllib3/contrib/securetransport.py | {
"start": 12361,
"end": 29652
} | class ____(object):
"""
API-compatibility wrapper for Python's OpenSSL wrapped socket object.
Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
collector of PyPy.
"""
def __init__(self, socket):
self.socket = socket
self.context = None
self._makefile_refs = 0
self._closed = False
self._exception = None
self._keychain = None
self._keychain_dir = None
self._client_cert_chain = None
# We save off the previously-configured timeout and then set it to
# zero. This is done because we use select and friends to handle the
# timeouts, but if we leave the timeout set on the lower socket then
# Python will "kindly" call select on that socket again for us. Avoid
# that by forcing the timeout to zero.
self._timeout = self.socket.gettimeout()
self.socket.settimeout(0)
@contextlib.contextmanager
def _raise_on_error(self):
"""
A context manager that can be used to wrap calls that do I/O from
SecureTransport. If any of the I/O callbacks hit an exception, this
context manager will correctly propagate the exception after the fact.
This avoids silently swallowing those exceptions.
It also correctly forces the socket closed.
"""
self._exception = None
# We explicitly don't catch around this yield because in the unlikely
# event that an exception was hit in the block we don't want to swallow
# it.
yield
if self._exception is not None:
exception, self._exception = self._exception, None
self.close()
raise exception
def _set_ciphers(self):
"""
Sets up the allowed ciphers. By default this matches the set in
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
"""
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
result = Security.SSLSetEnabledCiphers(
self.context, ciphers, len(CIPHER_SUITES)
)
_assert_no_error(result)
def _set_alpn_protocols(self, protocols):
"""
Sets up the ALPN protocols on the context.
"""
if not protocols:
return
protocols_arr = _create_cfstring_array(protocols)
try:
result = Security.SSLSetALPNProtocols(self.context, protocols_arr)
_assert_no_error(result)
finally:
CoreFoundation.CFRelease(protocols_arr)
def _custom_validate(self, verify, trust_bundle):
"""
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
Raises an SSLError if the connection is not trusted.
"""
# If we disabled cert validation, just say: cool.
if not verify:
return
successes = (
SecurityConst.kSecTrustResultUnspecified,
SecurityConst.kSecTrustResultProceed,
)
try:
trust_result = self._evaluate_trust(trust_bundle)
if trust_result in successes:
return
reason = "error code: %d" % (trust_result,)
except Exception as e:
# Do not trust on error
reason = "exception: %r" % (e,)
# SecureTransport does not send an alert nor shuts down the connection.
rec = _build_tls_unknown_ca_alert(self.version())
self.socket.sendall(rec)
# close the connection immediately
# l_onoff = 1, activate linger
# l_linger = 0, linger for 0 seoncds
opts = struct.pack("ii", 1, 0)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
self.close()
raise ssl.SSLError("certificate verify failed, %s" % reason)
def _evaluate_trust(self, trust_bundle):
# We want data in memory, so load it up.
if os.path.isfile(trust_bundle):
with open(trust_bundle, "rb") as f:
trust_bundle = f.read()
cert_array = None
trust = Security.SecTrustRef()
try:
# Get a CFArray that contains the certs we want.
cert_array = _cert_array_from_pem(trust_bundle)
# Ok, now the hard part. We want to get the SecTrustRef that ST has
# created for this connection, shove our CAs into it, tell ST to
# ignore everything else it knows, and then ask if it can build a
# chain. This is a buuuunch of code.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
raise ssl.SSLError("Failed to copy trust reference")
result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
_assert_no_error(result)
result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
_assert_no_error(result)
trust_result = Security.SecTrustResultType()
result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
_assert_no_error(result)
finally:
if trust:
CoreFoundation.CFRelease(trust)
if cert_array is not None:
CoreFoundation.CFRelease(cert_array)
return trust_result.value
def handshake(
self,
server_hostname,
verify,
trust_bundle,
min_version,
max_version,
client_cert,
client_key,
client_key_passphrase,
alpn_protocols,
):
"""
Actually performs the TLS handshake. This is run automatically by
wrapped socket, and shouldn't be needed in user code.
"""
# First, we do the initial bits of connection setup. We need to create
# a context, set its I/O funcs, and set the connection reference.
self.context = Security.SSLCreateContext(
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
self.context, _read_callback_pointer, _write_callback_pointer
)
_assert_no_error(result)
# Here we need to compute the handle to use. We do this by taking the
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
# just keep incrementing by one until we find a free space.
with _connection_ref_lock:
handle = id(self) % 2147483647
while handle in _connection_refs:
handle = (handle + 1) % 2147483647
_connection_refs[handle] = self
result = Security.SSLSetConnection(self.context, handle)
_assert_no_error(result)
# If we have a server hostname, we should set that too.
if server_hostname:
if not isinstance(server_hostname, bytes):
server_hostname = server_hostname.encode("utf-8")
result = Security.SSLSetPeerDomainName(
self.context, server_hostname, len(server_hostname)
)
_assert_no_error(result)
# Setup the ciphers.
self._set_ciphers()
# Setup the ALPN protocols.
self._set_alpn_protocols(alpn_protocols)
# Set the minimum and maximum TLS versions.
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
_assert_no_error(result)
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
_assert_no_error(result)
# If there's a trust DB, we need to use it. We do that by telling
# SecureTransport to break on server auth. We also do that if we don't
# want to validate the certs at all: we just won't actually do any
# authing in that case.
if not verify or trust_bundle is not None:
result = Security.SSLSetSessionOption(
self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
)
_assert_no_error(result)
# If there's a client cert, we need to use it.
if client_cert:
self._keychain, self._keychain_dir = _temporary_keychain()
self._client_cert_chain = _load_client_cert_chain(
self._keychain, client_cert, client_key
)
result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
_assert_no_error(result)
while True:
with self._raise_on_error():
result = Security.SSLHandshake(self.context)
if result == SecurityConst.errSSLWouldBlock:
raise socket.timeout("handshake timed out")
elif result == SecurityConst.errSSLServerAuthCompleted:
self._custom_validate(verify, trust_bundle)
continue
else:
_assert_no_error(result)
break
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, bufsiz):
buffer = ctypes.create_string_buffer(bufsiz)
bytes_read = self.recv_into(buffer, bufsiz)
data = buffer[:bytes_read]
return data
def recv_into(self, buffer, nbytes=None):
# Read short on EOF.
if self._closed:
return 0
if nbytes is None:
nbytes = len(buffer)
buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLRead(
self.context, buffer, nbytes, ctypes.byref(processed_bytes)
)
# There are some result codes that we want to treat as "not always
# errors". Specifically, those are errSSLWouldBlock,
# errSSLClosedGraceful, and errSSLClosedNoNotify.
if result == SecurityConst.errSSLWouldBlock:
# If we didn't process any bytes, then this was just a time out.
# However, we can get errSSLWouldBlock in situations when we *did*
# read some data, and in those cases we should just read "short"
# and return.
if processed_bytes.value == 0:
# Timed out, no data read.
raise socket.timeout("recv timed out")
elif result in (
SecurityConst.errSSLClosedGraceful,
SecurityConst.errSSLClosedNoNotify,
):
# The remote peer has closed this connection. We should do so as
# well. Note that we don't actually return here because in
# principle this could actually be fired along with return data.
# It's unlikely though.
self.close()
else:
_assert_no_error(result)
# Ok, we read and probably succeeded. We should return whatever data
# was actually read.
return processed_bytes.value
def settimeout(self, timeout):
self._timeout = timeout
def gettimeout(self):
return self._timeout
def send(self, data):
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLWrite(
self.context, data, len(data), ctypes.byref(processed_bytes)
)
if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
# Timed out
raise socket.timeout("send timed out")
else:
_assert_no_error(result)
# We sent, and probably succeeded. Tell them how much we sent.
return processed_bytes.value
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
def shutdown(self):
with self._raise_on_error():
Security.SSLClose(self.context)
def close(self):
# TODO: should I do clean shutdown here? Do I have to?
if self._makefile_refs < 1:
self._closed = True
if self.context:
CoreFoundation.CFRelease(self.context)
self.context = None
if self._client_cert_chain:
CoreFoundation.CFRelease(self._client_cert_chain)
self._client_cert_chain = None
if self._keychain:
Security.SecKeychainDelete(self._keychain)
CoreFoundation.CFRelease(self._keychain)
shutil.rmtree(self._keychain_dir)
self._keychain = self._keychain_dir = None
return self.socket.close()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
# Urgh, annoying.
#
# Here's how we do this:
#
# 1. Call SSLCopyPeerTrust to get hold of the trust object for this
# connection.
# 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
# 3. To get the CN, call SecCertificateCopyCommonName and process that
# string so that it's of the appropriate type.
# 4. To get the SAN, we need to do something a bit more complex:
# a. Call SecCertificateCopyValues to get the data, requesting
# kSecOIDSubjectAltName.
# b. Mess about with this dictionary to try to get the SANs out.
#
# This is gross. Really gross. It's going to be a few hundred LoC extra
# just to repeat something that SecureTransport can *already do*. So my
# operating assumption at this time is that what we want to do is
# instead to just flag to urllib3 that it shouldn't do its own hostname
# validation when using SecureTransport.
if not binary_form:
raise ValueError("SecureTransport only supports dumping binary certs")
trust = Security.SecTrustRef()
certdata = None
der_bytes = None
try:
# Grab the trust store.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
# Probably we haven't done the handshake yet. No biggie.
return None
cert_count = Security.SecTrustGetCertificateCount(trust)
if not cert_count:
# Also a case that might happen if we haven't handshaked.
# Handshook? Handshaken?
return None
leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
assert leaf
# Ok, now we want the DER bytes.
certdata = Security.SecCertificateCopyData(leaf)
assert certdata
data_length = CoreFoundation.CFDataGetLength(certdata)
data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
der_bytes = ctypes.string_at(data_buffer, data_length)
finally:
if certdata:
CoreFoundation.CFRelease(certdata)
if trust:
CoreFoundation.CFRelease(trust)
return der_bytes
def version(self):
protocol = Security.SSLProtocol()
result = Security.SSLGetNegotiatedProtocolVersion(
self.context, ctypes.byref(protocol)
)
_assert_no_error(result)
if protocol.value == SecurityConst.kTLSProtocol13:
raise ssl.SSLError("SecureTransport does not support TLS 1.3")
elif protocol.value == SecurityConst.kTLSProtocol12:
return "TLSv1.2"
elif protocol.value == SecurityConst.kTLSProtocol11:
return "TLSv1.1"
elif protocol.value == SecurityConst.kTLSProtocol1:
return "TLSv1"
elif protocol.value == SecurityConst.kSSLProtocol3:
return "SSLv3"
elif protocol.value == SecurityConst.kSSLProtocol2:
return "SSLv2"
else:
raise ssl.SSLError("Unknown TLS version: %r" % protocol)
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
def makefile(self, mode="r", buffering=None, *args, **kwargs):
# We disable buffering with SecureTransport because it conflicts with
# the buffering that ST does internally (see issue #1153 for more).
buffering = 0
return backport_makefile(self, mode, buffering, *args, **kwargs)
WrappedSocket.makefile = makefile
| WrappedSocket |
python | pytorch__pytorch | test/fx/quantization.py | {
"start": 7291,
"end": 12814
} | class ____:
def __init__(
self, mod, patterns=_DEFAULT_QUANTIZATION_PATTERNS, quant_ctor=DefaultQuant
):
self.root = mod
self.graph = mod.graph
self.quant_ctor = quant_ctor
# cached information for observe
self.state_dict = self.root.state_dict()
self.modules = dict(self.root.named_modules())
# match the patterns that will get quantized
self.matches = self._find_matches(patterns)
# find _inputs_ to matched nodes that are not quantized, these
# have to be quantized, which requires measuring stats,
# initialize an quant_ctor object for each
self.quants = self._find_quants(quant_ctor)
def observe(self, args):
# most of this function is just an interpreter for the graph
# it would be possible to put this in some abstraction, but
# it is pretty nice to just be able to see exactly what is happening here
# and hack on it.
# maybe we should just provide an example interpreter that people copy/paste
# then edit.
args_iter = iter(args)
env = {}
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
for node in self.graph.nodes:
if node.op == "placeholder":
result = next(args_iter)
elif node.op == "get_attr":
result = self.state_dict[node.target]
elif node.op == "call_function":
result = node.target(*load_arg(node.args), **load_arg(node.kwargs))
elif node.op == "call_method":
self_obj, *args = load_arg(node.args)
kwargs = load_arg(node.kwargs)
result = getattr(self_obj, node.target)(*args, **kwargs)
elif node.op == "call_module":
result = self.modules[node.target](
*load_arg(node.args), **load_arg(node.kwargs)
)
elif node.op == "output":
return load_arg(node.args[0])
env[node.name] = result
root_node, obj = self.matches.get(node.name, (None, None))
if root_node is node:
obj.observe(node, env)
if node.name in self.quants:
self.quants[node.name].observe(node, env)
raise RuntimeError("Graph had no output node!")
def quantize(self):
self.quantized_graph = Graph()
env = {}
quant_env = {}
def load_arg(n, quantized):
if not quantized:
if n.name not in env and n.name in quant_env:
env[n.name] = Proxy(quant_env[n.name]).dequantize().node
return env[n.name]
else:
if n.name not in quant_env and n.name in env:
quant_env[n.name] = self.quants[n.name].quantize(env[n.name])
return quant_env[n.name]
def copy_recursive(node):
r = env[node.name] = self.quantized_graph.node_copy(
node, lambda n: load_arg(n, quantized=False)
)
return r
for node in self.graph.nodes:
root_node, obj = self.matches.get(node.name, (None, None))
if root_node is None:
# not quantized just copy it
env[node.name] = self.quantized_graph.node_copy(
node, lambda n: load_arg(n, quantized=False)
)
elif root_node is node:
r = obj.quantize(
self,
node,
lambda a: map_arg(a, lambda n: load_arg(n, quantized=True)),
)
if r is NotImplemented:
# quantizer choose to quantize the node take the entire match, and just copy it over
env[node.name] = copy_recursive(node)
else:
quant_env[node.name] = r
return GraphModule(self.root, self.quantized_graph)
def _find_matches(self, patterns):
modules = dict(self.root.named_modules())
match_map = {} # node name -> (root_node, match_value?)
def apply_match(pattern, node, match):
if isinstance(pattern, tuple):
s, *args = pattern
apply_match(s, node, match)
for subpattern, arg in zip(args, node.args):
apply_match(subpattern, arg, match)
else:
match_map[node.name] = match
for node in reversed(self.graph.nodes):
if node.name not in match_map:
for pattern, value in patterns.items():
if matches(modules, node, pattern):
apply_match(pattern, node, (node, value(self, node)))
return match_map
def _find_quants(self, quant_ctor):
quants = {}
def visit_arg(n):
# note: we have to measure quantization information
# even for nodes where we might not use it because it is already
# quantized. This is because each match has the option to
# say NotImplemented (if for instance, it is an __add__ and the data type is not appropriate)
if n.name not in quants:
quants[n.name] = quant_ctor(self, n)
for node in self.graph.nodes:
if node.name in self.matches:
map_arg(node.args, visit_arg)
map_arg(node.kwargs, visit_arg)
return quants
| Quantizer |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 104197,
"end": 104750
} | class ____(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
| SendrecvmsgConnectionlessBase |
python | getsentry__sentry | tests/sentry/integrations/gitlab/tasks/test_pr_comment.py | {
"start": 5046,
"end": 7586
} | class ____(GitlabCommentTestCase):
def test_simple(self) -> None:
"""one pr with one issue"""
commit = self.add_commit_to_repo(self.repo, self.user, self.project)
pr = self.add_pr_to_commit(commit)
groupowner = self.add_groupowner_to_commit(commit, self.project, self.user)
results = self.pr_comment_workflow.get_issue_ids_from_pr(pr=pr)
assert results == [groupowner.group_id]
def test_multiple_issues(self) -> None:
"""one pr with multiple issues"""
commit = self.add_commit_to_repo(self.repo, self.user, self.project)
pr = self.add_pr_to_commit(commit)
groupowner_1 = self.add_groupowner_to_commit(commit, self.project, self.user)
groupowner_2 = self.add_groupowner_to_commit(commit, self.project, self.user)
groupowner_3 = self.add_groupowner_to_commit(commit, self.project, self.user)
results = self.pr_comment_workflow.get_issue_ids_from_pr(pr=pr)
assert results == [groupowner_1.group_id, groupowner_2.group_id, groupowner_3.group_id]
def test_multiple_prs(self) -> None:
"""multiple eligible PRs with one issue each"""
commit_1 = self.add_commit_to_repo(self.repo, self.user, self.project)
commit_2 = self.add_commit_to_repo(self.repo, self.user, self.project)
pr_1 = self.add_pr_to_commit(commit_1)
pr_2 = self.add_pr_to_commit(commit_2)
groupowner_1 = self.add_groupowner_to_commit(commit_1, self.project, self.user)
groupowner_2 = self.add_groupowner_to_commit(commit_2, self.project, self.user)
results = self.pr_comment_workflow.get_issue_ids_from_pr(pr=pr_1)
assert results == [groupowner_1.group_id]
results = self.pr_comment_workflow.get_issue_ids_from_pr(pr=pr_2)
assert results == [groupowner_2.group_id]
def test_multiple_commits(self) -> None:
"""Multiple eligible commits with one issue each"""
commit_1 = self.add_commit_to_repo(self.repo, self.user, self.project)
commit_2 = self.add_commit_to_repo(self.repo, self.user, self.project)
pr = self.add_pr_to_commit(commit_1)
self.add_branch_commit_to_pr(commit_2, pr)
groupowner_1 = self.add_groupowner_to_commit(commit_1, self.project, self.user)
groupowner_2 = self.add_groupowner_to_commit(commit_2, self.project, self.user)
results = self.pr_comment_workflow.get_issue_ids_from_pr(pr=pr)
assert results == [groupowner_1.group_id, groupowner_2.group_id]
| TestPrToIssueQuery |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 72790,
"end": 73044
} | class ____(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
actions: Annotated[
list[BulkCreateActionPoolBody | BulkUpdateActionPoolBody | BulkDeleteActionPoolBody],
Field(title="Actions"),
]
| BulkBodyPoolBody |
python | pypa__warehouse | tests/unit/test_sessions.py | {
"start": 21407,
"end": 23965
} | class ____:
def test_has_options(self):
assert set(session_view.options) == {"uses_session"}
@pytest.mark.parametrize("uses_session", [False, None])
def test_invalid_session(self, uses_session):
context = pretend.stub()
request = pretend.stub(session=pretend.stub())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, InvalidSession)
return response
info = pretend.stub(options={}, exception_only=False)
if uses_session is not None:
info.options["uses_session"] = uses_session
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
def test_valid_session(self, monkeypatch):
add_vary_cb = pretend.call_recorder(lambda fn: fn)
add_vary = pretend.call_recorder(lambda vary: add_vary_cb)
monkeypatch.setattr(warehouse.sessions, "add_vary", add_vary)
context = pretend.stub()
request = pretend.stub(session=Session())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, Session)
return response
info = pretend.stub(options={"uses_session": True})
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert add_vary.calls == [pretend.call("Cookie")]
assert add_vary_cb.calls == [pretend.call(view)]
def test_includeme(monkeypatch):
session_factory_obj = pretend.stub()
session_factory_cls = pretend.call_recorder(lambda secret, url: session_factory_obj)
monkeypatch.setattr(warehouse.sessions, "SessionFactory", session_factory_cls)
config = pretend.stub(
set_session_factory=pretend.call_recorder(lambda factory: None),
registry=pretend.stub(
settings={"sessions.secret": "my secret", "sessions.url": "my url"}
),
add_view_deriver=pretend.call_recorder(lambda *a, **kw: None),
)
includeme(config)
assert config.set_session_factory.calls == [pretend.call(session_factory_obj)]
assert session_factory_cls.calls == [pretend.call("my secret", "my url")]
assert config.add_view_deriver.calls == [
pretend.call(session_view, over="csrf_view", under=viewderivers.INGRESS)
]
| TestSessionView |
python | python__mypy | mypy/stubutil.py | {
"start": 14977,
"end": 16098
} | class ____:
"""Abstract base class for extracting a list of FunctionSigs for each function."""
def remove_self_type(
self, inferred: list[FunctionSig] | None, self_var: str
) -> list[FunctionSig] | None:
"""Remove type annotation from self/cls argument"""
if inferred:
for signature in inferred:
if signature.args:
if signature.args[0].name == self_var:
signature.args[0].type = None
return inferred
@abstractmethod
def get_function_sig(
self, default_sig: FunctionSig, ctx: FunctionContext
) -> list[FunctionSig] | None:
"""Return a list of signatures for the given function.
If no signature can be found, return None. If all of the registered SignatureGenerators
for the stub generator return None, then the default_sig will be used.
"""
pass
@abstractmethod
def get_property_type(self, default_type: str | None, ctx: FunctionContext) -> str | None:
"""Return the type of the given property"""
pass
| SignatureGenerator |
python | PrefectHQ__prefect | src/prefect/client/base.py | {
"start": 17095,
"end": 26895
} | class ____(httpx.Client):
"""
A Prefect wrapper for the async httpx client with support for retry-after headers
for the provided status codes (typically 429, 502 and 503).
Additionally, this client will always call `raise_for_status` on responses.
For more details on rate limit headers, see:
[Configuring Cloudflare Rate Limiting](https://support.cloudflare.com/hc/en-us/articles/115001635128-Configuring-Rate-Limiting-from-UI)
"""
def __init__(
self,
*args: Any,
enable_csrf_support: bool = False,
raise_on_all_errors: bool = True,
**kwargs: Any,
):
self.enable_csrf_support: bool = enable_csrf_support
self.csrf_token: Optional[str] = None
self.csrf_token_expiration: Optional[datetime] = None
self.csrf_client_id: uuid.UUID = uuid.uuid4()
self.raise_on_all_errors: bool = raise_on_all_errors
super().__init__(*args, **kwargs)
user_agent = (
f"prefect/{prefect.__version__} (API {constants.SERVER_API_VERSION})"
)
self.headers["User-Agent"] = user_agent
# Add custom headers from settings
custom_headers = get_current_settings().client.custom_headers
for header_name, header_value in custom_headers.items():
# Prevent overriding critical headers
if header_name.lower() in {
"user-agent",
"prefect-csrf-token",
"prefect-csrf-client",
}:
logger.warning(
f"Custom header '{header_name}' is ignored because it conflicts with "
f"a protected header managed by Prefect. Protected headers include: "
f"User-Agent, Prefect-Csrf-Token, Prefect-Csrf-Client"
)
else:
self.headers[header_name] = header_value
def _send_with_retry(
self,
request: Request,
send: Callable[[Request], Response],
send_args: tuple[Any, ...],
send_kwargs: dict[str, Any],
retry_codes: set[int] = set(),
retry_exceptions: tuple[type[Exception], ...] = tuple(),
):
"""
Send a request and retry it if it fails.
Sends the provided request and retries it up to PREFECT_CLIENT_MAX_RETRIES times
if the request either raises an exception listed in `retry_exceptions` or
receives a response with a status code listed in `retry_codes`.
Retries are not counted against the limit if the response headers contains
a reserved value, indicating that the server is undergoing maintenance. These
requests will retry indefinitely until the header is no longer returned.
Retries will be delayed based on either the retry header (preferred) or
exponential backoff if a retry header is not provided.
"""
try_count = 0
response = None
if TYPE_CHECKING:
# older httpx versions type method as str | bytes | Unknown
# but in reality it is always a string.
assert isinstance(request.method, str) # type: ignore
is_change_request = request.method.lower() in {"post", "put", "patch", "delete"}
if self.enable_csrf_support and is_change_request:
self._add_csrf_headers(request=request)
while try_count <= PREFECT_CLIENT_MAX_RETRIES.value():
retry_seconds = None
exc_info = None
try:
response = send(request, *send_args, **send_kwargs)
except retry_exceptions: # type: ignore
try_count += 1
if try_count > PREFECT_CLIENT_MAX_RETRIES.value():
raise
# Otherwise, we will ignore this error but capture the info for logging
exc_info = sys.exc_info()
else:
if response.headers.get("Prefect-Maintenance") != "true":
try_count += 1
# We got a response; check if it's a CSRF error, otherwise
# return immediately if it is not retryable
if (
response.status_code == status.HTTP_403_FORBIDDEN
and "Invalid CSRF token" in response.text
):
# We got a CSRF error, clear the token and try again
self.csrf_token = None
self._add_csrf_headers(request)
elif response.status_code not in retry_codes:
return response
if "Retry-After" in response.headers:
retry_seconds = float(response.headers["Retry-After"])
# Use an exponential back-off if not set in a header
if retry_seconds is None:
retry_seconds = 2**try_count
# Add jitter
jitter_factor = PREFECT_CLIENT_RETRY_JITTER_FACTOR.value()
if retry_seconds > 0 and jitter_factor > 0:
if response is not None and "Retry-After" in response.headers:
# Always wait for _at least_ retry seconds if requested by the API
retry_seconds = bounded_poisson_interval(
retry_seconds, retry_seconds * (1 + jitter_factor)
)
else:
# Otherwise, use a symmetrical jitter
retry_seconds = clamped_poisson_interval(
retry_seconds, jitter_factor
)
logger.debug(
(
"Encountered retryable exception during request. "
if exc_info
else (
"Received response with retryable status code"
f" {response.status_code if response else 'unknown'}. "
)
)
+ f"Another attempt will be made in {retry_seconds}s. "
"This is attempt"
f" {try_count}/{PREFECT_CLIENT_MAX_RETRIES.value() + 1}.",
exc_info=exc_info,
)
time.sleep(retry_seconds)
assert response is not None, (
"Retry handling ended without response or exception"
)
# We ran out of retries, return the failed response
return response
def send(self, request: Request, *args: Any, **kwargs: Any) -> Response:
"""
Send a request with automatic retry behavior for the following status codes:
- 403 Forbidden, if the request failed due to CSRF protection
- 408 Request Timeout
- 429 CloudFlare-style rate limiting
- 502 Bad Gateway
- 503 Service unavailable
- Any additional status codes provided in `PREFECT_CLIENT_RETRY_EXTRA_CODES`
"""
super_send = super().send
response = self._send_with_retry(
request=request,
send=super_send,
send_args=args,
send_kwargs=kwargs,
retry_codes={
status.HTTP_429_TOO_MANY_REQUESTS,
status.HTTP_503_SERVICE_UNAVAILABLE,
status.HTTP_502_BAD_GATEWAY,
status.HTTP_408_REQUEST_TIMEOUT,
*PREFECT_CLIENT_RETRY_EXTRA_CODES.value(),
},
retry_exceptions=(
httpx.ReadTimeout,
httpx.PoolTimeout,
httpx.ConnectTimeout,
# `ConnectionResetError` when reading socket raises as a `ReadError`
httpx.ReadError,
# Sockets can be closed during writes resulting in a `WriteError`
httpx.WriteError,
# Uvicorn bug, see https://github.com/PrefectHQ/prefect/issues/7512
httpx.RemoteProtocolError,
# HTTP2 bug, see https://github.com/PrefectHQ/prefect/issues/7442
httpx.LocalProtocolError,
),
)
# Convert to a Prefect response to add nicer errors messages
response = PrefectResponse.from_httpx_response(response)
if self.raise_on_all_errors:
response.raise_for_status()
return response
def _add_csrf_headers(self, request: Request):
now = datetime.now(timezone.utc)
if not self.enable_csrf_support:
return
if not self.csrf_token or (
self.csrf_token_expiration and now > self.csrf_token_expiration
):
token_request = self.build_request(
"GET", f"/csrf-token?client={self.csrf_client_id}"
)
try:
token_response = self.send(token_request)
except PrefectHTTPStatusError as exc:
old_server = exc.response.status_code == status.HTTP_404_NOT_FOUND
unconfigured_server = (
exc.response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
and "CSRF protection is disabled." in exc.response.text
)
if old_server or unconfigured_server:
# The token endpoint is either unavailable, suggesting an
# older server, or CSRF protection is disabled. In either
# case we should disable CSRF support.
self.enable_csrf_support = False
return
raise
token: CsrfToken = CsrfToken.model_validate(token_response.json())
self.csrf_token = token.token
self.csrf_token_expiration = token.expiration
request.headers["Prefect-Csrf-Token"] = self.csrf_token
request.headers["Prefect-Csrf-Client"] = str(self.csrf_client_id)
| PrefectHttpxSyncClient |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 18605,
"end": 18679
} | class ____(FFunction):
""" Fortran kind function. """
nargs = 1
| kind |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/creating-an-inline-component/asset-with-schedule-final.py | {
"start": 51,
"end": 600
} | class ____(dg.Component, dg.Model, dg.Resolvable):
asset_key: list[str]
cron_schedule: str
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
@dg.asset(key=dg.AssetKey(self.asset_key))
def asset():
return randint(1, 100)
schedule = dg.ScheduleDefinition(
name=f"{'_'.join(self.asset_key)}_schedule",
cron_schedule=self.cron_schedule,
target=asset,
)
return dg.Definitions(assets=[asset], schedules=[schedule])
| AssetWithSchedule |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 15945,
"end": 16154
} | class ____(models.Model):
name = models.CharField(max_length=30)
email = models.EmailField(max_length=255, unique=True)
register(ContactRegister, table_name="contacts_register_history")
| ContactRegister |
python | django__django | django/contrib/postgres/aggregates/general.py | {
"start": 910,
"end": 996
} | class ____(Aggregate):
function = "BOOL_OR"
output_field = BooleanField()
| BoolOr |
python | openai__openai-python | src/openai/resources/files.py | {
"start": 14032,
"end": 27045
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncFilesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncFilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncFilesWithStreamingResponse(self)
async def create(
self,
*,
file: FileTypes,
purpose: FilePurpose,
expires_after: file_create_params.ExpiresAfter | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileObject:
"""Upload a file that can be used across various endpoints.
Individual files can be
up to 512 MB, and the size of all files uploaded by one organization can be up
to 1 TB.
- The Assistants API supports files up to 2 million tokens and of specific file
types. See the
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools)
for details.
- The Fine-tuning API only supports `.jsonl` files. The input also has certain
required formats for fine-tuning
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input)
or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
models.
- The Batch API only supports `.jsonl` files up to 200 MB in size. The input
also has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).
Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
Args:
file: The File object (not file name) to be uploaded.
purpose: The intended purpose of the uploaded file. One of: - `assistants`: Used in the
Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
Flexible file type for any purpose - `evals`: Used for eval data sets
expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
after 30 days and all other files are persisted until they are manually deleted.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"file": file,
"purpose": purpose,
"expires_after": expires_after,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/files",
body=await async_maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
async def retrieve(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileObject:
"""
Returns information about a specific file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileObject,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
purpose: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]:
"""Returns a list of files.
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
10,000, and the default is 10,000.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
purpose: Only return files with the given purpose.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/files",
page=AsyncCursorPage[FileObject],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"purpose": purpose,
},
file_list_params.FileListParams,
),
),
model=FileObject,
)
async def delete(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileDeleted:
"""
Delete a file and remove it from all vector stores.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._delete(
f"/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileDeleted,
)
async def content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
@typing_extensions.deprecated("The `.content()` method should be used instead")
async def retrieve_content(
self,
file_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> str:
"""
Returns the contents of the specified file.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
f"/files/{file_id}/content",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=str,
)
async def wait_for_processing(
self,
id: str,
*,
poll_interval: float = 5.0,
max_wait_seconds: float = 30 * 60,
) -> FileObject:
"""Waits for the given file to be processed, default timeout is 30 mins."""
TERMINAL_STATES = {"processed", "error", "deleted"}
start = time.time()
file = await self.retrieve(id)
while file.status not in TERMINAL_STATES:
await self._sleep(poll_interval)
file = await self.retrieve(id)
if time.time() - start > max_wait_seconds:
raise RuntimeError(
f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds."
)
return file
| AsyncFiles |
python | pytorch__pytorch | test/test_autograd.py | {
"start": 379190,
"end": 392359
} | class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def forward(ctx, gO):
return gO.clone()
def get_out():
inp = torch.rand(2, requires_grad=True)
# The python function is first so that it runs
# last in the backward pass
right = Foo.apply(inp)
# An op that creates new memory
left1 = inp.clone()
# An op that saves its input
left2 = left1 ** 2
# Inplace modify so that the backward for
# left2 always raises an error
left1 += 1
# An op that takes both side as input.
# After running, both side's last op will be in
# the ready queue
# And the op for left will run first as it was
# executed last during the forward
out = left2 + right
return out
# Nothing should be global variables here as, from what
# I can see, python leaks all the global objects
get_out().sum().backward()
# This used to deadlock when the PyNode is being destroyed after
# the error is raised.
"""
try:
subprocess.check_output(
[sys.executable, "-c", script],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),
# It is ok to have an extra long timeout here as a timeout means the test failed
timeout=20,
)
except subprocess.TimeoutExpired as e:
self.fail(
msg="Example code timed out! See the code sample in the test for details."
)
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Sometimes we segfault instead of deadlocking
self.fail("Subprocess exited with a fatal signal")
else:
err_msg = (
"RuntimeError: one of the variables needed for gradient computation"
)
self.assertTrue(err_msg in e.output.decode("utf-8"))
def test_view_func_replay(self):
with torch.autograd._force_original_view_tracking(True):
def _assert_match_metadata(a, b):
self.assertEqual(a.size(), b.size())
self.assertEqual(a.stride(), b.stride())
self.assertEqual(a.storage_offset(), b.storage_offset())
self.assertEqual(a.device, b.device)
self.assertEqual(a.dtype, b.dtype)
def _test_fn(fn, inp, *args, use_unsafe_view_func=False):
outs = fn(inp, *args)
# handle functions that return multiple views (e.g. split)
if isinstance(outs, torch.Tensor):
outs = [outs]
for out in outs:
self.assertTrue(out._is_view())
self.assertTrue(out._base is inp)
# forward view_func
new_inp = inp.clone()
_assert_match_metadata(new_inp, inp)
if use_unsafe_view_func:
new_out = out._view_func_unsafe(new_inp)
else:
new_out = out._view_func(new_inp)
_assert_match_metadata(new_out, out)
self.assertEqual(new_out, out)
# reverse view_func
new_out = out.detach()
new_inp = out._rev_view_func_unsafe(new_out)
_assert_match_metadata(new_inp, inp)
self.assertTrue(new_inp._is_view())
self.assertTrue(new_inp._base is new_out)
# test individual view ops
_test_fn(torch.ops.aten.alias.default, torch.rand(2, 2))
_test_fn(torch.as_strided, torch.rand(2, 2), (4,), (1,))
_test_fn(torch.chunk, torch.rand(2, 4), 2, -1)
_test_fn(torch.diagonal, torch.rand(4, 4))
_test_fn(torch.ops.aten.expand.default, torch.rand(4, 1), (-1, 3))
_test_fn(torch.narrow, torch.rand(2, 2), 0, 1, 1)
_test_fn(torch.permute, torch.rand(2, 3, 4), (1, 0, 2))
_test_fn(torch.select, torch.rand(2, 2), 0, 0)
_test_fn(torch.ops.aten.slice.Tensor, torch.rand(2, 2), 1, 1, 2)
_test_fn(torch.split, torch.rand(2, 2), 1)
_test_fn(torch.split_with_sizes, torch.rand(2, 4), [1, 3], -1)
_test_fn(torch.squeeze, torch.rand(2, 1, 4))
_test_fn(torch.squeeze, torch.rand(2, 1, 4), 1)
_test_fn(torch.squeeze, torch.rand(2, 1, 1, 4), [1, 2])
_test_fn(torch.t, torch.rand(2, 4))
_test_fn(torch.transpose, torch.rand(2, 4), 0, 1)
_test_fn(torch.unbind, torch.rand(1, 5))
_test_fn(torch.ops.aten.unfold.default, torch.rand(1, 5), 1, 3, 2)
_test_fn(torch.unsqueeze, torch.rand(2, 4), -2)
_test_fn(torch.ops.aten.view.default, torch.rand(2, 10), (-1, 5, 2))
_test_fn(torch.view_as_complex, torch.rand(2, 2))
_test_fn(torch.view_as_real, torch.rand(2, 2, dtype=torch.cfloat))
# test view chains
_test_fn(
lambda x: x.unsqueeze(-1).transpose(-1, -2).squeeze(1),
torch.randn(2, 4),
)
_test_fn(
lambda x: x.chunk(2, -1)[0].transpose(0, 1).unsqueeze(-1),
torch.randn(2, 3, 4),
)
_test_fn(
lambda x: x.split_with_sizes([1, 3], -1)[0].chunk(2, 0),
torch.randn(2, 3, 4),
)
# chains with missing view_func()s use as_strided() to cover the gaps
def chain_with_only_parent_view_func(x):
with torch.autograd._force_original_view_tracking(True):
x = x.split_with_sizes([1, 3], -1)[0]
with torch.autograd._force_original_view_tracking(False):
x = x.chunk(2, 0)
return x
_test_fn(chain_with_only_parent_view_func, torch.randn(2, 3, 4))
def chain_with_only_current_view_func(x):
with torch.autograd._force_original_view_tracking(False):
x = x.split_with_sizes([1, 3], -1)[0]
with torch.autograd._force_original_view_tracking(True):
x = x.chunk(2, 0)
return x
_test_fn(chain_with_only_current_view_func, torch.randn(2, 3, 4))
# TODO: Move this somewhere else
# test NT views
from torch.nested._internal.nested_tensor import (
nested_view_from_values_offsets,
)
values = torch.randn(10, 5)
offsets = torch.tensor([0, 3, 6, 10])
_test_fn(nested_view_from_values_offsets, values, offsets)
nt = nested_view_from_values_offsets(values, offsets).detach().clone()
_test_fn(
torch.ops.aten._nested_get_values.default, nt, use_unsafe_view_func=True
)
def chain_nt_to_dense_back_and_forth(nt):
# NJT1 -> dense -> NJT2 -> dense
offsets2 = nt.offsets().detach().clone()
return nested_view_from_values_offsets(nt.values(), offsets2).values()
_test_fn(chain_nt_to_dense_back_and_forth, nt, use_unsafe_view_func=True)
def chain_dense_to_nt_back_and_forth(values, offsets):
offsets2 = offsets.detach().clone()
# dense -> NJT1 -> dense -> NJT2
return nested_view_from_values_offsets(
nested_view_from_values_offsets(values, offsets).values(), offsets2
)
_test_fn(
chain_dense_to_nt_back_and_forth,
values,
offsets,
use_unsafe_view_func=True,
)
def test_view_func_replay_with_modified_state(self):
    """Replaying a saved ``_view_func`` with visitor callbacks must use the
    modified state for that one call only, leaving the saved state intact."""
    with torch.autograd._force_original_view_tracking(True):
        # --- SymInt state: select() records the selected index ---
        base = torch.randn(3, 4, 5)
        view = base.select(1, 2)

        def bump_saved_index(s):
            # shift the recorded index by one
            return s + 1

        fresh_base = torch.randn_like(base)
        replayed = view._view_func(fresh_base, symint_visitor_fn=bump_saved_index)
        # the visitor shifted the saved index 2 -> 3 for this replay
        self.assertEqual(replayed, fresh_base.select(1, 3))
        # without a visitor, the originally saved index applies again
        self.assertEqual(view._view_func(fresh_base), fresh_base.select(1, 2))

        # --- Tensor state: slice_inverse() is currently the only view op
        # that saves a tensor ---
        base = torch.randn(3, 4, 5)
        sliced = base[:, 2:3, :].detach()
        view = torch.ops.aten.slice_inverse(sliced, base, 1, 2, 3, 1)

        smaller_shape = (1, 2, 3)

        def shrink_saved_tensor(t):
            # hand back a tensor with a smaller shape than the saved one
            return torch.randn(*smaller_shape)

        fresh_sliced = torch.ones_like(base)[:, 2:3, :].detach()
        replayed = view._view_func(fresh_sliced, tensor_visitor_fn=shrink_saved_tensor)
        self.assertEqual(replayed.shape, smaller_shape)
        self.assertEqual(
            replayed, fresh_sliced.as_strided(smaller_shape, (6, 3, 1))
        )
        # saved tensor state reverts once the replay is done
        self.assertEqual(view._view_func(sliced), base)
def test_setup_context_when_forward_has_default_args(self):
    """A default argument of ``forward()`` must still be delivered to
    ``setup_context()`` through ``inputs``, for both the staticmethod and
    the classmethod spellings of a custom Function."""

    class PowStatic(Function):
        @staticmethod
        def forward(x, y=3):
            return torch.pow(x, y)

        @staticmethod
        def setup_context(ctx, inputs, output):
            x, y = inputs
            ctx.save_for_backward(x)
            ctx.y = y

        @staticmethod
        def backward(ctx, grad_out):
            (x,) = ctx.saved_tensors
            y = ctx.y
            # d/dx x**y = y * x**(y-1); the default arg gets no gradient
            return grad_out * y * torch.pow(x, y - 1), None

    class PowClassmethod(Function):
        @classmethod
        def forward(cls, x, y=3):
            return torch.pow(x, y)

        @classmethod
        def setup_context(cls, ctx, inputs, output):
            x, y = inputs
            ctx.save_for_backward(x)
            ctx.y = y

        @classmethod
        def backward(cls, ctx, grad_out):
            (x,) = ctx.saved_tensors
            y = ctx.y
            return grad_out * y * torch.pow(x, y - 1), None

    x = torch.tensor(2.0, requires_grad=True)
    expected_out = torch.tensor(8.0)    # 2 ** 3 with the default y=3
    expected_grad = torch.tensor(12.0)  # 3 * 2 ** 2

    out_static = PowStatic.apply(x)
    (grad_static,) = torch.autograd.grad(out_static, x)
    out_cls = PowClassmethod.apply(x)
    (grad_cls,) = torch.autograd.grad(out_cls, x)

    self.assertEqual(expected_out, out_static)
    self.assertEqual(expected_grad, grad_static)
    self.assertEqual(expected_out, out_cls)
    self.assertEqual(expected_grad, grad_cls)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_gradcheck_default_device_placement_context(self):
    """Fast-mode gradcheck builds its random probe vector with a CPU
    generator; that must keep working even when the user has set a
    non-CPU default device."""
    with torch.device("cuda"):
        t = torch.randn(3, dtype=torch.double, requires_grad=True)

        def square(inp):
            return inp**2.0

        self.assertTrue(gradcheck(square, t, fast_mode=True))
def test_grad_thread_safety(self):
    """Concurrent reads of ``tensor.grad`` from several threads must all
    observe the very same grad tensor object (no per-thread re-wrapping)."""
    import threading
    from concurrent.futures import ThreadPoolExecutor

    NUM_ITERS = 10
    NUM_THREADS = 4

    # Concurrent accesses to tensor.grad; the barrier lines the threads up
    # so the reads actually race. (Previous comment wrongly referenced
    # untyped_storage() — this test races .grad accesses.)
    def access_grad(tensor, barrier):
        barrier.wait()
        return weakref.ref(tensor.grad)

    for _ in range(NUM_ITERS):
        tensor = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
        (tensor**2).sum().backward()

        barrier = threading.Barrier(NUM_THREADS)
        with ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
            futures = [
                executor.submit(access_grad, tensor, barrier)
                for _ in range(NUM_THREADS)
            ]

        # Check that all the grad tensors returned were the same object.
        for future in futures:
            self.assertEqual(future.result()(), tensor.grad)
        self.assertIsNotNone(tensor.grad)
def index_perm_variable(shape, max_indices):
    """Return a tensor of the given shape holding distinct indices drawn
    from ``[0, max_indices)``.

    ``shape`` may be a plain int (treated as a 1-D length) or a tuple.
    """
    dims = shape if isinstance(shape, tuple) else (shape,)
    numel = reduce(mul, dims)
    # take the first `numel` entries of a random permutation, then reshape
    perm = torch.randperm(max_indices)
    return perm.narrow(0, 0, numel).view(dims)
def bernoulli_scalar():
    """Return a 0-dim uint8 tensor filled in-place with a fair coin flip."""
    out = torch.zeros((), dtype=torch.uint8)
    return out.bernoulli_()
| Foo |
python | doocs__leetcode | solution/3500-3599/3567.Minimum Absolute Difference in Sliding Submatrix/Solution.py | {
"start": 0,
"end": 590
} | class ____:
def minAbsDiff(self, grid: List[List[int]], k: int) -> List[List[int]]:
m, n = len(grid), len(grid[0])
ans = [[0] * (n - k + 1) for _ in range(m - k + 1)]
for i in range(m - k + 1):
for j in range(n - k + 1):
nums = []
for x in range(i, i + k):
for y in range(j, j + k):
nums.append(grid[x][y])
nums.sort()
d = min((abs(a - b) for a, b in pairwise(nums) if a != b), default=0)
ans[i][j] = d
return ans
| Solution |
python | django__django | tests/admin_views/test_adminsite.py | {
"start": 407,
"end": 782
} | class ____(admin.AdminSite):
site_title = "Custom title"
site_header = "Custom site"
custom_site = CustomAdminSite(name="test_custom_adminsite")
custom_site.register(User)
urlpatterns = [
path("test_admin/admin/", site.urls),
path("test_custom_admin/admin/", custom_site.urls),
]
@override_settings(ROOT_URLCONF="admin_views.test_adminsite")
| CustomAdminSite |
python | getsentry__sentry | tests/sentry/integrations/msteams/test_action_state_change.py | {
"start": 1262,
"end": 15989
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(is_superuser=False)
owner = self.create_user()
self.org = self.create_organization(owner=owner)
self.team = self.create_team(organization=self.org, members=[self.user])
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration = self.create_provider_integration(
provider="msteams",
name="Fellowship of the Ring",
external_id="f3ll0wsh1p",
metadata={
"service_url": "https://smba.trafficmanager.net/amer",
"access_token": "y0u_5h4ll_n07_p455",
"expires_at": int(time.time()) + 86400,
},
)
self.create_organization_integration(
organization_id=self.org.id, integration=self.integration
)
self.idp = self.create_identity_provider(type="msteams", external_id="f3ll0wsh1p")
self.identity = Identity.objects.create(
external_id="g4nd4lf",
idp=self.idp,
user=self.user,
status=IdentityStatus.VALID,
scopes=[],
)
self.project1 = self.create_project(organization=self.org)
self.event1 = self.store_event(
data={"message": "oh no"},
project_id=self.project1.id,
)
assert self.event1.group is not None
self.group1 = self.event1.group
def post_webhook(
self,
action_type: str = "dummy",
user_id: str = "g4nd4lf",
team_id: str = "f3ll0wsh1p",
tenant_id: str = "m17hr4nd1r",
conversation_type: str = "channel",
channel_id: str | None = None,
group_id: str | None = None,
resolve_input: str | None = None,
archive_input: str | None = None,
assign_input: str | None = None,
) -> Response:
replyToId = "12345"
channel_data = {"tenant": {"id": tenant_id}}
if conversation_type == "channel":
conversation_id = channel_id if channel_id else team_id
channel_data["team"] = {"id": team_id}
channel_data["channel"] = {"id": conversation_id}
else:
conversation_id = "user_conversation_id"
responses.add(
method=responses.PUT,
url="https://smba.trafficmanager.net/amer/v3/conversations/%s/activities/%s"
% (conversation_id, replyToId),
json={},
)
payload = {
"type": "message",
"from": {"id": user_id},
"channelData": channel_data,
"conversation": {"conversationType": conversation_type, "id": conversation_id},
"value": {
"payload": {
"groupId": group_id or self.group1.id,
"eventId": self.event1.event_id,
"actionType": action_type,
"rules": [],
"integrationId": self.integration.id,
},
"resolveInput": resolve_input,
"archiveInput": archive_input,
"assignInput": assign_input,
},
"replyToId": replyToId,
}
webhook_url = reverse("sentry-integration-msteams-webhooks")
return self.client.post(webhook_url, data=payload)
@patch("sentry.integrations.msteams.webhook.verify_signature", return_vaue=True)
@patch("sentry.integrations.msteams.link_identity.sign")
@responses.activate
def test_ask_linking(self, sign: MagicMock, verify: MagicMock) -> None:
sign.return_value = "signed_parameters"
def user_conversation_id_callback(
request: PreparedRequest,
) -> tuple[int, dict[str, str], str]:
assert request.body is not None
payload = orjson.loads(request.body)
if payload["members"] == [{"id": "s4ur0n"}] and payload["channelData"] == {
"tenant": {"id": "7h3_gr347"}
}:
return 200, {}, orjson.dumps({"id": "d4rk_l0rd"}).decode()
return 404, {}, orjson.dumps({}).decode()
responses.add_callback(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations",
callback=user_conversation_id_callback,
)
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/d4rk_l0rd/activities",
status=200,
json={},
)
resp = self.post_webhook(user_id="s4ur0n", tenant_id="7h3_gr347")
# assert sign is called with the right arguments
assert sign.call_args.kwargs == {
"salt": SALT,
"integration_id": self.integration.id,
"organization_id": self.org.id,
"teams_user_id": "s4ur0n",
"team_id": "f3ll0wsh1p",
"tenant_id": "7h3_gr347",
}
linking_url = build_linking_url(
self.integration, self.org, "s4ur0n", "f3ll0wsh1p", "7h3_gr347"
)
data = orjson.loads(responses.calls[1].request.body)
assert resp.status_code == 201
assert "attachments" in data
assert data["attachments"][0]["content"] == build_linking_card(linking_url)
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_archive_issue(self, verify: MagicMock) -> None:
resp = self.post_webhook(action_type=ACTION_TYPE.ARCHIVE, archive_input="-1")
self.group1 = Group.objects.get(id=self.group1.id)
assert resp.status_code == 200, resp.content
assert self.group1.get_status() == GroupStatus.IGNORED
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_no_archive_input(self, verify: MagicMock) -> None:
resp = self.post_webhook(action_type=ACTION_TYPE.ARCHIVE, archive_input="")
self.group1 = Group.objects.get(id=self.group1.id)
assert resp.status_code == 200, resp.content
assert self.group1.get_status() == GroupStatus.UNRESOLVED
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_archive_issue_with_additional_user_auth(self, verify: MagicMock) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
auth_idp = AuthProvider.objects.create(organization_id=self.org.id, provider="nobody")
AuthIdentity.objects.create(auth_provider=auth_idp, user=self.user)
resp = self.post_webhook(action_type=ACTION_TYPE.ARCHIVE, archive_input="-1")
self.group1 = Group.objects.get(id=self.group1.id)
assert resp.status_code == 200, resp.content
assert self.group1.get_status() == GroupStatus.IGNORED
@responses.activate
@patch.object(ApiClient, "put")
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_archive_with_params(self, verify: MagicMock, client_put: MagicMock) -> None:
client_put.return_value = HttpResponse(status=200)
self.post_webhook(action_type=ACTION_TYPE.ARCHIVE, archive_input="100")
expected_data = {"status": "ignored", "statusDetails": {"ignoreCount": 100}}
assert_mock_called_once_with_partial(client_put, data=expected_data)
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_assign_to_team(self, verify: MagicMock) -> None:
resp = self.post_webhook(
action_type=ACTION_TYPE.ASSIGN, assign_input=f"team:{self.team.id}"
)
assert resp.status_code == 200, resp.content
assert GroupAssignee.objects.filter(group=self.group1, team=self.team).exists()
activity = Activity.objects.get(group=self.group1)
assert activity.data == {
"assignee": str(self.team.id),
"assigneeEmail": None,
"assigneeName": self.team.name,
"assigneeType": "team",
"integration": ActivityIntegration.MSTEAMS.value,
}
@responses.activate
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_assign_to_me(self, verify: MagicMock, mock_record: MagicMock) -> None:
resp = self.post_webhook(action_type=ACTION_TYPE.ASSIGN, assign_input="ME")
assert resp.status_code == 200, resp.content
assert GroupAssignee.objects.filter(group=self.group1, user_id=self.user.id).exists()
assert b"Unassign" in responses.calls[0].request.body
assert f"Assigned to {self.user.email}".encode() in responses.calls[0].request.body
activity = Activity.objects.get(group=self.group1)
assert activity.data == {
"assignee": str(self.user.id),
"assigneeEmail": self.user.email,
"assigneeName": self.user.name,
"assigneeType": "user",
"integration": ActivityIntegration.MSTEAMS.value,
}
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_assign_to_me_personal_message(self, verify: MagicMock) -> None:
resp = self.post_webhook(
action_type=ACTION_TYPE.ASSIGN, assign_input="ME", conversation_type="personal"
)
assert resp.status_code == 200, resp.content
assert GroupAssignee.objects.filter(group=self.group1, user_id=self.user.id).exists()
assert b"Unassign" in responses.calls[0].request.body
assert "user_conversation_id" in responses.calls[0].request.url
assert f"Assigned to {self.user.email}".encode() in responses.calls[0].request.body
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_assign_to_me_channel_message(self, verify: MagicMock) -> None:
resp = self.post_webhook(
action_type=ACTION_TYPE.ASSIGN, assign_input="ME", channel_id="some_channel_id"
)
assert resp.status_code == 200, resp.content
assert GroupAssignee.objects.filter(group=self.group1, user_id=self.user.id).exists()
assert b"Unassign" in responses.calls[0].request.body
assert "some_channel_id" in responses.calls[0].request.url
assert f"Assigned to {self.user.email}".encode() in responses.calls[0].request.body
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_assign_to_me_multiple_identities(self, verify: MagicMock) -> None:
org2 = self.create_organization(owner=None)
with assume_test_silo_mode(SiloMode.CONTROL):
integration2 = self.create_provider_integration(
provider="msteams",
name="Army of Mordor",
external_id="54rum4n",
metadata={
"service_url": "https://smba.trafficmanager.net/amer",
"access_token": "y0u_h4v3_ch053n_d347h",
"expires_at": int(time.time()) + 86400,
},
)
self.create_organization_integration(organization_id=org2.id, integration=integration2)
idp2 = self.create_identity_provider(type="msteams", external_id="54rum4n")
Identity.objects.create(
external_id="7h3_gr3y",
idp=idp2,
user=self.user,
status=IdentityStatus.VALID,
scopes=[],
)
resp = self.post_webhook(action_type=ACTION_TYPE.ASSIGN, assign_input="ME")
assert resp.status_code == 200, resp.content
assert GroupAssignee.objects.filter(group=self.group1, user_id=self.user.id).exists()
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_resolve_issue(self, verify: MagicMock) -> None:
resp = self.post_webhook(action_type=ACTION_TYPE.RESOLVE, resolve_input="resolved")
self.group1 = Group.objects.get(id=self.group1.id)
assert resp.status_code == 200, resp.content
assert self.group1.get_status() == GroupStatus.RESOLVED
assert b"Unresolve" in responses.calls[0].request.body
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_no_resolve_input(self, verify: MagicMock) -> None:
resp = self.post_webhook(action_type=ACTION_TYPE.RESOLVE, resolve_input="")
self.group1 = Group.objects.get(id=self.group1.id)
assert resp.status_code == 200, resp.content
assert self.group1.get_status() == GroupStatus.UNRESOLVED
assert b"Resolve" in responses.calls[0].request.body
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_unassign_issue(self, verify: MagicMock) -> None:
GroupAssignee.objects.create(group=self.group1, project=self.project1, user_id=self.user.id)
resp = self.post_webhook(action_type=ACTION_TYPE.UNASSIGN, resolve_input="resolved")
self.group1 = Group.objects.get(id=self.group1.id)
assert resp.status_code == 200, resp.content
assert not GroupAssignee.objects.filter(group=self.group1, user_id=self.user.id).exists()
assert b"Assign" in responses.calls[0].request.body
@responses.activate
@patch.object(ApiClient, "put")
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_resolve_with_params(self, verify: MagicMock, client_put: MagicMock) -> None:
client_put.return_value = HttpResponse(status=200)
self.post_webhook(
action_type=ACTION_TYPE.RESOLVE, resolve_input="resolved:inCurrentRelease"
)
expected_data = {"status": "resolved", "statusDetails": {"inRelease": "latest"}}
assert_mock_called_once_with_partial(client_put, data=expected_data)
@responses.activate
@patch("sentry.integrations.msteams.webhook.verify_signature", return_value=True)
def test_no_integration(self, verify: MagicMock) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.delete()
resp = self.post_webhook()
assert resp.status_code == 404
| StatusActionTest |
python | PyCQA__pylint | tests/functional/a/assignment/assignment_from_no_return.py | {
"start": 306,
"end": 677
} | class ____:
def some_method(self):
pass
@decorate
def some_other_decorated_method(self):
pass
def some_other_method(self):
value = self.some_method() # [assignment-from-no-return]
other_value = self.some_other_decorated_method()
return value + other_value
VALUE = some_func() # [assignment-from-no-return]
| Class |
python | falconry__falcon | tests/test_default_router.py | {
"start": 3677,
"end": 25146
} | class ____:
def __init__(self, times, eggs=False):
self._times = times
self._eggs = eggs
def convert(self, fragment):
item = fragment
if self._eggs:
item += '&eggs'
return ', '.join(item for i in range(self._times))
# =====================================================================
# Regression tests for use cases reported by users
# =====================================================================
def test_user_regression_versioned_url():
router = DefaultRouter()
router.add_route('/{version}/messages', ResourceWithId(2))
resource, __, __, __ = router.find('/v2/messages')
assert resource.resource_id == 2
router.add_route('/v2', ResourceWithId(1))
resource, __, __, __ = router.find('/v2')
assert resource.resource_id == 1
resource, __, __, __ = router.find('/v2/messages')
assert resource.resource_id == 2
resource, __, __, __ = router.find('/v1/messages')
assert resource.resource_id == 2
route = router.find('/v1')
assert route is None
def test_user_regression_recipes():
router = DefaultRouter()
router.add_route('/recipes/{activity}/{type_id}', ResourceWithId(1))
router.add_route('/recipes/baking', ResourceWithId(2))
resource, __, __, __ = router.find('/recipes/baking/4242')
assert resource.resource_id == 1
resource, __, __, __ = router.find('/recipes/baking')
assert resource.resource_id == 2
route = router.find('/recipes/grilling')
assert route is None
@pytest.mark.parametrize(
'uri_template,path,expected_params',
[
(
'/serviceRoot/People|{field}',
'/serviceRoot/People|susie',
{'field': 'susie'},
),
(
'/serviceRoot/People[{field}]',
"/serviceRoot/People['calvin']",
{'field': "'calvin'"},
),
(
'/serviceRoot/People({field})',
"/serviceRoot/People('hobbes')",
{'field': "'hobbes'"},
),
(
'/serviceRoot/People({field})',
"/serviceRoot/People('hob)bes')",
{'field': "'hob)bes'"},
),
(
'/serviceRoot/People({field})(z)',
'/serviceRoot/People(hobbes)(z)',
{'field': 'hobbes'},
),
(
"/serviceRoot/People('{field}')",
"/serviceRoot/People('rosalyn')",
{'field': 'rosalyn'},
),
('/^{field}', '/^42', {'field': '42'}),
('/+{field}', '/+42', {'field': '42'}),
(
'/foo/{first}_{second}/bar',
'/foo/abc_def_ghijk/bar',
# NOTE(kgriffs): The regex pattern is greedy, so this is
# expected. We can not change this behavior in a minor
# release, since it would be a breaking change. If there
# is enough demand for it, we could introduce an option
# to toggle this behavior.
{'first': 'abc_def', 'second': 'ghijk'},
),
# NOTE(kgriffs): Why someone would use a question mark like this
# I have no idea (esp. since it would have to be encoded to avoid
# being mistaken for the query string separator). Including it only
# for completeness.
('/items/{x}?{y}', '/items/1080?768', {'x': '1080', 'y': '768'}),
('/items/{x}|{y}', '/items/1080|768', {'x': '1080', 'y': '768'}),
('/items/{x},{y}', '/items/1080,768', {'x': '1080', 'y': '768'}),
('/items/{x}^^{y}', '/items/1080^^768', {'x': '1080', 'y': '768'}),
('/items/{x}*{y}*', '/items/1080*768*', {'x': '1080', 'y': '768'}),
('/thing-2/something+{field}+', '/thing-2/something+42+', {'field': '42'}),
(
'/thing-2/something*{field}/notes',
'/thing-2/something*42/notes',
{'field': '42'},
),
(
'/thing-2/something+{field}|{q}/notes',
'/thing-2/something+else|z/notes',
{'field': 'else', 'q': 'z'},
),
(
"serviceRoot/$metadata#Airports('{field}')/Name",
"serviceRoot/$metadata#Airports('KSFO')/Name",
{'field': 'KSFO'},
),
],
)
def test_user_regression_special_chars(uri_template, path, expected_params):
router = DefaultRouter()
router.add_route(uri_template, ResourceWithId(1))
route = router.find(path)
assert route is not None
resource, __, params, __ = route
assert resource.resource_id == 1
assert params == expected_params
# =====================================================================
# Other tests
# =====================================================================
@pytest.mark.parametrize('uri_template', [{}, set(), object()])
def test_not_str(asgi, util, uri_template):
app = util.create_app(asgi)
with pytest.raises(TypeError):
app.add_route(uri_template, ResourceWithId(-1))
def test_root_path():
router = DefaultRouter()
router.add_route('/', ResourceWithId(42))
resource, __, __, __ = router.find('/')
assert resource.resource_id == 42
expected_src = textwrap.dedent(
"""
def find(path, return_values, patterns, converters, params):
path_len = len(path)
if path_len > 0:
if path[0] == '':
if path_len == 1:
return return_values[0]
return None
return None
return None
"""
).strip()
assert router.finder_src == expected_src
@pytest.mark.parametrize(
'uri_template',
[
'/{field}{field}',
'/{field}...{field}',
'/{field}/{another}/{field}',
'/{field}/something/something/{field}/something',
],
)
def test_duplicate_field_names(uri_template):
router = DefaultRouter()
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(1))
@pytest.mark.parametrize(
'uri_template,path',
[
('/items/thing', '/items/t'),
('/items/{x}|{y}|', '/items/1080|768'),
('/items/{x}*{y}foo', '/items/1080*768foobar'),
('/items/{x}*768*', '/items/1080*768***'),
],
)
def test_match_entire_path(uri_template, path):
router = DefaultRouter()
router.add_route(uri_template, ResourceWithId(1))
route = router.find(path)
assert route is None
@pytest.mark.parametrize(
'uri_template',
[
'/teams/{conflict}', # simple vs simple
'/emojis/signs/{id_too}', # another simple vs simple
'/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}:{conflict}',
'/teams/{id:int}/settings', # converted vs. non-converted
],
)
def test_conflict(router, uri_template):
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(-1))
@pytest.mark.parametrize(
'uri_template',
[
'/repos/{org}/{repo}/compare/{simple_vs_complex}',
'/repos/{complex}.{vs}.{simple}',
'/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}/full',
],
)
def test_non_conflict(router, uri_template):
router.add_route(uri_template, ResourceWithId(-1))
@pytest.mark.parametrize(
'uri_template',
[
# Missing field name
'/{}',
'/repos/{org}/{repo}/compare/{}',
'/repos/{complex}.{}.{thing}',
# Field names must be valid Python identifiers
'/{9v}',
'/{524hello}/world',
'/hello/{1world}',
'/repos/{complex}.{9v}.{thing}/etc',
'/{*kgriffs}',
'/{@kgriffs}',
'/repos/{complex}.{v}.{@thing}/etc',
'/{-kgriffs}',
'/repos/{complex}.{-v}.{thing}/etc',
'/repos/{simple-thing}/etc',
# Neither fields nor literal segments may not contain whitespace
'/this and that',
'/this\tand\tthat/this\nand\nthat/{thing }/world',
'/{thing\t}/world',
'/{\nthing}/world',
'/{th\ving}/world',
'/{ thing}/world',
'/{ thing }/world',
'/{thing}/wo rld',
'/{thing} /world',
'/repos/{or g}/{repo}/compare/{thing}',
'/repos/{org}/{repo}/compare/{th\ting}',
],
)
def test_invalid_field_name(router, uri_template):
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(-1))
def test_print_src(router):
"""Diagnostic test that simply prints the router's find() source code.
Example:
$ tox -e py3_debug -- -k test_print_src -s
"""
print('\n\n' + router.finder_src + '\n')
def test_override(router):
router.add_route('/emojis/signs/0', ResourceWithId(-1))
resource, __, __, __ = router.find('/emojis/signs/0')
assert resource.resource_id == -1
def test_literal_segment(router):
resource, __, __, __ = router.find('/emojis/signs/0')
assert resource.resource_id == 12
resource, __, __, __ = router.find('/emojis/signs/1')
assert resource.resource_id == 13
resource, __, __, __ = router.find('/emojis/signs/42')
assert resource.resource_id == 14
resource, __, __, __ = router.find('/emojis/signs/42/small.jpg')
assert resource.resource_id == 23
route = router.find('/emojis/signs/1/small')
assert route is None
@pytest.mark.parametrize(
'path',
[
'/teams',
'/emojis/signs',
'/gists',
'/gists/42',
],
)
def test_dead_segment(router, path):
route = router.find(path)
assert route is None
@pytest.mark.parametrize(
'path',
[
'/repos/racker/falcon/compare/foo',
'/repos/racker/falcon/compare/foo/full',
],
)
def test_malformed_pattern(router, path):
route = router.find(path)
assert route is None
def test_literal(router):
resource, __, __, __ = router.find('/user/memberships')
assert resource.resource_id == 8
@pytest.mark.parametrize(
'path,expected_params',
[
('/cvt/teams/007', {'id': 7}),
('/cvt/teams/1234/members', {'id': 1234}),
('/cvt/teams/default/members/700-5', {'id': 700, 'tenure': 5}),
(
'/cvt/repos/org/repo/compare/xkcd:353',
{'org': 'org', 'repo': 'repo', 'usr0': 'xkcd', 'branch0': 353},
),
(
'/cvt/repos/org/repo/compare/gunmachan:1234...kumamon:5678/part',
{
'org': 'org',
'repo': 'repo',
'usr0': 'gunmachan',
'branch0': 1234,
'usr1': 'kumamon',
'branch1': 5678,
},
),
(
'/cvt/repos/xkcd/353/compare/susan:0001/full',
{'org': 'xkcd', 'repo': '353', 'usr0': 'susan', 'branch0': 1},
),
],
)
def test_converters(router, path, expected_params):
__, __, params, __ = router.find(path)
assert params == expected_params
@pytest.mark.parametrize(
'uri_template',
[
'/foo/{bar:int(0)}',
'/foo/{bar:int(num_digits=0)}',
'/foo/{bar:int(-1)}/baz',
'/foo/{bar:int(num_digits=-1)}/baz',
],
)
def test_converters_with_invalid_options(router, uri_template):
# NOTE(kgriffs): Sanity-check that errors are properly bubbled up
# when calling add_route(). Additional checks can be found
# in test_uri_converters.py
with pytest.raises(ValueError, match='Cannot instantiate converter') as e:
router.add_route(uri_template, ResourceWithId(1))
assert e.value.__cause__ is not None
@pytest.mark.parametrize(
'uri_template',
[
'/foo/{bar:}',
'/foo/{bar:unknown}/baz',
],
)
def test_converters_malformed_specification(router, uri_template):
with pytest.raises(ValueError):
router.add_route(uri_template, ResourceWithId(1))
def test_variable(router):
resource, __, params, __ = router.find('/teams/42')
assert resource.resource_id == 6
assert params == {'id': '42'}
__, __, params, __ = router.find('/emojis/signs/stop')
assert params == {'id': 'stop'}
__, __, params, __ = router.find('/gists/42/raw')
assert params == {'id': '42'}
__, __, params, __ = router.find('/images/42.gif')
assert params == {'id': '42'}
def test_single_character_field_name(router):
__, __, params, __ = router.find('/item/1234')
assert params == {'q': '1234'}
@pytest.mark.parametrize(
'path,expected_id',
[
('/teams/default', 19),
('/teams/default/members', 7),
('/cvt/teams/default', 31),
('/cvt/teams/default/members/1234-10', 32),
('/teams/1234', 6),
('/teams/1234/members', 7),
('/gists/first', 20),
('/gists/first/raw', 18),
('/gists/first/pdf', 21),
('/gists/1776/pdf', 21),
('/emojis/signs/78', 13),
('/emojis/signs/78/small.png', 24),
('/emojis/signs/78/small(png)', 25),
('/emojis/signs/78/small_png', 26),
],
)
def test_literal_vs_variable(router, path, expected_id):
resource, __, __, __ = router.find(path)
assert resource.resource_id == expected_id
@pytest.mark.parametrize(
'path',
[
# Misc.
'/this/does/not/exist',
'/user/bogus',
'/repos/racker/falcon/compare/johndoe:master...janedoe:dev/bogus',
# Literal vs variable (teams)
'/teams',
'/teams/42/members/undefined',
'/teams/42/undefined',
'/teams/42/undefined/segments',
'/teams/default/members/undefined',
'/teams/default/members/thing/undefined',
'/teams/default/members/thing/undefined/segments',
'/teams/default/undefined',
'/teams/default/undefined/segments',
# Literal vs. variable (converters)
'/cvt/teams/default/members', # 'default' can't be converted to an int
'/cvt/teams/NaN',
'/cvt/teams/default/members/NaN',
# Literal vs variable (emojis)
'/emojis/signs',
'/emojis/signs/0/small',
'/emojis/signs/0/undefined',
'/emojis/signs/0/undefined/segments',
'/emojis/signs/20/small',
'/emojis/signs/20/undefined',
'/emojis/signs/42/undefined',
'/emojis/signs/78/undefined',
],
)
def test_not_found(router, path):
route = router.find(path)
assert route is None
def test_subsegment_not_found(router):
route = router.find('/emojis/signs/0/x')
assert route is None
def test_multivar(router):
resource, __, params, __ = router.find('/repos/racker/falcon/commits')
assert resource.resource_id == 4
assert params == {'org': 'racker', 'repo': 'falcon'}
resource, __, params, __ = router.find('/repos/racker/falcon/compare/all')
assert resource.resource_id == 11
assert params == {'org': 'racker', 'repo': 'falcon'}
@pytest.mark.parametrize(
'url_postfix,resource_id',
[
('', 5),
('/full', 10),
('/part', 15),
],
)
def test_complex(router, url_postfix, resource_id):
uri = '/repos/racker/falcon/compare/johndoe:master...janedoe:dev'
resource, __, params, __ = router.find(uri + url_postfix)
assert resource.resource_id == resource_id
assert params == {
'org': 'racker',
'repo': 'falcon',
'usr0': 'johndoe',
'branch0': 'master',
'usr1': 'janedoe',
'branch1': 'dev',
}
@pytest.mark.parametrize(
    'url_postfix,resource_id,expected_template',
    [
        ('', 16, '/repos/{org}/{repo}/compare/{usr0}:{branch0}'),
        ('/full', 17, '/repos/{org}/{repo}/compare/{usr0}:{branch0}/full'),
    ],
)
def test_complex_alt(router, url_postfix, resource_id, expected_template):
    """The matched template string is reported alongside captured params."""
    uri = '/repos/falconry/falcon/compare/johndoe:master' + url_postfix
    found, __, captured, matched_template = router.find(uri)

    assert found.resource_id == resource_id
    assert matched_template == expected_template
    assert captured == {
        'org': 'falconry',
        'repo': 'falcon',
        'usr0': 'johndoe',
        'branch0': 'master',
    }
def test_options_converters_set(router):
    """A converter registered via item assignment is usable in templates."""
    router.options.converters['spam'] = SpamConverter
    router.add_route('/{food:spam(3, eggs=True)}', ResourceWithId(1))

    __, __, captured, __ = router.find('/spam')
    assert captured == {'food': 'spam&eggs, spam&eggs, spam&eggs'}
@pytest.mark.parametrize('converter_name', ['spam', 'spam_2'])
def test_options_converters_update(router, converter_name):
    """Converters registered in bulk via update() work under either name."""
    router.options.converters.update(
        {
            'spam': SpamConverter,
            'spam_2': SpamConverter,
        }
    )

    # Doubled braces keep the literal field syntax in the f-string.
    router.add_route(f'/{{food:{converter_name}(3, eggs=True)}}', ResourceWithId(1))

    __, __, captured, __ = router.find('/spam')
    assert captured == {'food': 'spam&eggs, spam&eggs, spam&eggs'}
@pytest.mark.parametrize(
    'name',
    [
        'has whitespace',
        'whitespace ',
        ' whitespace ',
        ' whitespace',
        'funky$character',
        '42istheanswer',
        'with-hyphen',
    ],
)
def test_options_converters_invalid_name(router, name):
    """Converter names that are not valid identifiers are rejected."""
    converters = router.options.converters
    with pytest.raises(ValueError):
        converters[name] = object
def test_options_converters_invalid_name_on_update(router):
    """update() validates every key; a single bad name fails the whole call."""
    bad_batch = {
        'valid_name': SpamConverter,
        '7eleven': SpamConverter,
    }
    with pytest.raises(ValueError):
        router.options.converters.update(bad_batch)
@pytest.fixture
def param_router():
    """Router with overlapping literal and variable branches.

    The routes are crafted so that a literal branch and a variable branch
    compete for the same URI prefix, exercising backtracking in find().
    """
    routes = (
        ('/c/foo/{bar}/baz', 1),
        ('/c/{foo}/bar/other', 2),
        ('/c/foo/{a:int}-{b}/a', 3),
        ('/upload/{service}/auth/token', 4),
        ('/upload/youtube/{project_id}/share', 5),
        ('/x/y/{a}.{b}/z', 6),
        ('/x/{y}/o.o/w', 7),
    )
    r = DefaultRouter()
    for template, num in routes:
        r.add_route(template, ResourceWithId(num))
    return r
@pytest.mark.parametrize(
    'route, expected, num',
    (
        ('/c/foo/arg/baz', {'bar': 'arg'}, 1),
        ('/c/foo/bar/other', {'foo': 'foo'}, 2),
        ('/c/foo/42-7/baz', {'bar': '42-7'}, 1),
        ('/upload/youtube/auth/token', {'service': 'youtube'}, 4),
        ('/x/y/o.o/w', {'y': 'y'}, 7),
    ),
)
def test_params_in_non_taken_branches(param_router, route, expected, num):
    """Params captured on abandoned branches must not leak into the result."""
    found, __, captured, __ = param_router.find(route)
    assert found.resource_id == num
    assert captured == expected
# Tests for the 'path' converter (captures the remainder of the URI path)
def test_capture_path_no_children():
    """A 'path'-converter segment must be terminal: child routes are rejected."""
    router = DefaultRouter()
    router.add_route('/foo/{bar:path}', ResourceWithId(1))
    src_before = router.finder_src

    for template in ('/foo/{bar:path}/child', '/{bar:path}/child'):
        expected_msg = (
            f'Cannot add route with template "{template}". '
            'Field name "bar" uses the converter "path"'
        )
        with pytest.raises(ValueError, match=expected_msg):
            router.add_route(template, ResourceWithId(1))

    # Failed additions must not mutate the compiled finder source.
    assert src_before == router.finder_src
@pytest.mark.parametrize(
    'template',
    (
        '/foo/{bar:path}-x',
        '/foo/x-{bar:path}',
        '/foo/{x}-{bar:path}',
        '/foo/{bar:path}-{x}',
    ),
)
def test_capture_path_complex(template):
    """The 'path' converter cannot share a segment with other tokens."""
    router = DefaultRouter()
    expected_msg = 'Cannot use converter "path" of variable "bar" in a template '
    with pytest.raises(ValueError, match=expected_msg):
        router.add_route(template, ResourceWithId(1))
@pytest.fixture
def capture_path_router():
    """Router mixing literal routes with competing 'path' captures."""
    routes = (
        ('/foo/bar/baz', 1),
        ('/foo/{bar:path}', 2),
        ('/foo/bar/{foo:path}', 3),
        ('/{baz:path}', 4),
        ('/x/{v1:int}/{v2}/{other:path}', 5),
        ('/y/{v1:int}/{v2:int}/{other:path}', 6),
    )
    router = DefaultRouter()
    for template, num in routes:
        router.add_route(template, ResourceWithId(num))
    return router
@pytest.mark.parametrize(
    'route, expected, num',
    (
        ('/foo/bar/baz', {}, 1),
        ('/foo/some/path/here', {'bar': 'some/path/here'}, 2),
        ('/foo/bar/bar', {'foo': 'bar'}, 3),
        (
            '/foo/bar/bar-1/2/3/4/5/5/6/7/8/98/9/0/-/9/',
            {'foo': 'bar-1/2/3/4/5/5/6/7/8/98/9/0/-/9/'},
            3,
        ),
        ('/x/1/2/3', {'v1': 1, 'v2': '2', 'other': '3'}, 5),
        ('/x/1/2/3/4/5/6', {'v1': 1, 'v2': '2', 'other': '3/4/5/6'}, 5),
        ('/upload/youtube/auth/token', {'baz': 'upload/youtube/auth/token'}, 4),
        ('/x/y/o.o/w', {'baz': 'x/y/o.o/w'}, 4),
        ('/foo', {'baz': 'foo'}, 4),
        ('/foo/', {'bar': ''}, 2),
        ('/foo/bar', {'bar': 'bar'}, 2),
        ('/foo/bar/', {'foo': ''}, 3),
        ('/foo/bar/baz/other', {'foo': 'baz/other'}, 3),
        ('/y/1/2/3', {'v1': 1, 'v2': 2, 'other': '3'}, 6),
        ('/y/1/a/3', {'baz': 'y/1/a/3'}, 4),
    ),
)
def test_capture_path(capture_path_router, route, expected, num):
    """The most specific route wins; 'path' soaks up whatever remains."""
    found, __, captured, __ = capture_path_router.find(route)
    assert found.resource_id == num
    assert captured == expected
def test_capture_path_no_match():
    """Bare '/foo' matches nothing when only deeper templates are registered."""
    router = DefaultRouter()
    for template, num in (
        ('/foo/bar/baz', 1),
        ('/foo/{bar:path}', 2),
        ('/foo/bar/{foo:path}', 3),
    ):
        router.add_route(template, ResourceWithId(num))

    assert router.find('/foo') is None
| SpamConverter |
python | google__jax | jax/_src/shard_map.py | {
"start": 51223,
"end": 52471
} | class ____(Exception):
pass
def _match_spec(mesh: Mesh, check_vma, manual_axes, src_pspec: PartitionSpec,
dst_pspec: PartitionSpec, x: JaxType) -> JaxType:
fn = HashablePartial(_match, mesh, check_vma, manual_axes, src_pspec,
dst_pspec)
with core.eval_context(), api.disable_jit(False):
if set(mesh.axis_names) == manual_axes:
return api.jit(fn, out_shardings=NamedSharding(mesh, dst_pspec))(x)
return api.jit(fn)(x)
def _match(mesh, check_vma, manual_axes, src_pspec, dst_pspec, x):
return shard_map(_rem_singleton, mesh=mesh, in_specs=src_pspec,
out_specs=dst_pspec, check_vma=check_vma,
axis_names=manual_axes)(x)
def _rem_singleton(x): return lax.squeeze(x, [0])
def _add_singleton(x): return lax.expand_dims(x, [0])
def _maybe_check_special(outs):
if not config.debug_nans.value and not config.debug_infs.value: return
bufs = [s.data for leaf in tree_leaves(outs)
for s in getattr(leaf, 'addressable_shards', [])]
try:
dispatch.check_special('shard_map', bufs)
except api_util.InternalFloatingPointError as e:
raise FloatingPointError(f'Invalid value ({e.ty}) encountered in sharded computation.') from None
| _RepError |
python | mlflow__mlflow | tests/gateway/tools.py | {
"start": 519,
"end": 2625
} | class ____:
def __init__(self, config_path: str | Path, *args, **kwargs):
self.port = get_safe_port()
self.host = "localhost"
self.url = f"http://{self.host}:{self.port}"
self.workers = 2
self.process = subprocess.Popen(
[
sys.executable,
"-m",
"mlflow",
"gateway",
"start",
"--config-path",
config_path,
"--host",
self.host,
"--port",
str(self.port),
"--workers",
str(self.workers),
],
*args,
**kwargs,
)
self.wait_until_ready()
def wait_until_ready(self) -> None:
s = time.time()
while time.time() - s < 10:
try:
if self.get("health").ok:
return
except requests.exceptions.ConnectionError:
time.sleep(0.5)
raise Exception("Gateway failed to start")
def wait_reload(self) -> None:
"""
Should be called after we update a gateway config file in tests to ensure
that the gateway service has reloaded the config.
"""
time.sleep(self.workers)
def request(self, method: str, path: str, *args: Any, **kwargs: Any) -> requests.Response:
return requests.request(method, f"{self.url}/{path}", *args, **kwargs)
def get(self, path: str, *args: Any, **kwargs: Any) -> requests.Response:
return self.request("GET", path, *args, **kwargs)
def assert_health(self):
assert self.get("health").ok
def post(self, path: str, *args: Any, **kwargs: Any) -> requests.Response:
return self.request("POST", path, *args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
kill_child_processes(self.process.pid)
self.process.terminate()
self.process.wait()
def save_yaml(path, conf):
path.write_text(yaml.safe_dump(conf))
| Gateway |
python | getsentry__sentry | src/sentry/utils/event_frames.py | {
"start": 937,
"end": 1062
} | class ____(Protocol):
def __call__(self, frame: EventFrame) -> str | None:
pass
@dataclass(frozen=True)
| FrameMunger |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/selective_checks.py | {
"start": 3861,
"end": 4987
} | class ____(Enum):
ENVIRONMENT_FILES = auto()
PYTHON_PRODUCTION_FILES = auto()
JAVASCRIPT_PRODUCTION_FILES = auto()
ALWAYS_TESTS_FILES = auto()
API_FILES = auto()
GIT_PROVIDER_FILES = auto()
STANDARD_PROVIDER_FILES = auto()
API_CODEGEN_FILES = auto()
HELM_FILES = auto()
DEPENDENCY_FILES = auto()
DOC_FILES = auto()
UI_FILES = auto()
SYSTEM_TEST_FILES = auto()
KUBERNETES_FILES = auto()
TASK_SDK_FILES = auto()
TASK_SDK_INTEGRATION_TEST_FILES = auto()
GO_SDK_FILES = auto()
AIRFLOW_CTL_FILES = auto()
ALL_PYPROJECT_TOML_FILES = auto()
ALL_PYTHON_FILES = auto()
ALL_SOURCE_FILES = auto()
ALL_AIRFLOW_PYTHON_FILES = auto()
ALL_AIRFLOW_CTL_PYTHON_FILES = auto()
ALL_PROVIDERS_PYTHON_FILES = auto()
ALL_PROVIDERS_DISTRIBUTION_CONFIG_FILES = auto()
ALL_DEV_PYTHON_FILES = auto()
ALL_DEVEL_COMMON_PYTHON_FILES = auto()
ALL_PROVIDER_YAML_FILES = auto()
TESTS_UTILS_FILES = auto()
ASSET_FILES = auto()
UNIT_TEST_FILES = auto()
DEVEL_TOML_FILES = auto()
UI_ENGLISH_TRANSLATION_FILES = auto()
| FileGroupForCi |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 8922,
"end": 9465
} | class ____(ModelEvent):
''' Announce a button click event on a Bokeh button widget.
'''
event_name = 'button_click'
def __init__(self, model: AbstractButton | None) -> None:
from .models.widgets import AbstractButton, ToggleButtonGroup
if model is not None and not isinstance(model, (AbstractButton, ToggleButtonGroup)):
clsname = self.__class__.__name__
raise ValueError(f"{clsname} event only applies to button and button group models")
super().__init__(model=model)
| ButtonClick |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_task_instances.py | {
"start": 76844,
"end": 95681
} | class ____(TestTaskInstanceEndpoint):
def test_should_respond_200(self, test_client, session):
self.create_task_instances(session, task_instances=[{"state": State.SUCCESS}], with_ti_history=True)
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries/1"
)
assert response.status_code == 200
assert response.json() == {
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "success",
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
}
@pytest.mark.parametrize("try_number", [1, 2])
def test_should_respond_200_with_different_try_numbers(self, test_client, try_number, session):
self.create_task_instances(session, task_instances=[{"state": State.SUCCESS}], with_ti_history=True)
response = test_client.get(
f"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries/{try_number}",
)
assert response.status_code == 200
assert response.json() == {
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0 if try_number == 1 else 1,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "success" if try_number == 1 else None,
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": try_number,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
}
@pytest.mark.parametrize("try_number", [1, 2])
def test_should_respond_200_with_mapped_task_at_different_try_numbers(
self, test_client, try_number, session
):
tis = self.create_task_instances(session, task_instances=[{"state": State.FAILED}])
old_ti = tis[0]
for idx in (1, 2):
ti = TaskInstance(
task=old_ti.task, run_id=old_ti.run_id, map_index=idx, dag_version_id=old_ti.dag_version_id
)
ti.rendered_task_instance_fields = RTIF(ti, render_templates=False)
ti.try_number = 1
for attr in ["duration", "end_date", "pid", "start_date", "state", "queue", "note"]:
setattr(ti, attr, getattr(old_ti, attr))
session.add(ti)
session.commit()
tis = session.query(TaskInstance).all()
# Record the task instance history
from airflow.models.taskinstance import clear_task_instances
clear_task_instances(tis, session)
# Simulate the try_number increasing to new values in TI
for ti in tis:
if ti.map_index > 0:
ti.try_number += 1
ti.queue = "default_queue"
session.merge(ti)
session.commit()
tis = session.query(TaskInstance).all()
# in each loop, we should get the right mapped TI back
for map_index in (1, 2):
# Get the info from TIHistory: try_number 1, try_number 2 is TI table(latest)
# TODO: Add "REMOTE_USER": "test" as per legacy code after adding Authentication
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances"
f"/print_the_context/{map_index}/tries/{try_number}",
)
assert response.status_code == 200
assert response.json() == {
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": map_index,
"max_tries": 0 if try_number == 1 else 1,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "failed" if try_number == 1 else None,
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": try_number,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
}
def test_should_respond_200_with_task_state_in_deferred(self, test_client, session):
now = pendulum.now("UTC")
ti = self.create_task_instances(
session,
task_instances=[{"state": State.DEFERRED}],
update_extras=True,
)[0]
ti.trigger = Trigger("none", {})
ti.trigger.created_date = now
ti.triggerer_job = Job()
TriggererJobRunner(job=ti.triggerer_job)
ti.triggerer_job.state = "running"
ti.try_number = 1
session.merge(ti)
session.flush()
# Record the TaskInstanceHistory
TaskInstanceHistory.record_ti(ti, session=session)
session.flush()
# Change TaskInstance try_number to 2, ensuring api checks TIHistory
ti = session.query(TaskInstance).one_or_none()
ti.try_number = 2
session.merge(ti)
# Set duration and end_date in TaskInstanceHistory for easy testing
tih = session.query(TaskInstanceHistory).all()[0]
tih.duration = 10000
tih.end_date = self.default_time + dt.timedelta(days=2)
session.merge(tih)
session.commit()
# Get the task instance details from TIHistory:
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries/1",
)
assert response.status_code == 200
data = response.json()
assert data == {
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "failed",
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
}
def test_should_respond_200_with_task_state_in_removed(self, test_client, session):
self.create_task_instances(
session, task_instances=[{"state": State.REMOVED}], update_extras=True, with_ti_history=True
)
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries/1",
)
assert response.status_code == 200
assert response.json() == {
"dag_id": "example_python_operator",
"dag_display_name": "example_python_operator",
"duration": 10000.0,
"end_date": "2020-01-03T00:00:00Z",
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0,
"operator": "PythonOperator",
"operator_name": "PythonOperator",
"pid": 100,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 9,
"queue": "default_queue",
"queued_when": None,
"scheduled_when": None,
"start_date": "2020-01-02T00:00:00Z",
"state": "removed",
"task_id": "print_the_context",
"task_display_name": "print_the_context",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "TEST_DAG_RUN_ID",
"dag_version": mock.ANY,
"hitl_detail": None,
}
def test_should_respond_200_with_hitl(
self, test_client, create_task_instance: CreateTaskInstance, session
):
ti = create_task_instance(dag_id="test_hitl_dag", task_id="sample_task_hitl")
ti.try_number = 1
session.add(ti)
hitl_detail = HITLDetail(
ti_id=ti.id,
options=["Approve", "Reject"],
subject="This is subject",
body="this is body",
defaults=["Approve"],
multiple=False,
params={"input_1": 1},
assignees=None,
)
session.add(hitl_detail)
session.commit()
# Record the TaskInstanceHistory
TaskInstanceHistory.record_ti(ti, session=session)
session.flush()
response = test_client.get(
f"/dags/{ti.dag_id}/dagRuns/{ti.run_id}/taskInstances/{ti.task_id}/tries/1",
)
assert response.status_code == 200
assert response.json() == {
"dag_id": "test_hitl_dag",
"dag_display_name": "test_hitl_dag",
"duration": None,
"end_date": mock.ANY,
"executor": None,
"executor_config": "{}",
"hostname": "",
"map_index": -1,
"max_tries": 0,
"operator": "EmptyOperator",
"operator_name": "EmptyOperator",
"pid": None,
"pool": "default_pool",
"pool_slots": 1,
"priority_weight": 1,
"queue": "default",
"queued_when": None,
"scheduled_when": None,
"start_date": None,
"state": None,
"task_id": "sample_task_hitl",
"task_display_name": "sample_task_hitl",
"try_number": 1,
"unixname": getuser(),
"dag_run_id": "test",
"dag_version": mock.ANY,
"hitl_detail": {
"assigned_users": [],
"body": "this is body",
"chosen_options": None,
"created_at": mock.ANY,
"defaults": ["Approve"],
"multiple": False,
"options": ["Approve", "Reject"],
"params": {"input_1": {"value": 1, "description": None, "schema": {}}},
"params_input": {},
"responded_at": None,
"responded_by_user": None,
"response_received": False,
"subject": "This is subject",
},
}
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries/1",
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries/1",
)
assert response.status_code == 403
def test_raises_404_for_nonexistent_task_instance(self, test_client, session):
self.create_task_instances(session)
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/nonexistent_task/tries/0"
)
assert response.status_code == 404
assert response.json() == {
"detail": "The Task Instance with dag_id: `example_python_operator`, run_id: `TEST_DAG_RUN_ID`, task_id: `nonexistent_task`, try_number: `0` and map_index: `-1` was not found"
}
@pytest.mark.parametrize(
("run_id", "expected_version_number"),
[
("run1", 1),
("run2", 2),
("run3", 3),
],
)
@pytest.mark.usefixtures("make_dag_with_multiple_versions")
@mock.patch("airflow.api_fastapi.core_api.datamodels.dag_versions.hasattr")
def test_should_respond_200_with_versions(
self, mock_hasattr, test_client, run_id, expected_version_number, session
):
mock_hasattr.return_value = False
response = test_client.get(
f"/dags/dag_with_multiple_versions/dagRuns/{run_id}/taskInstances/task1/tries/0"
)
assert response.status_code == 200
assert response.json() == {
"task_id": "task1",
"dag_id": "dag_with_multiple_versions",
"dag_display_name": "dag_with_multiple_versions",
"dag_run_id": run_id,
"map_index": -1,
"start_date": None,
"end_date": mock.ANY,
"duration": None,
"state": None,
"try_number": 0,
"max_tries": 0,
"task_display_name": "task1",
"hostname": "",
"unixname": getuser(),
"pool": "default_pool",
"pool_slots": 1,
"queue": "default",
"priority_weight": 1,
"operator": "EmptyOperator",
"operator_name": "EmptyOperator",
"queued_when": None,
"scheduled_when": None,
"pid": None,
"executor": None,
"executor_config": "{}",
"dag_version": {
"id": mock.ANY,
"version_number": expected_version_number,
"dag_id": "dag_with_multiple_versions",
"bundle_name": "dag_maker",
"bundle_version": f"some_commit_hash{expected_version_number}",
"bundle_url": f"http://test_host.github.com/tree/some_commit_hash{expected_version_number}/dags",
"created_at": mock.ANY,
"dag_display_name": "dag_with_multiple_versions",
},
"hitl_detail": None,
}
@pytest.mark.parametrize(
("run_id", "expected_version_number"),
[
("run1", 1),
("run2", 2),
("run3", 3),
],
)
@pytest.mark.usefixtures("make_dag_with_multiple_versions")
def test_should_respond_200_with_versions_using_url_template(
self, test_client, run_id, expected_version_number, session
):
response = test_client.get(
f"/dags/dag_with_multiple_versions/dagRuns/{run_id}/taskInstances/task1/tries/0"
)
assert response.status_code == 200
assert response.json() == {
"task_id": "task1",
"dag_id": "dag_with_multiple_versions",
"dag_display_name": "dag_with_multiple_versions",
"dag_run_id": run_id,
"map_index": -1,
"start_date": None,
"end_date": mock.ANY,
"duration": None,
"state": None,
"try_number": 0,
"max_tries": 0,
"task_display_name": "task1",
"hostname": "",
"unixname": getuser(),
"pool": "default_pool",
"pool_slots": 1,
"queue": "default",
"priority_weight": 1,
"operator": "EmptyOperator",
"operator_name": "EmptyOperator",
"queued_when": None,
"scheduled_when": None,
"pid": None,
"executor": None,
"executor_config": "{}",
"dag_version": {
"id": mock.ANY,
"version_number": expected_version_number,
"dag_id": "dag_with_multiple_versions",
"bundle_name": "dag_maker",
"bundle_version": f"some_commit_hash{expected_version_number}",
"bundle_url": f"http://test_host.github.com/tree/some_commit_hash{expected_version_number}/dags",
"created_at": mock.ANY,
"dag_display_name": "dag_with_multiple_versions",
},
"hitl_detail": None,
}
def test_should_not_return_duplicate_runs(self, test_client, session):
"""
Test that ensures the task instances query doesn't return duplicates due to the updated join/filter logic.
"""
self.create_task_instances(session, task_instances=[{"state": State.SUCCESS}], with_ti_history=True)
self.create_task_instances(
session,
dag_id="example_bash_operator",
task_instances=[{"state": State.SUCCESS}],
with_ti_history=True,
)
response = test_client.get(
"/dags/example_python_operator/dagRuns/TEST_DAG_RUN_ID/taskInstances/print_the_context/tries"
)
assert response.status_code == 200
response = response.json()
assert response["total_entries"] == 2
| TestGetTaskInstanceTry |
python | tiangolo__fastapi | docs_src/dependencies/tutorial008b.py | {
"start": 238,
"end": 735
} | class ____(Exception):
pass
def get_username():
try:
yield "Rick"
except OwnerError as e:
raise HTTPException(status_code=400, detail=f"Owner error: {e}")
@app.get("/items/{item_id}")
def get_item(item_id: str, username: str = Depends(get_username)):
if item_id not in data:
raise HTTPException(status_code=404, detail="Item not found")
item = data[item_id]
if item["owner"] != username:
raise OwnerError(username)
return item
| OwnerError |
python | django__django | tests/model_fields/test_mixins.py | {
"start": 177,
"end": 283
} | class ____(FieldCacheMixin):
@cached_property
def cache_name(self):
return "example"
| Example |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/tests.py | {
"start": 15609,
"end": 15886
} | class ____(NamedUrlCookieWizardView):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
return response, self
@override_settings(ROOT_URLCONF='tests.wizard.namedwizardtests.urls')
| TestNamedUrlCookieWizardView |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/runs.py | {
"start": 5712,
"end": 7110
} | class ____(GenericScalar, graphene.Scalar):
class Meta:
description = """This type is used when passing in a configuration object
for pipeline configuration. Can either be passed in as a string (the
YAML configuration object) or as the configuration object itself. In
either case, the object must conform to the constraints of the dagster config type system.
"""
name = "RunConfigData"
def parse_run_config_input(
run_config: Union[str, Mapping[str, object]], raise_on_error: bool
) -> Union[str, Mapping[str, object]]:
if run_config and isinstance(run_config, str):
try:
return load_run_config_yaml(run_config)
except yaml.YAMLError:
if raise_on_error:
raise UserFacingGraphQLError(
GraphenePythonError(serializable_error_info_from_exc_info(sys.exc_info()))
)
# Pass the config through as a string so that it will return a useful validation error
return run_config
return run_config
types = [
GrapheneLaunchRunResult,
GrapheneLaunchMultipleRunsResult,
GrapheneLaunchRunReexecutionResult,
GrapheneLaunchPipelineRunSuccess,
GrapheneLaunchRunSuccess,
GrapheneRunsOrError,
GrapheneRunConfigData,
GrapheneRunGroup,
GrapheneRunGroupOrError,
GrapheneRunGroups,
]
| GrapheneRunConfigData |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_cmath.py | {
"start": 668,
"end": 2691
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
from test.support import requires_IEEE_754, cpython_only, import_helper
from test.test_math import parse_testfile, test_file
import test.test_math as test_math
import unittest
import cmath, math
from cmath import phase, polar, rect, pi
import platform
import sys
INF = float('inf')
NAN = float('nan')
complex_zeros = [complex(x, y) for x in [0.0, -0.0] for y in [0.0, -0.0]]
complex_infinities = [complex(x, y) for x, y in [
(INF, 0.0), # 1st quadrant
(INF, 2.3),
(INF, INF),
(2.3, INF),
(0.0, INF),
(-0.0, INF), # 2nd quadrant
(-2.3, INF),
(-INF, INF),
(-INF, 2.3),
(-INF, 0.0),
(-INF, -0.0), # 3rd quadrant
(-INF, -2.3),
(-INF, -INF),
(-2.3, -INF),
(-0.0, -INF),
(0.0, -INF), # 4th quadrant
(2.3, -INF),
(INF, -INF),
(INF, -2.3),
(INF, -0.0)
]]
complex_nans = [complex(x, y) for x, y in [
(NAN, -INF),
(NAN, -2.3),
(NAN, -0.0),
(NAN, 0.0),
(NAN, 2.3),
(NAN, INF),
(-INF, NAN),
(-2.3, NAN),
(-0.0, NAN),
(0.0, NAN),
(2.3, NAN),
(INF, NAN)
]]
| RedirectImportFinder |
python | huggingface__transformers | src/transformers/models/dinov3_vit/modular_dinov3_vit.py | {
"start": 5907,
"end": 9528
} | class ____(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.config = config
self.base = config.rope_theta
self.head_dim = config.hidden_size // config.num_attention_heads
self.num_patches_h = config.image_size // config.patch_size
self.num_patches_w = config.image_size // config.patch_size
inv_freq = 1 / self.base ** torch.arange(0, 1, 4 / self.head_dim, dtype=torch.float32) # (head_dim / 4,)
self.register_buffer("inv_freq", inv_freq, persistent=False)
def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
_, _, height, width = pixel_values.shape
num_patches_h = height // self.config.patch_size
num_patches_w = width // self.config.patch_size
device = pixel_values.device
device_type = device.type if isinstance(device.type, str) and device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
# Although we could precompute static patch_coords from image_size and patch_size in the config,
# the model was trained with random_scale, so it can process images of varying sizes.
# Therefore, it's better to compute patch_coords dynamically (with lru_cache).
patch_coords = get_patches_center_coordinates(
num_patches_h, num_patches_w, dtype=torch.float32, device=device
)
if self.training:
patch_coords = augment_patches_center_coordinates(
patch_coords,
shift=self.config.pos_embed_shift,
jitter=self.config.pos_embed_jitter,
rescale=self.config.pos_embed_rescale,
)
# (height * width, 2, head_dim / 4) -> (height * width, head_dim / 2) -> (height * width, head_dim)
angles = 2 * math.pi * patch_coords[:, :, None] * self.inv_freq[None, None, :]
angles = angles.flatten(1, 2)
angles = angles.tile(2)
cos = torch.cos(angles)
sin = torch.sin(angles)
dtype = pixel_values.dtype
return cos.to(dtype=dtype), sin.to(dtype=dtype)
def apply_rotary_pos_emb(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, **kwargs
) -> tuple[torch.Tensor, torch.Tensor]:
"""Applies Rotary Position Embedding to the query and key tensors, but only to the patch tokens,
ignoring the prefix tokens (cls token and register tokens).
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
num_tokens = q.shape[-2]
num_patches = sin.shape[-2]
num_prefix_tokens = num_tokens - num_patches # cls token + register tokens
q_prefix_tokens, q_patches = q.split((num_prefix_tokens, num_patches), dim=-2)
k_prefix_tokens, k_patches = k.split((num_prefix_tokens, num_patches), dim=-2)
# apply rope only to patch tokens
q_patches = (q_patches * cos) + (rotate_half(q_patches) * sin)
k_patches = (k_patches * cos) + (rotate_half(k_patches) * sin)
q = torch.cat((q_prefix_tokens, q_patches), dim=-2)
k = torch.cat((k_prefix_tokens, k_patches), dim=-2)
return q, k
| DINOv3ViTRopePositionEmbedding |
python | great-expectations__great_expectations | tests/expectations/test_dataclass_serializable_dot_dict_pattern.py | {
"start": 809,
"end": 12436
} | class ____(SerializableDictDot):
alpha_var: int
beta_var: MyEnum
A_list: List[MyClassA]
B_list: List[MyClassB]
enum_list: List[MyEnum] = field(default_factory=list)
some_tuple: Optional[Tuple[MyClassA, MyClassB]] = None
@property
def num_As(self):
return len(self.A_list)
@property
def num_Bs(self):
return len(self.B_list)
def _build_my_a():
    """Construct the ``MyClassA`` fixture shared by the dict-access tests."""
    return MyClassA(foo="a string", bar=1)


@pytest.mark.unit
def test_access_using_dict_notation():
    """Keys can be accessed using dict notation."""
    instance = _build_my_a()
    assert instance["foo"] == "a string"
    assert instance["bar"] == 1


@pytest.mark.unit
def test_has_keys():
    """The .keys method works."""
    instance = _build_my_a()
    assert set(instance.keys()) == {"foo", "bar"}


@pytest.mark.unit
def test_has_items():
    """The .items method works."""
    instance = _build_my_a()
    assert list(instance.items()) == [("foo", "a string"), ("bar", 1)]


@pytest.mark.unit
def test_has_to_raw_dict():
    """The .to_raw_dict method works."""
    instance = _build_my_a()
    assert instance.to_raw_dict() == {"foo": "a string", "bar": 1}
@pytest.mark.skip(reason="Not sure what our preferred pattern for this is")
@pytest.mark.unit
def test_incorrect_type():
    """Throws an error if instantiated with an incorrect type."""
    MyClassA(
        **{
            "foo": "a string",
            "bar": "SHOULD BE AN INT",
            "baz": ["a", "b", "c"],
        }
    )


@pytest.mark.unit
def test_renders_to_a_useful_str():
    """Renders to a string that exposes the internals of the object."""
    rendered = str(MyClassA(foo="a string", bar=1))
    assert rendered == "MyClassA(foo='a string', bar=1)"


@pytest.mark.skip(reason="hmmm. We should be able to make this work by default")
@pytest.mark.unit
def test_is_json_serializable():
    """Serializes to a plain JSON-compatible dict."""
    expected = {"foo": "a string", "bar": 1}
    assert MyClassA(foo="a string", bar=1).to_json_dict() == expected


@pytest.mark.unit
def test_missing_keys():
    """Throws a TypeError if instantiated with missing arguments."""
    for kwargs in ({}, {"foo": "a string"}, {"bar": 1}):
        with raises(TypeError):
            MyClassA(**kwargs)


@pytest.mark.unit
def test_extra_keys():
    """Throws a TypeError if instantiated with extra arguments."""
    with raises(TypeError):
        MyClassA(foo="a string", bar=1, baz=["a", "b", "c"])
@pytest.mark.unit
def test_update_after_instantiation():
    """Can be updated after instantiation, using both dot and dict notation."""
    instance = MyClassA(foo="a string", bar=1)
    assert instance["foo"] == "a string"
    assert instance["bar"] == 1

    # Dot-notation assignment is visible through both access styles.
    instance.foo = "different string"
    assert instance["foo"] == "different string"
    assert instance.foo == "different string"

    # Dict-notation assignment is, too.
    instance["foo"] = "a third string"
    assert instance["foo"] == "a third string"
    assert instance.foo == "a third string"


@pytest.mark.unit
def test_can_be_subclassed():
    """Subclass instances support dot and dict access regardless of init style."""
    expected = {
        "foo": "a string",
        "bar": 1,
        "baz": ["a", "b", "c"],
        "qux": -100,
        "quux": 43,
    }
    # Build once from keyword arguments and once by unpacking a dict.
    for instance in (
        MyClassB(foo="a string", bar=1, baz=["a", "b", "c"], qux=-100, quux=43),
        MyClassB(**expected),
    ):
        for key, value in expected.items():
            assert instance[key] == value
            assert getattr(instance, key) == value
@pytest.mark.unit
def test_can_have_derived_properties():
    """Can have derived properties."""
    instance = MyClassB(foo="a string", bar=1, baz=["a", "b", "c"], qux=-100)
    assert instance.num_bazzes == 3


@pytest.mark.unit
def test_can_have_optional_arguments():
    """Can have optional arguments."""
    # "qux" is optional and intentionally omitted here.
    instance = MyClassB(foo="a string", bar=1, baz=["a", "b", "c"])
    assert instance.qux is None


@pytest.mark.unit
def test_can_have_default_values():
    """Can have default values."""
    instance = MyClassB(foo="a string", bar=1, baz=["a", "b", "c"], qux=-100)
    assert instance.quux == 42


@pytest.mark.unit
def test_can_use_positional_arguments():
    """Can use a normal mix of positional and keyword arguments."""

    def check(instance):
        assert instance.foo == "a string"
        assert instance.bar == 1
        assert instance.baz == ["a", "b", "c"]
        assert instance.qux == -100

    check(MyClassB("a string", 1, baz=["a", "b", "c"], qux=-100))
    check(MyClassB(qux=-100, baz=["a", "b", "c"], bar=1, foo="a string"))
@pytest.mark.unit
def test_can_be_nested():
    """Objects of this type can be nested."""
    nested = MyClassC(
        alpha_var=20,
        beta_var=MyEnum("x"),
        A_list=[
            MyClassA(foo="A-1", bar=101),
            MyClassA(**{"foo": "A-2", "bar": 102}),
        ],
        B_list=[
            MyClassB(
                **{
                    "foo": "B-1",
                    "bar": 201,
                    "baz": ["a", "b", "c"],
                    "qux": -100,
                    "quux": 43,
                }
            )
        ],
    )

    # Sub-objects are reachable using a mix of dict and dot notation.
    assert nested.A_list[0].foo == "A-1"
    assert nested["A_list"][1].bar == 102
    assert nested["B_list"][0]["quux"] == 43
    # Note: we don't currently support dot notation access within lists.

    # Enum sub-objects are accessible as well.
    assert nested["beta_var"] == MyEnum("x")
@pytest.mark.unit
def test_to_raw_dict_works_recursively():
    "the .to_raw_dict method works recursively on both DotDicts and Enums"
    my_C = MyClassC(
        alpha_var=20,
        beta_var=MyEnum("x"),
        A_list=[
            MyClassA(
                foo="A-1",
                bar=101,
            ),
            MyClassA(
                **{
                    "foo": "A-2",
                    "bar": 102,
                }
            ),
        ],
        B_list=[
            MyClassB(
                **{
                    "foo": "B-1",
                    "bar": 201,
                    "baz": ["a", "b", "c"],
                    "qux": -100,
                    "quux": 43,
                }
            )
        ],
        enum_list=[
            MyEnum("x"),
            MyEnum("y"),
            MyEnum("z"),
        ],
        some_tuple=(
            MyClassA(
                foo="A-1",
                bar=101,
            ),
            MyClassB(
                foo="B-1",
                bar=201,
                baz=["a", "b", "c"],
                qux=-100,
                quux=43,
            ),
        ),
    )
    C_dict = my_C.to_raw_dict()

    # Make sure it's a dictionary, not a DictDot
    assert type(C_dict) == dict  # noqa: E721 # FIXME CoP
    assert isinstance(C_dict, DictDot) is False

    # Dictionaries don't support dot notation.
    with raises(AttributeError):
        C_dict.A_list  # noqa: B018 # FIXME CoP

    # Nested DotDicts must have been converted to plain dicts as well.
    assert type(C_dict["A_list"][0]) == dict  # noqa: E721 # FIXME CoP
    assert type(C_dict["B_list"][0]) == dict  # noqa: E721 # FIXME CoP

    # Enums serialize to their values and the tuple becomes a list.
    assert C_dict == {
        "alpha_var": 20,
        "beta_var": "x",
        "A_list": [
            {
                "foo": "A-1",
                "bar": 101,
            },
            {
                "foo": "A-2",
                "bar": 102,
            },
        ],
        "B_list": [
            {
                "foo": "B-1",
                "bar": 201,
                "baz": ["a", "b", "c"],
                "qux": -100,
                "quux": 43,
            }
        ],
        "enum_list": [
            "x",
            "y",
            "z",
        ],
        "some_tuple": [
            {
                "foo": "A-1",
                "bar": 101,
            },
            {
                "foo": "B-1",
                "bar": 201,
                "baz": ["a", "b", "c"],
                "qux": -100,
                "quux": 43,
            },
        ],
    }
@pytest.mark.unit
def test_instantiation_with_a_from_legacy_dict_method():
    """Can be instantiated from a legacy dictionary.

    Note: This pattern is helpful for cases where we're migrating from
    dictionary-based objects to typed objects. One especially thorny example is
    when the dictionary contains keys that are reserved words in python. For
    example, test cases use the reserved word: "in" as one of their required
    fields.
    """
    import inspect
    import logging

    @dataclass
    class MyClassE(SerializableDictDot):
        foo: str
        bar: int
        input: int

        @classmethod
        def from_legacy_dict(cls, legacy_dict):
            """Adapter that allows typing of legacy my_class_e dictionary objects,
            without needing to immediately clean up every object.
            """
            temp_dict = {}
            for k, v in legacy_dict.items():
                # Keep parameters that match the type definition as-is.
                if k in inspect.signature(cls).parameters:
                    temp_dict[k] = v
                elif k == "in":
                    # "in" is a python reserved word, so remap it to "input".
                    temp_dict["input"] = v
                else:
                    # Ignore parameters that don't match the type definition.
                    # BUG FIX: the implicit string concatenation previously ran
                    # the sentences together without separating spaces.
                    logging.warning(
                        f"WARNING: Got extra parameter: {k} while instantiating MyClassE. "
                        "This parameter will be ignored. "
                        "You probably need to clean up a library_metadata object."
                    )
            return cls(**temp_dict)

    my_E = MyClassE.from_legacy_dict(
        {
            "foo": "a string",
            "bar": 1,
            "in": 10,
        }
    )
    assert my_E["foo"] == "a string"
    assert my_E["bar"] == 1
    assert my_E["input"] == 10
    assert my_E.input == 10

    # Note that after instantiation, the class does NOT have an "in" property
    with raises(AttributeError):
        my_E["in"] == 10  # noqa: B015 # FIXME CoP

    # Because `in` is a reserved word, this will raise a SyntaxError:
    # my_F.in == 100
    # Because `in` is a reserved word, this will also raise a SyntaxError:
    # my_F.in = 100
| MyClassC |
python | doocs__leetcode | solution/1000-1099/1054.Distant Barcodes/Solution.py | {
"start": 0,
"end": 333
} | class ____:
def rearrangeBarcodes(self, barcodes: List[int]) -> List[int]:
cnt = Counter(barcodes)
barcodes.sort(key=lambda x: (-cnt[x], x))
n = len(barcodes)
ans = [0] * len(barcodes)
ans[::2] = barcodes[: (n + 1) // 2]
ans[1::2] = barcodes[(n + 1) // 2 :]
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-okta/unit_tests/test_streams.py | {
"start": 4402,
"end": 6080
class ____:
    """Pagination tests for the ``users`` stream's retriever."""

    @staticmethod
    def _response_with_links(links):
        # Build a mocked requests.Response exposing only what the paginator reads.
        mocked = MagicMock(requests.Response)
        mocked.links = links
        mocked.headers = {}
        return mocked

    def test_next_page_token(self, oauth_config, users_instance, url_base, api_url, start_date):
        stream = get_stream_by_name("users", config=oauth_config)
        response = self._response_with_links(
            {"next": {"url": f"{api_url}?param1=test_value1&param2=test_value2"}}
        )
        token = stream.retriever._next_page_token(response=response)
        assert token == {
            "next_page_token": "https://test_domain.okta.com?param1=test_value1&param2=test_value2"
        }

    def test_next_page_token_empty_params(self, oauth_config, users_instance, url_base, api_url, start_date):
        stream = get_stream_by_name("users", config=oauth_config)
        response = self._response_with_links({"next": {"url": f"{api_url}"}})
        token = stream.retriever._next_page_token(response=response)
        assert token == {"next_page_token": "https://test_domain.okta.com"}

    def test_next_page_token_link_have_self_and_equal_next(self, oauth_config, users_instance, url_base, api_url, start_date):
        stream = get_stream_by_name("users", config=oauth_config)
        response = self._response_with_links(
            {"next": {"url": f"{api_url}"}, "self": {"url": f"{api_url}"}}
        )
        # A "next" link equal to "self" means there is no further page.
        token = stream.retriever._next_page_token(response=response)
        assert token is None
| TestNextPageToken |
python | doocs__leetcode | solution/2600-2699/2685.Count the Number of Complete Components/Solution.py | {
"start": 0,
"end": 658
} | class ____:
def countCompleteComponents(self, n: int, edges: List[List[int]]) -> int:
def dfs(i: int) -> (int, int):
vis[i] = True
x, y = 1, len(g[i])
for j in g[i]:
if not vis[j]:
a, b = dfs(j)
x += a
y += b
return x, y
g = defaultdict(list)
for a, b in edges:
g[a].append(b)
g[b].append(a)
vis = [False] * n
ans = 0
for i in range(n):
if not vis[i]:
a, b = dfs(i)
ans += a * (a - 1) == b
return ans
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 87569,
"end": 93206
class ____(GoogleCloudBaseOperator):
    """
    Update BigQuery Table Schema.

    Updates fields on a table schema based on contents of the supplied schema_fields_updates
    parameter. The supplied schema does not need to be complete, if the field
    already exists in the schema you only need to supply keys & values for the
    items you want to patch, just ensure the "name" key is set.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpdateTableSchemaOperator`

    :param schema_fields_updates: a partial schema resource. see
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema

        .. code-block:: python

            schema_fields_updates = [
                {"name": "emp_name", "description": "Some New Description"},
                {
                    "name": "salary",
                    "policyTags": {"names": ["some_new_policy_tag"]},
                },
                {
                    "name": "departments",
                    "fields": [
                        {"name": "name", "description": "Some New Description"},
                        {"name": "type", "description": "Some New Description"},
                    ],
                },
            ]

    :param include_policy_tags: (Optional) If set to True policy tags will be included in
        the update request which requires special permissions even if unchanged (default False)
        see https://cloud.google.com/bigquery/docs/column-level-security#roles
    :param dataset_id: A dotted
        ``(<project>.|<project>:)<dataset>`` that indicates which dataset
        will be updated. (templated)
    :param table_id: The table ID of the requested table. (templated)
    :param project_id: The name of the project where we want to update the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :param location: The location used for the operation.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "schema_fields_updates",
        "dataset_id",
        "table_id",
        "project_id",
        "gcp_conn_id",
        "impersonation_chain",
    )
    template_fields_renderers = {"schema_fields_updates": "json"}
    ui_color = BigQueryUIColors.TABLE.value
    operator_extra_links = (BigQueryTableLink(),)

    def __init__(
        self,
        *,
        schema_fields_updates: list[dict[str, Any]],
        dataset_id: str,
        table_id: str,
        include_policy_tags: bool = False,
        project_id: str = PROVIDE_PROJECT_ID,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        location: str | None = None,
        **kwargs,
    ) -> None:
        self.schema_fields_updates = schema_fields_updates
        self.include_policy_tags = include_policy_tags
        self.table_id = table_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain
        self.location = location
        # Populated by execute(); read later by get_openlineage_facets_on_complete().
        self._table: dict | None = None
        super().__init__(**kwargs)

    def execute(self, context: Context):
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, location=self.location
        )

        # Save table as attribute for further use by OpenLineage
        self._table = bq_hook.update_table_schema(
            schema_fields_updates=self.schema_fields_updates,
            include_policy_tags=self.include_policy_tags,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.project_id,
        )
        if self._table:
            # Record a console link to the updated table for the UI.
            BigQueryTableLink.persist(
                context=context,
                dataset_id=self._table["tableReference"]["datasetId"],
                project_id=self._table["tableReference"]["projectId"],
                table_id=self._table["tableReference"]["tableId"],
            )
        return self._table

    def get_openlineage_facets_on_complete(self, _):
        """Implement _on_complete as we will use table resource returned by update method."""
        from airflow.providers.common.compat.openlineage.facet import Dataset
        from airflow.providers.google.cloud.openlineage.utils import (
            BIGQUERY_NAMESPACE,
            get_facets_from_bq_table,
        )
        from airflow.providers.openlineage.extractors import OperatorLineage

        # Rebuild the client-library Table from the API representation saved in execute().
        table = Table.from_api_repr(self._table)
        output_dataset = Dataset(
            namespace=BIGQUERY_NAMESPACE,
            name=f"{table.project}.{table.dataset_id}.{table.table_id}",
            facets=get_facets_from_bq_table(table),
        )

        return OperatorLineage(outputs=[output_dataset])
| BigQueryUpdateTableSchemaOperator |
python | ApeWorX__ape | src/ape/types/coverage.py | {
"start": 3399,
"end": 6912
class ____(BaseModel):
    """
    The individual coverage of a function defined in a smart contract.
    """

    name: str
    """
    The display name of the function.
    """

    full_name: str
    """
    The unique name of the function.
    """

    statements: list[CoverageStatement] = []
    """
    For statement coverage, these are the individual items.
    See :class:`~ape.types.coverage.CoverageStatement` for more details.
    """

    hit_count: NonNegativeInt = 0
    """
    The times this function was called.

    **NOTE**: This is needed as a separate data point since not all methods may have
    statements (such as auto-getters).
    """

    @property
    def lines_covered(self) -> NonNegativeInt:
        """
        The number of lines with a hit counter greater than zero in this method.
        """
        return len([x for x in self.statements if x.hit_count > 0])

    @property
    def lines_valid(self) -> NonNegativeInt:
        """
        All lines valid for coverage in this method.
        """
        return len(self.statements)

    @property
    def miss_count(self) -> NonNegativeInt:
        """
        The number of lines missed.
        """
        return self.lines_valid - self.lines_covered

    @property
    def line_rate(self) -> float:
        """
        The number of lines hit divided by number of lines.
        """
        if not self.statements:
            # If there are no statements, rely on fn hit count only.
            return 1.0 if self.hit_count > 0 else 0.0

        # This function has hittable statements.
        return self.lines_covered / self.lines_valid if self.lines_valid > 0 else 0

    def model_dump(self, *args, **kwargs) -> dict:
        attribs = super().model_dump(*args, **kwargs)

        # Add derived coverage stats so they appear in serialized output.
        attribs["lines_covered"] = self.lines_covered
        attribs["lines_valid"] = self.lines_valid
        attribs["line_rate"] = self.line_rate

        return attribs

    def profile_statement(
        self, pc: int, location: Optional[SourceLocation] = None, tag: Optional[str] = None
    ):
        """
        Initialize a statement in the coverage profile with a hit count starting at zero.
        This statement is ready to accumulate hits as tests execute.

        Args:
            pc (int): The program counter of the statement.
            location (Optional[ethpm_types.source.SourceStatement]): The location of the statement,
              if it exists.
            tag (Optional[str]): Optionally provide more information about the statements being hit.
              This is useful for builtin statements that may be missing context otherwise.
        """
        for statement in self.statements:
            # Skip when adding a virtual (location-less) statement or when this
            # statement starts on a different line. (FIX: dropped the redundant
            # ``location and`` that was already implied by ``not location or``.)
            if not location or (
                statement.location and statement.location[0] != location[0]
            ):
                continue

            # Already tracking this location; just accumulate the program counter.
            statement.pcs.add(pc)

            if not statement.tag:
                statement.tag = tag

            return

        if location:
            # Adding a source-statement for the first time.
            coverage_statement = CoverageStatement(location=location, pcs={pc}, tag=tag)
        else:
            # Adding a virtual statement.
            coverage_statement = CoverageStatement(pcs={pc}, tag=tag)

        # FIX: removed the dead ``if coverage_statement is not None`` guard;
        # one branch above always assigns it.
        self.statements.append(coverage_statement)
| FunctionCoverage |
python | scikit-image__scikit-image | tests/skimage/_shared/test_utils.py | {
"start": 7179,
"end": 16393
class ____:
    """Tests for the ``deprecate_parameter`` decorator (docstring rewriting,
    warnings, conflict detection, and stacklevel handling)."""

    @pytest.mark.skipif(not have_numpydoc, reason="requires numpydoc")
    def test_docstring_removed_param(self):
        # function name and doc are preserved
        assert _func_deprecated_params.__name__ == "_func_deprecated_params"
        if sys.flags.optimize < 2:
            # if PYTHONOPTIMIZE is set to 2, docstrings are stripped
            assert (
                _func_deprecated_params.__doc__
                == """Expected docstring.

    Parameters
    ----------
    arg0 : int
        First unchanged parameter.
    arg1 : int, optional
        Second unchanged parameter.

    Other Parameters
    ----------------
    old0 : DEPRECATED
        `old0` is deprecated.

        .. deprecated:: 0.10
    old1 : DEPRECATED
        `old1` is deprecated.

        .. deprecated:: 0.10
    """
            )

    @pytest.mark.skipif(not have_numpydoc, reason="requires numpydoc")
    def test_docstring_replaced_param(self):
        assert _func_replace_params.__name__ == "_func_replace_params"
        if sys.flags.optimize < 2:
            # if PYTHONOPTIMIZE is set to 2, docstrings are stripped
            assert (
                _func_replace_params.__doc__
                == """Expected docstring.

    Parameters
    ----------
    arg0 : int
        First unchanged parameter.
    new0 : int, optional
        First new parameter.

        .. versionadded:: 0.10
    new1 : int, optional
        Second new parameter.

        .. versionadded:: 0.10
    arg1 : int, optional
        Second unchanged parameter.

    Other Parameters
    ----------------
    old0 : DEPRECATED
        Deprecated in favor of `new1`.

        .. deprecated:: 0.10
    old1 : DEPRECATED
        Deprecated in favor of `new0`.

        .. deprecated:: 0.10
    """
            )

    def test_warning_removed_param(self):
        match = (
            r".*`old[01]` is deprecated since version 0\.10 and will be removed "
            r"in 0\.12.* see the documentation of .*_func_deprecated_params`."
        )
        with pytest.warns(FutureWarning, match=match):
            assert _func_deprecated_params(1, 2) == (1, 2, DEPRECATED, None)
        with pytest.warns(FutureWarning, match=match):
            assert _func_deprecated_params(1, 2, 3) == (1, 2, 3, None)
        with pytest.warns(FutureWarning, match=match):
            assert _func_deprecated_params(1, old0=2) == (
                1,
                2,
                DEPRECATED,
                None,
            )
        with pytest.warns(FutureWarning, match=match):
            assert _func_deprecated_params(1, old1=2) == (
                1,
                DEPRECATED,
                2,
                None,
            )
        # No warning when only non-deprecated parameters are used.
        with warnings.catch_warnings(record=True) as record:
            assert _func_deprecated_params(1, arg1=3) == (1, DEPRECATED, DEPRECATED, 3)
        assert len(record) == 0

    def test_warning_replaced_param(self):
        match = (
            r".*`old[0,1]` is deprecated since version 0\.10 and will be removed "
            r"in 0\.12.* see the documentation of .*_func_replace_params`."
        )
        with pytest.warns(FutureWarning, match=match):
            assert _func_replace_params(1, 2) == (
                1,
                DEPRECATED,
                DEPRECATED,
                None,
                2,
                None,
            )
        with pytest.warns(FutureWarning, match=match) as records:
            assert _func_replace_params(1, 2, 3) == (
                1,
                DEPRECATED,
                DEPRECATED,
                3,
                2,
                None,
            )
        assert len(records) == 2
        assert "`old1` is deprecated" in records[0].message.args[0]
        assert "`old0` is deprecated" in records[1].message.args[0]
        with pytest.warns(FutureWarning, match=match):
            assert _func_replace_params(1, old0=2) == (
                1,
                DEPRECATED,
                DEPRECATED,
                None,
                2,
                None,
            )
        with pytest.warns(FutureWarning, match=match):
            assert _func_replace_params(1, old1=3) == (
                1,
                DEPRECATED,
                DEPRECATED,
                3,
                None,
                None,
            )
        # Otherwise, no warnings are emitted!
        with warnings.catch_warnings(record=True) as record:
            assert _func_replace_params(1, new0=2, new1=3) == (
                1,
                DEPRECATED,
                DEPRECATED,
                2,
                3,
                None,
            )
        assert len(record) == 0

    def test_missing_DEPRECATED(self):
        decorate = deprecate_parameter(
            "old", start_version="0.10", stop_version="0.12", stacklevel=2
        )

        # A deprecated parameter whose default is not the DEPRECATED sentinel
        # must be rejected at decoration time.
        def foo(arg0, old=None):
            return arg0, old

        with pytest.raises(RuntimeError, match="Expected .* <DEPRECATED>"):
            decorate(foo)

        def bar(arg0, old=DEPRECATED):
            return arg0

        assert decorate(bar)(1) == 1

    def test_new_keyword_only(self):
        @deprecate_parameter(
            "old",
            new_name="new",
            start_version="0.19",
            stop_version="0.21",
        )
        def foo(arg0, old=DEPRECATED, *, new=1, arg3=None):
            """Expected docstring"""
            return arg0, new, arg3

        # Assert that nothing happens when the function is called with the
        # new API
        with warnings.catch_warnings(record=True) as recorded:
            # No kwargs
            assert foo(0) == (0, 1, None)
            # Kwargs without deprecated argument
            assert foo(0, new=1, arg3=2) == (0, 1, 2)
            assert foo(0, new=2) == (0, 2, None)
            assert foo(0, arg3=2) == (0, 1, 2)
        assert len(recorded) == 0

    def test_conflicting_old_and_new(self):
        match = r".*`old[0,1]` is deprecated"
        with pytest.warns(FutureWarning, match=match):
            with pytest.raises(ValueError, match=".* avoid conflicting values"):
                _func_replace_params(1, old0=2, new1=2)
        with pytest.warns(FutureWarning, match=match):
            with pytest.raises(ValueError, match=".* avoid conflicting values"):
                _func_replace_params(1, old1=2, new0=2)
        with pytest.warns(FutureWarning, match=match):
            with pytest.raises(ValueError, match=".* avoid conflicting values"):
                _func_replace_params(1, old0=1, old1=1, new0=1, new1=1)

    def test_wrong_call_signature(self):
        """Check that normal errors for faulty calls are unchanged."""
        with pytest.raises(
            TypeError, match=r".* required positional argument\: 'arg0'"
        ):
            _func_replace_params()
        with pytest.warns(FutureWarning, match=r".*`old[0,1]` is deprecated"):
            with pytest.raises(
                TypeError, match=".* multiple values for argument 'old0'"
            ):
                _func_deprecated_params(1, 2, old0=2)

    def test_wrong_param_name(self):
        # Decorating a function that lacks the named (old or new) parameter fails.
        with pytest.raises(ValueError, match="'old' not in parameters"):

            @deprecate_parameter("old", start_version="0.10", stop_version="0.12")
            def foo(arg0):
                pass

        with pytest.raises(ValueError, match="'new' not in parameters"):

            @deprecate_parameter(
                "old", new_name="new", start_version="0.10", stop_version="0.12"
            )
            def bar(arg0, old, arg1):
                pass

    def test_warning_location(self):
        with pytest.warns(FutureWarning) as records:
            _func_deprecated_params(1, old0=2, old1=2)
        testing.assert_stacklevel(records)
        assert len(records) == 2

    def test_stacklevel(self):
        @deprecate_parameter(
            "old",
            start_version="0.19",
            stop_version="0.21",
        )
        def foo(arg0, old=DEPRECATED):
            pass

        # Without an explicit stacklevel the decorator cannot locate the caller.
        regex = "Cannot determine stacklevel.*Set the stacklevel manually"
        with pytest.raises(ValueError, match=regex):
            foo(0, 1)

        @deprecate_parameter(
            "old",
            start_version="0.19",
            stop_version="0.21",
            stacklevel=2,
        )
        def bar(arg0, old=DEPRECATED):
            pass

        with pytest.warns(FutureWarning, match="`old` is deprecated") as records:
            bar(0, 1)
        testing.assert_stacklevel(records)
def test_failed_estimation():
    """FailedEstimation is falsy and raises helpful errors on call/attribute access."""
    message = 'Something went wrong with estimation'
    failure = FailedEstimation(message)

    assert failure.message == message
    assert str(failure) == message
    assert repr(failure).startswith("FailedEstimation(")
    assert bool(failure) is False

    call_regex = re.compile('FailedEstimation is not callable.*Hint', flags=re.DOTALL)
    with raises(FailedEstimationAccessError, match=call_regex):
        failure(np.ones((10, 2)))

    attr_regex = re.compile(
        "FailedEstimation has no attribute 'params'.*Hint", flags=re.DOTALL
    )
    with raises(FailedEstimationAccessError, match=attr_regex):
        failure.params
| Test_deprecate_parameter |
python | walkccc__LeetCode | solutions/1144. Decrease Elements To Make Array Zigzag/1144.py | {
"start": 0,
"end": 327
} | class ____:
def movesToMakeZigzag(self, nums: list[int]) -> int:
decreasing = [0] * 2
for i, num in enumerate(nums):
l = nums[i - 1] if i > 0 else 1001
r = nums[i + 1] if i + 1 < len(nums) else 1001
decreasing[i % 2] += max(0, num - min(l, r) + 1)
return min(decreasing[0], decreasing[1])
| Solution |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/jupyter_console_example.py | {
"start": 1654,
"end": 3601
class ____(QtWidgets.QMainWindow):
    """Main window hosting a pyqtgraph plot dock and an embedded Jupyter console dock."""

    def __init__(self, dark_mode=True):
        super().__init__()
        central_dock_area = DockArea()

        # create plot widget (and dock)
        self.plot_widget = pg.PlotWidget()
        plot_dock = Dock(name="Plot Widget Dock", closable=True)
        plot_dock.addWidget(self.plot_widget)
        central_dock_area.addDock(plot_dock)

        # create jupyter console widget (and dock)
        self.jupyter_console_widget = JupyterConsoleWidget()
        jupyter_console_dock = Dock("Jupyter Console Dock")
        jupyter_console_dock.addWidget(self.jupyter_console_widget)
        central_dock_area.addDock(jupyter_console_dock)
        self.setCentralWidget(central_dock_area)

        # Shut the kernel down cleanly when the application exits.
        app = QtWidgets.QApplication.instance()
        app.aboutToQuit.connect(self.jupyter_console_widget.shutdown_kernel)

        # Expose numpy (np) and the plot widget (pw) inside the console namespace.
        kernel = self.jupyter_console_widget.kernel_manager.kernel
        kernel.shell.push(dict(np=np, pw=self.plot_widget))

        # set dark mode
        if dark_mode:
            # Set Dark bg color via this relatively roundabout method
            self.jupyter_console_widget.set_default_style(
                "linux"
            )
if __name__ == "__main__":
    pg.mkQApp()
    main = MainWindow(dark_mode=True)
    main.show()
    main.jupyter_console_widget.execute('print("hello world :D ")')

    # plot a sine/cosine waves by printing to console
    # this is equivalent to typing the commands into the console manually
    main.jupyter_console_widget.execute("x = np.arange(0, 3 * np.pi, .1)")
    main.jupyter_console_widget.execute("pw.plotItem.plot(np.sin(x), pen='r')")
    main.jupyter_console_widget.execute(
        "pw.plotItem.plot(np.cos(x),\
        pen='cyan',\
        symbol='o',\
        symbolPen='m',\
        symbolBrush=(0,0,255))"
    )
    # List the variables currently defined in the console namespace.
    main.jupyter_console_widget.execute("whos")
    main.jupyter_console_widget.execute("")
    pg.exec()
| MainWindow |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 50672,
"end": 54379
class ____(TestCase):
    """Tests for ``split_when()``"""

    @staticmethod
    def _split_when_before(iterable, pred):
        # Applying the predicate to the *second* element of each adjacent
        # pair makes split_when behave like split_before.
        return mi.split_when(iterable, lambda _, c: pred(c))

    @staticmethod
    def _split_when_after(iterable, pred):
        # Applying the predicate to the *first* element of each adjacent
        # pair makes split_when behave like split_after.
        return mi.split_when(iterable, lambda c, _: pred(c))

    # split_before emulation

    def test_before_emulation_starts_with_sep(self):
        actual = list(self._split_when_before('xooxoo', lambda c: c == 'x'))
        expected = [['x', 'o', 'o'], ['x', 'o', 'o']]
        self.assertEqual(actual, expected)

    def test_before_emulation_ends_with_sep(self):
        actual = list(self._split_when_before('ooxoox', lambda c: c == 'x'))
        expected = [['o', 'o'], ['x', 'o', 'o'], ['x']]
        self.assertEqual(actual, expected)

    def test_before_emulation_no_sep(self):
        actual = list(self._split_when_before('ooo', lambda c: c == 'x'))
        expected = [['o', 'o', 'o']]
        self.assertEqual(actual, expected)

    # split_after emulation

    def test_after_emulation_starts_with_sep(self):
        actual = list(self._split_when_after('xooxoo', lambda c: c == 'x'))
        expected = [['x'], ['o', 'o', 'x'], ['o', 'o']]
        self.assertEqual(actual, expected)

    def test_after_emulation_ends_with_sep(self):
        actual = list(self._split_when_after('ooxoox', lambda c: c == 'x'))
        expected = [['o', 'o', 'x'], ['o', 'o', 'x']]
        self.assertEqual(actual, expected)

    def test_after_emulation_no_sep(self):
        actual = list(self._split_when_after('ooo', lambda c: c == 'x'))
        expected = [['o', 'o', 'o']]
        self.assertEqual(actual, expected)

    # edge cases

    def test_empty_iterable(self):
        actual = list(mi.split_when('', lambda a, b: a != b))
        expected = []
        self.assertEqual(actual, expected)

    def test_one_element(self):
        actual = list(mi.split_when('o', lambda a, b: a == b))
        expected = [['o']]
        self.assertEqual(actual, expected)

    def test_one_element_is_second_item(self):
        actual = list(self._split_when_before('x', lambda c: c == 'x'))
        expected = [['x']]
        self.assertEqual(actual, expected)

    def test_one_element_is_first_item(self):
        actual = list(self._split_when_after('x', lambda c: c == 'x'))
        expected = [['x']]
        self.assertEqual(actual, expected)

    def test_max_split(self):
        # Each case: (iterable, predicate, maxsplit) -> expected groups.
        for args, expected in [
            (
                ('a,b,c,d', lambda a, _: a == ',', -1),
                [['a', ','], ['b', ','], ['c', ','], ['d']],
            ),
            (
                ('a,b,c,d', lambda a, _: a == ',', 0),
                [['a', ',', 'b', ',', 'c', ',', 'd']],
            ),
            (
                ('a,b,c,d', lambda _, b: b == ',', 1),
                [['a'], [',', 'b', ',', 'c', ',', 'd']],
            ),
            (
                ('a,b,c,d', lambda a, _: a == ',', 2),
                [['a', ','], ['b', ','], ['c', ',', 'd']],
            ),
            (
                ('0124376', lambda a, b: a > b, -1),
                [['0', '1', '2', '4'], ['3', '7'], ['6']],
            ),
            (
                ('0124376', lambda a, b: a > b, 0),
                [['0', '1', '2', '4', '3', '7', '6']],
            ),
            (
                ('0124376', lambda a, b: a > b, 1),
                [['0', '1', '2', '4'], ['3', '7', '6']],
            ),
            (
                ('0124376', lambda a, b: a > b, 2),
                [['0', '1', '2', '4'], ['3', '7'], ['6']],
            ),
        ]:
            actual = list(mi.split_when(*args))
            self.assertEqual(actual, expected, str(args))
python | facebook__pyre-check | tools/upgrade/commands/tests/pysa_version_update_test.py | {
"start": 405,
"end": 1399
class ____(unittest.TestCase):
    """Tests for the pysa-version update command."""

    @patch("json.dumps")
    @patch("json.loads")
    @patch.object(Configuration, "find_parent_file")
    @patch.object(Configuration, "set_pysa_version")
    @patch.object(Configuration, "write")
    @patch("builtins.open")
    def test_run_pysa_version_update(
        self,
        open_mock,
        configuration_write,
        configuration_set_pysa_version,
        find_parent_file,
        json_loads,
        json_dumps,
    ) -> None:
        # NOTE: @patch decorators inject mocks bottom-up, so ``open_mock``
        # corresponds to the innermost patch ("builtins.open").
        arguments = MagicMock()
        arguments.hash = "new"
        arguments.no_commit = False
        # Simulate an existing configuration file pinned to the "old" version.
        mocks = [
            mock_open(read_data='{"pysa_version": "old"}').return_value,
        ]
        open_mock.side_effect = mocks
        PysaVersionUpdate.from_arguments(arguments, repository).run()
        # The command should set the new version, persist it, and have looked
        # up the configuration file to update.
        configuration_set_pysa_version.assert_has_calls([call("new")])
        configuration_write.assert_has_calls([call()])
        find_parent_file.assert_any_call(".pysa_configuration")
| UpdatePysaVersionTest |
python | getsentry__sentry | src/sentry/overwatch/endpoints/overwatch_rpc.py | {
"start": 3749,
"end": 5971
class ____(Endpoint):
    """
    Returns the resolved config for a Sentry organization.

    GET /prevent/pr-review/configs/resolved?sentryOrgId={orgId}&gitOrgName={gitOrgName}&provider={provider}
    """

    publish_status = {
        "GET": ApiPublishStatus.EXPERIMENTAL,
    }
    owner = ApiOwner.CODECOV
    # Only signed Overwatch RPC requests are accepted; no user permissions apply.
    authentication_classes = (OverwatchRpcSignatureAuthentication,)
    permission_classes = ()
    enforce_rate_limit = False

    def get(self, request: Request) -> Response:
        # Reject any request that did not pass Overwatch RPC signature auth.
        if not request.auth or not isinstance(
            request.successful_authenticator, OverwatchRpcSignatureAuthentication
        ):
            raise PermissionDenied

        # Validate required query parameters.
        sentry_org_id_str = request.GET.get("sentryOrgId")
        if not sentry_org_id_str:
            raise ParseError("Missing required query parameter: sentryOrgId")
        try:
            sentry_org_id = int(sentry_org_id_str)
            if sentry_org_id <= 0:
                # ParseError is not a ValueError, so it propagates past the
                # except clause below.
                raise ParseError("sentryOrgId must be a positive integer")
        except ValueError:
            raise ParseError("sentryOrgId must be a valid integer")

        git_org_name = request.GET.get("gitOrgName")
        if not git_org_name:
            raise ParseError("Missing required query parameter: gitOrgName")

        provider = request.GET.get("provider")
        if not provider:
            raise ParseError("Missing required query parameter: provider")

        # Resolve the active integration for this git organization.
        github_org_integrations = integration_service.get_organization_integrations(
            organization_id=sentry_org_id,
            providers=[provider],
            status=ObjectStatus.ACTIVE,
            name=git_org_name,
        )
        if not github_org_integrations:
            return Response({"detail": "GitHub integration not found"}, status=404)

        # Start from the default config and overlay the org-specific one, if any.
        config = PreventAIConfiguration.objects.filter(
            organization_id=sentry_org_id,
            integration_id=github_org_integrations[0].integration_id,
        ).first()
        response_data: dict[str, Any] = deepcopy(PREVENT_AI_CONFIG_DEFAULT)
        if config:
            response_data["organization"] = config.data
        return Response(data=response_data)
@region_silo_endpoint
| PreventPrReviewResolvedConfigsEndpoint |
python | django__django | tests/template_tests/test_custom.py | {
"start": 25075,
"end": 37260
} | class ____(TagTestCase):
def test_inclusion_tags(self):
c = Context({"value": 42})
templates = [
(
"{% load inclusion %}{% inclusion_no_params %}",
"inclusion_no_params - Expected result\n",
),
(
"{% load inclusion %}{% inclusion_one_param 37 %}",
"inclusion_one_param - Expected result: 37\n",
),
(
"{% load inclusion %}{% inclusion_explicit_no_context 37 %}",
"inclusion_explicit_no_context - Expected result: 37\n",
),
(
"{% load inclusion %}{% inclusion_no_params_with_context %}",
"inclusion_no_params_with_context - Expected result (context value: "
"42)\n",
),
(
"{% load inclusion %}{% inclusion_params_and_context 37 %}",
"inclusion_params_and_context - Expected result (context value: 42): "
"37\n",
),
(
"{% load inclusion %}{% inclusion_two_params 37 42 %}",
"inclusion_two_params - Expected result: 37, 42\n",
),
(
"{% load inclusion %}{% inclusion_one_default 37 %}",
"inclusion_one_default - Expected result: 37, hi\n",
),
(
'{% load inclusion %}{% inclusion_one_default 37 two="hello" %}',
"inclusion_one_default - Expected result: 37, hello\n",
),
(
'{% load inclusion %}{% inclusion_one_default one=99 two="hello" %}',
"inclusion_one_default - Expected result: 99, hello\n",
),
(
"{% load inclusion %}{% inclusion_one_default 37 42 %}",
"inclusion_one_default - Expected result: 37, 42\n",
),
(
"{% load inclusion %}{% inclusion_keyword_only_default kwarg=37 %}",
"inclusion_keyword_only_default - Expected result: 37\n",
),
(
"{% load inclusion %}{% inclusion_unlimited_args 37 %}",
"inclusion_unlimited_args - Expected result: 37, hi\n",
),
(
"{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}",
"inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n",
),
(
"{% load inclusion %}{% inclusion_only_unlimited_args %}",
"inclusion_only_unlimited_args - Expected result: \n",
),
(
"{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}",
"inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n",
),
(
"{% load inclusion %}"
'{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" '
"four=1|add:3 %}",
"inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / "
"eggs=scrambled, four=4\n",
),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_errors(self):
errors = [
(
"'inclusion_one_default' received unexpected keyword argument 'three'",
"{% load inclusion %}"
'{% inclusion_one_default 99 two="hello" three="foo" %}',
),
(
"'inclusion_two_params' received too many positional arguments",
"{% load inclusion %}{% inclusion_two_params 37 42 56 %}",
),
(
"'inclusion_one_default' received too many positional arguments",
"{% load inclusion %}{% inclusion_one_default 37 42 56 %}",
),
(
"'inclusion_one_default' did not receive value(s) for the argument(s): "
"'one'",
"{% load inclusion %}{% inclusion_one_default %}",
),
(
"'inclusion_keyword_only_default' received multiple values "
"for keyword argument 'kwarg'",
"{% load inclusion %}{% inclusion_keyword_only_default "
"kwarg=37 kwarg=42 %}",
),
(
"'inclusion_unlimited_args' did not receive value(s) for the "
"argument(s): 'one'",
"{% load inclusion %}{% inclusion_unlimited_args %}",
),
(
"'inclusion_unlimited_args_kwargs' received some positional "
"argument(s) after some keyword argument(s)",
"{% load inclusion %}"
"{% inclusion_unlimited_args_kwargs 37 40|add:2 "
'eggs="boiled" 56 four=1|add:3 %}',
),
(
"'inclusion_unlimited_args_kwargs' received multiple values for "
"keyword argument 'eggs'",
"{% load inclusion %}"
"{% inclusion_unlimited_args_kwargs 37 "
'eggs="scrambled" eggs="scrambled" %}',
),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string(entry[1])
def test_include_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'inclusion_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string(
"{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}"
)
def test_include_tag_missing_context_no_params(self):
msg = (
"'inclusion_tag_takes_context_without_params' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string(
"{% load inclusion %}{% inclusion_tag_takes_context_without_params %}"
)
def test_inclusion_tags_from_template(self):
c = Context({"value": 42})
templates = [
(
"{% load inclusion %}{% inclusion_no_params_from_template %}",
"inclusion_no_params_from_template - Expected result\n",
),
(
"{% load inclusion %}{% inclusion_one_param_from_template 37 %}",
"inclusion_one_param_from_template - Expected result: 37\n",
),
(
"{% load inclusion %}"
"{% inclusion_explicit_no_context_from_template 37 %}",
"inclusion_explicit_no_context_from_template - Expected result: 37\n",
),
(
"{% load inclusion %}"
"{% inclusion_no_params_with_context_from_template %}",
"inclusion_no_params_with_context_from_template - Expected result "
"(context value: 42)\n",
),
(
"{% load inclusion %}"
"{% inclusion_params_and_context_from_template 37 %}",
"inclusion_params_and_context_from_template - Expected result (context "
"value: 42): 37\n",
),
(
"{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}",
"inclusion_two_params_from_template - Expected result: 37, 42\n",
),
(
"{% load inclusion %}{% inclusion_one_default_from_template 37 %}",
"inclusion_one_default_from_template - Expected result: 37, hi\n",
),
(
"{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}",
"inclusion_one_default_from_template - Expected result: 37, 42\n",
),
(
"{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}",
"inclusion_unlimited_args_from_template - Expected result: 37, hi\n",
),
(
"{% load inclusion %}"
"{% inclusion_unlimited_args_from_template 37 42 56 89 %}",
"inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, "
"89\n",
),
(
"{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}",
"inclusion_only_unlimited_args_from_template - Expected result: \n",
),
(
"{% load inclusion %}"
"{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}",
"inclusion_only_unlimited_args_from_template - Expected result: 37, "
"42, 56, 89\n",
),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
def test_inclusion_tag_registration(self):
# The decorators preserve the decorated function's docstring, name,
# and attributes.
self.verify_tag(inclusion.inclusion_no_params, "inclusion_no_params")
self.verify_tag(inclusion.inclusion_one_param, "inclusion_one_param")
self.verify_tag(
inclusion.inclusion_explicit_no_context, "inclusion_explicit_no_context"
)
self.verify_tag(
inclusion.inclusion_no_params_with_context,
"inclusion_no_params_with_context",
)
self.verify_tag(
inclusion.inclusion_params_and_context, "inclusion_params_and_context"
)
self.verify_tag(inclusion.inclusion_two_params, "inclusion_two_params")
self.verify_tag(inclusion.inclusion_one_default, "inclusion_one_default")
self.verify_tag(inclusion.inclusion_unlimited_args, "inclusion_unlimited_args")
self.verify_tag(
inclusion.inclusion_only_unlimited_args, "inclusion_only_unlimited_args"
)
self.verify_tag(
inclusion.inclusion_tag_without_context_parameter,
"inclusion_tag_without_context_parameter",
)
self.verify_tag(inclusion.inclusion_tag_use_l10n, "inclusion_tag_use_l10n")
self.verify_tag(
inclusion.inclusion_unlimited_args_kwargs, "inclusion_unlimited_args_kwargs"
)
def test_15070_use_l10n(self):
"""
Inclusion tag passes down `use_l10n` of context to the
Context of the included/rendered template as well.
"""
c = Context({})
t = self.engine.from_string("{% load inclusion %}{% inclusion_tag_use_l10n %}")
self.assertEqual(t.render(c).strip(), "None")
c.use_l10n = True
self.assertEqual(t.render(c).strip(), "True")
def test_no_render_side_effect(self):
"""
#23441 -- InclusionNode shouldn't modify its nodelist at render time.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string("{% load inclusion %}{% inclusion_no_params %}")
count = template.nodelist.get_nodes_by_type(Node)
template.render(Context({}))
self.assertEqual(template.nodelist.get_nodes_by_type(Node), count)
def test_render_context_is_cleared(self):
"""
#24555 -- InclusionNode should push and pop the render_context stack
when rendering. Otherwise, leftover values such as blocks from
extending can interfere with subsequent rendering.
"""
engine = Engine(app_dirs=True, libraries=LIBRARIES)
template = engine.from_string(
"{% load inclusion %}{% inclusion_extends1 %}{% inclusion_extends2 %}"
)
self.assertEqual(template.render(Context({})).strip(), "one\ntwo")
| InclusionTagTests |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/tf_record_test_base.py | {
"start": 1208,
"end": 8444
} | class ____(test_base.DatasetTestBase):
"""Base class for testing TFRecord-based features."""
def setUp(self):
super(FeaturesTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self._filenames = self._createFiles()
def make_batch_feature(self,
filenames,
num_epochs,
batch_size,
label_key=None,
reader_num_threads=1,
parser_num_threads=1,
shuffle=False,
shuffle_seed=None,
drop_final_batch=False):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return readers.make_batched_features_dataset(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string),
"label": parsing_ops.FixedLenFeature([], dtypes.string),
},
label_key=label_key,
reader=core_readers.TFRecordDataset,
num_epochs=self.num_epochs,
shuffle=shuffle,
shuffle_seed=shuffle_seed,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads,
drop_final_batch=drop_final_batch)
def _record(self, f, r, l):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[f])),
"record":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[r])),
"keywords":
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r))),
"label":
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[compat.as_bytes(l)]))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _sum_keywords(self, num_files):
sum_keywords = 0
for i in range(num_files):
for j in range(self._num_records):
sum_keywords += 1 + (i + j) % 2
return sum_keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j, "fake-label"))
writer.close()
return filenames
def _run_actual_batch(self, outputs, label_key_provided=False):
if label_key_provided:
# outputs would be a tuple of (feature dict, label)
features, label = self.evaluate(outputs())
else:
features = self.evaluate(outputs())
label = features["label"]
file_out = features["file"]
keywords_indices = features["keywords"].indices
keywords_values = features["keywords"].values
keywords_dense_shape = features["keywords"].dense_shape
record = features["record"]
return ([
file_out, keywords_indices, keywords_values, keywords_dense_shape,
record, label
])
def _next_actual_batch(self, label_key_provided=False):
return self._run_actual_batch(self.outputs, label_key_provided)
def _interleave(self, iterators, cycle_length):
pending_iterators = iterators
open_iterators = []
num_open = 0
for i in range(cycle_length):
if pending_iterators:
open_iterators.append(pending_iterators.pop(0))
num_open += 1
while num_open:
for i in range(min(cycle_length, len(open_iterators))):
if open_iterators[i] is None:
continue
try:
yield next(open_iterators[i])
except StopIteration:
if pending_iterators:
open_iterators[i] = pending_iterators.pop(0)
else:
open_iterators[i] = None
num_open -= 1
def _next_expected_batch(self,
file_indices,
batch_size,
num_epochs,
cycle_length=1):
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i, compat.as_bytes("fake-label")
def _next_record_interleaved(file_indices, cycle_length):
return self._interleave([_next_record([i]) for i in file_indices],
cycle_length)
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
label_batch = []
for _ in range(num_epochs):
if cycle_length == 1:
next_records = _next_record(file_indices)
else:
next_records = _next_record_interleaved(file_indices, cycle_length)
for record in next_records:
f = record[0]
r = record[1]
label_batch.append(record[2])
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend(
[[batch_index, i] for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch, label_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
label_batch = []
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch, label_batch
]
def _verify_records(self,
batch_size,
file_index=None,
num_epochs=1,
label_key_provided=False,
interleave_cycle_length=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(
file_indices,
batch_size,
num_epochs,
cycle_length=interleave_cycle_length):
actual_batch = self._next_actual_batch(
label_key_provided=label_key_provided)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
| FeaturesTestBase |
python | mlflow__mlflow | mlflow/gateway/config.py | {
"start": 5657,
"end": 5920
} | class ____(ConfigModel):
anthropic_api_key: str
anthropic_version: str = "2023-06-01"
@field_validator("anthropic_api_key", mode="before")
def validate_anthropic_api_key(cls, value):
return _resolve_api_key_from_input(value)
| AnthropicConfig |
python | Lightning-AI__lightning | src/lightning/pytorch/utilities/model_summary/model_summary.py | {
"start": 4957,
"end": 21335
} | class ____:
"""Generates a summary of all layers in a :class:`~lightning.pytorch.core.LightningModule`.
Args:
model: The model to summarize (also referred to as the root module).
max_depth: Maximum depth of modules to show. Use -1 to show all modules or 0 to show no
summary. Defaults to 1.
The string representation of this summary prints a table with columns containing
the name, type and number of parameters for each layer.
The root module may also have an attribute ``example_input_array`` as shown in the example below.
If present, the root module will be called with it as input to determine the
intermediate input- and output shapes of all layers. Supported are tensors and
nested lists and tuples of tensors. All other types of inputs will be skipped and show as `?`
in the summary table. The summary will also display `?` for layers not used in the forward pass.
If there are parameters not associated with any layers or modules, the count of those parameters
will be displayed in the table under `other params`. The summary will display `n/a` for module type,
in size, and out size.
Example::
>>> import lightning.pytorch as pl
>>> class LitModel(pl.LightningModule):
...
... def __init__(self):
... super().__init__()
... self.net = nn.Sequential(nn.Linear(256, 512), nn.BatchNorm1d(512))
... self.example_input_array = torch.zeros(10, 256) # optional
...
... def forward(self, x):
... return self.net(x)
...
>>> model = LitModel()
>>> ModelSummary(model, max_depth=1) # doctest: +NORMALIZE_WHITESPACE
| Name | Type | Params | Mode | FLOPs | In sizes | Out sizes
----------------------------------------------------------------------------
0 | net | Sequential | 132 K | train | 2.6 M | [10, 256] | [10, 512]
----------------------------------------------------------------------------
132 K Trainable params
0 Non-trainable params
132 K Total params
0.530 Total estimated model params size (MB)
3 Modules in train mode
0 Modules in eval mode
2.6 M Total Flops
>>> ModelSummary(model, max_depth=-1) # doctest: +NORMALIZE_WHITESPACE
| Name | Type | Params | Mode | FLOPs | In sizes | Out sizes
------------------------------------------------------------------------------
0 | net | Sequential | 132 K | train | 2.6 M | [10, 256] | [10, 512]
1 | net.0 | Linear | 131 K | train | 2.6 M | [10, 256] | [10, 512]
2 | net.1 | BatchNorm1d | 1.0 K | train | 0 | [10, 512] | [10, 512]
------------------------------------------------------------------------------
132 K Trainable params
0 Non-trainable params
132 K Total params
0.530 Total estimated model params size (MB)
3 Modules in train mode
0 Modules in eval mode
2.6 M Total Flops
"""
def __init__(self, model: "pl.LightningModule", max_depth: int = 1) -> None:
self._model = model
if not isinstance(max_depth, int) or max_depth < -1:
raise ValueError(f"`max_depth` can be -1, 0 or > 0, got {max_depth}.")
# The max-depth needs to be plus one because the root module is already counted as depth 0.
self._flop_counter = FlopCounterMode(
mods=None if _TORCH_GREATER_EQUAL_2_4 else self._model,
display=False,
depth=max_depth + 1,
)
self._max_depth = max_depth
self._layer_summary = self.summarize()
# 1 byte -> 8 bits
# TODO: how do we compute precision_megabytes in case of mixed precision?
precision_to_bits = {
"64": 64,
"32": 32,
"16": 16,
"bf16": 16,
"16-true": 16,
"bf16-true": 16,
"32-true": 32,
"64-true": 64,
}
if self._model._trainer and self._model.trainer.precision not in precision_to_bits:
rank_zero_warn(
f"Precision {self._model.trainer.precision} is not supported by the model summary. "
" Estimated model size in MB will not be accurate. Using 32 bits instead.",
category=UserWarning,
)
precision = precision_to_bits.get(self._model.trainer.precision, 32) if self._model._trainer else 32
self._precision_megabytes = (precision / 8.0) * 1e-6
@property
def named_modules(self) -> list[tuple[str, nn.Module]]:
mods: list[tuple[str, nn.Module]]
if self._max_depth == 0:
mods = []
elif self._max_depth == 1:
# the children are the top-level modules
mods = list(self._model.named_children())
else:
mods = self._model.named_modules()
mods = list(mods)[1:] # do not include root module (LightningModule)
return mods
@property
def layer_names(self) -> list[str]:
return list(self._layer_summary.keys())
@property
def layer_types(self) -> list[str]:
return [layer.layer_type for layer in self._layer_summary.values()]
@property
def in_sizes(self) -> list:
return [layer.in_size for layer in self._layer_summary.values()]
@property
def out_sizes(self) -> list:
return [layer.out_size for layer in self._layer_summary.values()]
@property
def param_nums(self) -> list[int]:
return [layer.num_parameters for layer in self._layer_summary.values()]
@property
def training_modes(self) -> list[bool]:
return [layer.training for layer in self._layer_summary.values()]
@property
def total_training_modes(self) -> dict[str, int]:
modes = [layer.training for layer in self._model.modules()]
modes = modes[1:] # exclude the root module
return {"train": modes.count(True), "eval": modes.count(False)}
@property
def total_parameters(self) -> int:
return sum(p.numel() if not _tensor_has_shape(p) else 0 for p in self._model.parameters())
@property
def trainable_parameters(self) -> int:
return sum(p.numel() if not _tensor_has_shape(p) else 0 for p in self._model.parameters() if p.requires_grad)
@property
def total_layer_params(self) -> int:
return sum(self.param_nums)
@property
def model_size(self) -> float:
return self.total_parameters * self._precision_megabytes
@property
def total_flops(self) -> int:
return self._flop_counter.get_total_flops()
@property
def flop_counts(self) -> dict[str, dict[Any, int]]:
flop_counts = self._flop_counter.get_flop_counts()
ret = {
name: flop_counts.get(
f"{type(self._model).__name__}.{name}",
{},
)
for name in self.layer_names
}
return ret
def summarize(self) -> dict[str, LayerSummary]:
summary = OrderedDict((name, LayerSummary(module)) for name, module in self.named_modules)
if self._model.example_input_array is not None:
self._forward_example_input()
for layer in summary.values():
layer.detach_hook()
if self._max_depth >= 1:
# remove summary entries with depth > max_depth
for k in [k for k in summary if k.count(".") >= self._max_depth]:
del summary[k]
return summary
def _forward_example_input(self) -> None:
"""Run the example input through each layer to get input- and output sizes."""
model = self._model
# the summary is supported without a trainer instance so we need to use the underscore property
trainer = self._model._trainer
input_ = model.example_input_array
input_ = model._on_before_batch_transfer(input_)
input_ = model._apply_batch_transfer_handler(input_)
mode = _ModuleMode()
mode.capture(model)
model.eval()
# FlopCounterMode does not support ScriptModules before torch 2.4.0, so we use a null context
flop_context = (
contextlib.nullcontext()
if (
not _TORCH_GREATER_EQUAL_2_4
and any(isinstance(m, torch.jit.ScriptModule) for m in self._model.modules())
)
else self._flop_counter
)
forward_context = contextlib.nullcontext() if trainer is None else trainer.precision_plugin.forward_context()
with torch.no_grad(), forward_context, flop_context:
# let the model hooks collect the input- and output shapes
if isinstance(input_, (list, tuple)):
model(*input_)
elif isinstance(input_, dict):
model(**input_)
else:
model(input_)
mode.restore(model)
def _get_summary_data(self) -> list[tuple[str, list[str]]]:
"""Makes a summary listing with:
Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model Size
"""
arrays = [
(" ", list(map(str, range(len(self._layer_summary))))),
("Name", self.layer_names),
("Type", self.layer_types),
("Params", list(map(get_human_readable_count, self.param_nums))),
("Mode", ["train" if mode else "eval" for mode in self.training_modes]),
("FLOPs", list(map(get_human_readable_count, (sum(x.values()) for x in self.flop_counts.values())))),
]
if self._model.example_input_array is not None:
arrays.append(("In sizes", [str(x) for x in self.in_sizes]))
arrays.append(("Out sizes", [str(x) for x in self.out_sizes]))
total_leftover_params = self.total_parameters - self.total_layer_params
if total_leftover_params > 0:
self._add_leftover_params_to_summary(arrays, total_leftover_params)
return arrays
def _add_leftover_params_to_summary(self, arrays: list[tuple[str, list[str]]], total_leftover_params: int) -> None:
"""Add summary of params not associated with module or layer to model summary."""
layer_summaries = dict(arrays)
layer_summaries[" "].append(" ")
layer_summaries["Name"].append(LEFTOVER_PARAMS_NAME)
layer_summaries["Type"].append(NOT_APPLICABLE)
layer_summaries["Params"].append(get_human_readable_count(total_leftover_params))
layer_summaries["Mode"].append(NOT_APPLICABLE)
layer_summaries["FLOPs"].append(NOT_APPLICABLE)
if "In sizes" in layer_summaries:
layer_summaries["In sizes"].append(NOT_APPLICABLE)
if "Out sizes" in layer_summaries:
layer_summaries["Out sizes"].append(NOT_APPLICABLE)
def __str__(self) -> str:
arrays = self._get_summary_data()
total_parameters = self.total_parameters
trainable_parameters = self.trainable_parameters
model_size = self.model_size
total_training_modes = self.total_training_modes
total_flops = self.total_flops
return _format_summary_table(
total_parameters,
trainable_parameters,
model_size,
total_training_modes,
total_flops,
*arrays,
)
def __repr__(self) -> str:
return str(self)
def parse_batch_shape(batch: Any) -> Union[str, list]:
if hasattr(batch, "shape"):
return list(batch.shape)
if isinstance(batch, (list, tuple)):
return [parse_batch_shape(el) for el in batch]
return UNKNOWN_SIZE
def _format_summary_table(
total_parameters: int,
trainable_parameters: int,
model_size: float,
total_training_modes: dict[str, int],
total_flops: int,
*cols: tuple[str, list[str]],
) -> str:
"""Takes in a number of arrays, each specifying a column in the summary table, and combines them all into one big
string defining the summary table that are nicely formatted."""
n_rows = len(cols[0][1])
n_cols = 1 + len(cols)
# Get formatting width of each column
col_widths = []
for c in cols:
col_width = max(len(str(a)) for a in c[1]) if n_rows else 0
col_width = max(col_width, len(c[0])) # minimum length is header length
col_widths.append(col_width)
# Formatting
s = "{:<{}}"
total_width = sum(col_widths) + 3 * n_cols
header = [s.format(c[0], w) for c, w in zip(cols, col_widths)]
# Summary = header + divider + Rest of table
summary = " | ".join(header) + "\n" + "-" * total_width
for i in range(n_rows):
line = []
for c, w in zip(cols, col_widths):
line.append(s.format(str(c[1][i]), w))
summary += "\n" + " | ".join(line)
summary += "\n" + "-" * total_width
summary += "\n" + s.format(get_human_readable_count(trainable_parameters), 10)
summary += "Trainable params"
summary += "\n" + s.format(get_human_readable_count(total_parameters - trainable_parameters), 10)
summary += "Non-trainable params"
summary += "\n" + s.format(get_human_readable_count(total_parameters), 10)
summary += "Total params"
summary += "\n" + s.format(get_formatted_model_size(model_size), 10)
summary += "Total estimated model params size (MB)"
summary += "\n" + s.format(total_training_modes["train"], 10)
summary += "Modules in train mode"
summary += "\n" + s.format(total_training_modes["eval"], 10)
summary += "Modules in eval mode"
summary += "\n" + s.format(get_human_readable_count(total_flops), 10)
summary += "Total Flops"
return summary
def get_formatted_model_size(total_model_size: float) -> str:
return f"{total_model_size:,.3f}"
def get_human_readable_count(number: int) -> str:
"""Abbreviates an integer number with K, M, B, T for thousands, millions, billions and trillions, respectively.
Examples:
>>> get_human_readable_count(123)
'123 '
>>> get_human_readable_count(1234) # (one thousand)
'1.2 K'
>>> get_human_readable_count(2e6) # (two million)
'2.0 M'
>>> get_human_readable_count(3e9) # (three billion)
'3.0 B'
>>> get_human_readable_count(4e14) # (four hundred trillion)
'400 T'
>>> get_human_readable_count(5e15) # (more than trillion)
'5,000 T'
Args:
number: a positive integer number
Return:
A string formatted according to the pattern described above.
"""
assert number >= 0
labels = PARAMETER_NUM_UNITS
num_digits = int(math.floor(math.log10(number)) + 1 if number > 0 else 1)
num_groups = int(math.ceil(num_digits / 3))
num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions
shift = -3 * (num_groups - 1)
number = number * (10**shift)
index = num_groups - 1
if index < 1 or number >= 100:
return f"{int(number):,d} {labels[index]}"
return f"{number:,.1f} {labels[index]}"
def _tensor_has_shape(p: Tensor) -> bool:
from torch.nn.parameter import UninitializedParameter
# DTensor is a subtype of `UninitializedParameter`, but the shape is known
if isinstance(p, UninitializedParameter) and not _is_dtensor(p):
warning_cache.warn(
"The total number of parameters detected may be inaccurate because the model contains"
" an instance of `UninitializedParameter`. To get an accurate number, set `self.example_input_array`"
" in your LightningModule."
)
return True
return False
def summarize(lightning_module: "pl.LightningModule", max_depth: int = 1) -> ModelSummary:
"""Summarize the LightningModule specified by `lightning_module`.
Args:
lightning_module: `LightningModule` to summarize.
max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the
layer summary off. Default: 1.
Return:
The model summary object
"""
return ModelSummary(lightning_module, max_depth=max_depth)
| ModelSummary |
python | pypa__warehouse | tests/unit/utils/test_static.py | {
"start": 96,
"end": 1214
} | class ____:
def test_returns_when_valid(self, monkeypatch):
monkeypatch.setattr(
ManifestCacheBuster,
"get_manifest",
lambda x: {"/the/path/style.css": "/the/busted/path/style.css"},
)
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json")
result = cb(None, "/the/path/style.css", {"keyword": "arg"})
assert result == ("/the/busted/path/style.css", {"keyword": "arg"})
def test_passes_when_invalid(self, monkeypatch):
monkeypatch.setattr(ManifestCacheBuster, "get_manifest", lambda x: {})
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json")
cb(None, "/the/path/style.css", {"keyword": "arg"})
def test_returns_when_invalid_and_not_strict(self, monkeypatch):
monkeypatch.setattr(ManifestCacheBuster, "get_manifest", lambda x: {})
cb = ManifestCacheBuster("warehouse:static/dist/manifest.json", strict=False)
result = cb(None, "/the/path/style.css", {"keyword": "arg"})
assert result == ("/the/path/style.css", {"keyword": "arg"})
| TestManifestCacheBuster |
python | plotly__plotly.py | plotly/graph_objs/violin/hoverlabel/_font.py | {
"start": 233,
"end": 17138
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "violin.hoverlabel"
_path_str = "violin.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.violin.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.violin.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.violin.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | scikit-image__scikit-image | benchmarks/benchmark_filters.py | {
"start": 536,
"end": 918
} | class ____:
"""Benchmark for 3d sobel filters."""
def setup(self):
try:
filters.sobel(np.ones((8, 8, 8)))
except ValueError:
raise NotImplementedError("3d sobel unavailable")
self.image3d = data.binary_blobs(length=256, n_dim=3).astype(float)
def time_sobel_3d(self):
_ = filters.sobel(self.image3d)
| FiltersSobel3D |
python | kamyu104__LeetCode-Solutions | Python/maximum-odd-binary-number.py | {
"start": 504,
"end": 719
} | class ____(object):
def maximumOddBinaryNumber(self, s):
"""
:type s: str
:rtype: str
"""
n = s.count('1')
return "".join(['1']*(n-1)+['0']*(len(s)-n)+['1'])
| Solution2 |
python | paramiko__paramiko | paramiko/auth_handler.py | {
"start": 37417,
"end": 43006
} | class ____(AuthHandler):
"""
AuthHandler, and just auth, no service requests!
.. versionadded:: 3.2
"""
# NOTE: this purposefully duplicates some of the parent class in order to
# modernize, refactor, etc. The intent is that eventually we will collapse
# this one onto the parent in a backwards incompatible release.
@property
def _client_handler_table(self):
my_table = super()._client_handler_table.copy()
del my_table[MSG_SERVICE_ACCEPT]
return my_table
def send_auth_request(self, username, method, finish_message=None):
"""
Submit a userauth request message & wait for response.
Performs the transport message send call, sets self.auth_event, and
will lock-n-block as necessary to both send, and wait for response to,
the USERAUTH_REQUEST.
Most callers will want to supply a callback to ``finish_message``,
which accepts a Message ``m`` and may call mutator methods on it to add
more fields.
"""
# Store a few things for reference in handlers, including auth failure
# handler (which needs to know if we were using a bad method, etc)
self.auth_method = method
self.username = username
# Generic userauth request fields
m = Message()
m.add_byte(cMSG_USERAUTH_REQUEST)
m.add_string(username)
m.add_string("ssh-connection")
m.add_string(method)
# Caller usually has more to say, such as injecting password, key etc
finish_message(m)
# TODO 4.0: seems odd to have the client handle the lock and not
# Transport; that _may_ have been an artifact of allowing user
# threading event injection? Regardless, we don't want to move _this_
# locking into Transport._send_message now, because lots of other
# untouched code also uses that method and we might end up
# double-locking (?) but 4.0 would be a good time to revisit.
with self.transport.lock:
self.transport._send_message(m)
# We have cut out the higher level event args, but self.auth_event is
# still required for self.wait_for_response to function correctly (it's
# the mechanism used by the auth success/failure handlers, the abort
# handler, and a few other spots like in gssapi.
# TODO: interestingly, wait_for_response itself doesn't actually
# enforce that its event argument and self.auth_event are the same...
self.auth_event = threading.Event()
return self.wait_for_response(self.auth_event)
def auth_none(self, username):
return self.send_auth_request(username, "none")
def auth_publickey(self, username, key):
key_type, bits = self._get_key_type_and_bits(key)
algorithm = self._finalize_pubkey_algorithm(key_type)
blob = self._get_session_blob(
key,
"ssh-connection",
username,
algorithm,
)
def finish(m):
# This field doesn't appear to be named, but is False when querying
# for permission (ie knowing whether to even prompt a user for
# passphrase, etc) or True when just going for it. Paramiko has
# never bothered with the former type of message, apparently.
m.add_boolean(True)
m.add_string(algorithm)
m.add_string(bits)
m.add_string(key.sign_ssh_data(blob, algorithm))
return self.send_auth_request(username, "publickey", finish)
def auth_password(self, username, password):
def finish(m):
# Unnamed field that equates to "I am changing my password", which
# Paramiko clientside never supported and serverside only sort of
# supported.
m.add_boolean(False)
m.add_string(b(password))
return self.send_auth_request(username, "password", finish)
def auth_interactive(self, username, handler, submethods=""):
"""
response_list = handler(title, instructions, prompt_list)
"""
# Unlike most siblings, this auth method _does_ require other
# superclass handlers (eg userauth info request) to understand
# what's going on, so we still set some self attributes.
self.auth_method = "keyboard_interactive"
self.interactive_handler = handler
def finish(m):
# Empty string for deprecated language tag field, per RFC 4256:
# https://www.rfc-editor.org/rfc/rfc4256#section-3.1
m.add_string("")
m.add_string(submethods)
return self.send_auth_request(username, "keyboard-interactive", finish)
# NOTE: not strictly 'auth only' related, but allows users to opt-in.
def _choose_fallback_pubkey_algorithm(self, key_type, my_algos):
msg = "Server did not send a server-sig-algs list; defaulting to something in our preferred algorithms list" # noqa
self._log(DEBUG, msg)
noncert_key_type = key_type.replace("-cert-v01@openssh.com", "")
if key_type in my_algos or noncert_key_type in my_algos:
actual = key_type if key_type in my_algos else noncert_key_type
msg = f"Current key type, {actual!r}, is in our preferred list; using that" # noqa
algo = actual
else:
algo = my_algos[0]
msg = f"{key_type!r} not in our list - trying first list item instead, {algo!r}" # noqa
self._log(DEBUG, msg)
return algo
| AuthOnlyHandler |
python | scrapy__scrapy | scrapy/extensions/feedexport.py | {
"start": 10326,
"end": 11715
} | class ____(BlockingFeedStorage):
def __init__(
self,
uri: str,
use_active_mode: bool = False,
*,
feed_options: dict[str, Any] | None = None,
):
u = urlparse(uri)
if not u.hostname:
raise ValueError(f"Got a storage URI without a hostname: {uri}")
self.host: str = u.hostname
self.port: int = int(u.port or "21")
self.username: str = u.username or ""
self.password: str = unquote(u.password or "")
self.path: str = u.path
self.use_active_mode: bool = use_active_mode
self.overwrite: bool = not feed_options or feed_options.get("overwrite", True)
@classmethod
def from_crawler(
cls,
crawler: Crawler,
uri: str,
*,
feed_options: dict[str, Any] | None = None,
) -> Self:
return cls(
uri,
use_active_mode=crawler.settings.getbool("FEED_STORAGE_FTP_ACTIVE"),
feed_options=feed_options,
)
def _store_in_thread(self, file: IO[bytes]) -> None:
ftp_store_file(
path=self.path,
file=file,
host=self.host,
port=self.port,
username=self.username,
password=self.password,
use_active_mode=self.use_active_mode,
overwrite=self.overwrite,
)
| FTPFeedStorage |
python | coleifer__peewee | tests/sqlite.py | {
"start": 86080,
"end": 86198
} | class ____(TestModel):
month = ForeignKeyField(CalendarMonth, backref='days')
value = IntegerField()
| CalendarDay |
python | openai__openai-python | src/openai/types/graders/multi_grader.py | {
"start": 600,
"end": 1018
} | class ____(BaseModel):
calculate_output: str
"""A formula to calculate the output based on grader results."""
graders: Graders
"""
A StringCheckGrader object that performs a string comparison between input and
reference using a specified operation.
"""
name: str
"""The name of the grader."""
type: Literal["multi"]
"""The object type, which is always `multi`."""
| MultiGrader |
python | huggingface__transformers | tests/utils/test_hf_argparser.py | {
"start": 2019,
"end": 2165
} | class ____:
foo: MixedTypeEnum = "toto"
def __post_init__(self):
self.foo = MixedTypeEnum(self.foo)
@dataclass
| MixedTypeEnumExample |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py | {
"start": 10788,
"end": 11484
} | class ____(BaseModel):
class Config:
extra = Extra.forbid
enabled: bool
name: Optional[str] = None
dockerRepository: Optional[str] = None
dockerImageTag: Optional[str] = None
supportsDbt: Optional[bool] = None
supportsNormalization: Optional[bool] = None
license: Optional[str] = None
documentationUrl: Optional[AnyUrl] = None
connectorSubtype: Optional[str] = None
allowedHosts: Optional[AllowedHosts] = None
normalizationConfig: Optional[NormalizationDestinationDefinitionConfig] = None
suggestedStreams: Optional[SuggestedStreams] = None
resourceRequirements: Optional[ActorDefinitionResourceRequirements] = None
| RegistryOverrides |
python | crytic__slither | slither/solc_parsing/variables/variable_declaration.py | {
"start": 808,
"end": 8341
} | class ____:
# pylint: disable=too-many-branches
def __init__(self, variable: Variable, variable_data: Dict) -> None:
"""
A variable can be declared through a statement, or directly.
If it is through a statement, the following children may contain
the init value.
It may be possible that the variable is declared through a statement,
but the init value is declared at the VariableDeclaration children level
"""
self._variable = variable
self._was_analyzed = False
self._elem_to_parse: Optional[Union[Dict, UnknownType]] = None
self._initializedNotParsed: Optional[Dict] = None
self._is_compact_ast = False
self._reference_id: Optional[int] = None
if "nodeType" in variable_data:
self._is_compact_ast = True
nodeType = variable_data["nodeType"]
if nodeType in [
"VariableDeclarationStatement",
"VariableDefinitionStatement",
]:
if len(variable_data["declarations"]) > 1:
raise MultipleVariablesDeclaration
init = None
if "initialValue" in variable_data:
init = variable_data["initialValue"]
self._init_from_declaration(variable_data["declarations"][0], init)
elif nodeType == "VariableDeclaration":
self._init_from_declaration(variable_data, variable_data.get("value", None))
else:
raise ParsingError(f"Incorrect variable declaration type {nodeType}")
else:
nodeType = variable_data["name"]
if nodeType in [
"VariableDeclarationStatement",
"VariableDefinitionStatement",
]:
if len(variable_data["children"]) == 2:
init = variable_data["children"][1]
elif len(variable_data["children"]) == 1:
init = None
elif len(variable_data["children"]) > 2:
raise MultipleVariablesDeclaration
else:
raise ParsingError(
"Variable declaration without children?" + str(variable_data)
)
declaration = variable_data["children"][0]
self._init_from_declaration(declaration, init)
elif nodeType == "VariableDeclaration":
self._init_from_declaration(variable_data, None)
else:
raise ParsingError(f"Incorrect variable declaration type {nodeType}")
@property
def underlying_variable(self) -> Variable:
return self._variable
@property
def reference_id(self) -> int:
"""
Return the solc id. It can be compared with the referencedDeclaration attr
Returns None if it was not parsed (legacy AST)
"""
assert self._reference_id
return self._reference_id
def _handle_comment(self, attributes: Dict) -> None:
if "documentation" in attributes and "text" in attributes["documentation"]:
candidates = attributes["documentation"]["text"].split(",")
for candidate in candidates:
if "@custom:security non-reentrant" in candidate:
self._variable.is_reentrant = False
write_protection = re.search(
r'@custom:security write-protection="([\w, ()]*)"', candidate
)
if write_protection:
if self._variable.write_protection is None:
self._variable.write_protection = []
self._variable.write_protection.append(write_protection.group(1))
def _analyze_variable_attributes(self, attributes: Dict) -> None:
if "visibility" in attributes:
self._variable.visibility = attributes["visibility"]
else:
self._variable.visibility = "internal"
def _init_from_declaration(
self, var: Dict, init: Optional[Dict]
) -> None: # pylint: disable=too-many-branches
if self._is_compact_ast:
attributes = var
self._typeName = attributes["typeDescriptions"]["typeString"]
else:
assert len(var["children"]) <= 2
assert var["name"] == "VariableDeclaration"
attributes = var["attributes"]
self._typeName = attributes["type"]
self._variable.name = attributes["name"]
# self._arrayDepth = 0
# self._isMapping = False
# self._mappingFrom = None
# self._mappingTo = False
# self._initial_expression = None
# self._type = None
# Only for comapct ast format
# the id can be used later if referencedDeclaration
# is provided
if "id" in var:
self._reference_id = var["id"]
if "constant" in attributes:
self._variable.is_constant = attributes["constant"]
if "mutability" in attributes:
# Note: this checked is not needed if "constant" was already in attribute, but we keep it
# for completion
if attributes["mutability"] == "constant":
self._variable.is_constant = True
if attributes["mutability"] == "immutable":
self._variable.is_immutable = True
self._handle_comment(attributes)
self._analyze_variable_attributes(attributes)
if self._is_compact_ast:
if var["typeName"]:
self._elem_to_parse = var["typeName"]
else:
self._elem_to_parse = UnknownType(var["typeDescriptions"]["typeString"])
else:
if not var["children"]:
# It happens on variable declared inside loop declaration
try:
self._variable.type = ElementaryType(self._typeName)
self._elem_to_parse = None
except NonElementaryType:
self._elem_to_parse = UnknownType(self._typeName)
else:
self._elem_to_parse = var["children"][0]
if self._is_compact_ast:
self._initializedNotParsed = init
if init:
self._variable.initialized = True
else:
if init: # there are two way to init a var local in the AST
assert len(var["children"]) <= 1
self._variable.initialized = True
self._initializedNotParsed = init
elif len(var["children"]) in [0, 1]:
self._variable.initialized = False
self._initializedNotParsed = None
else:
assert len(var["children"]) == 2
self._variable.initialized = True
self._initializedNotParsed = var["children"][1]
def analyze(self, caller_context: CallerContextExpression) -> None:
# Can be re-analyzed due to inheritance
if self._was_analyzed:
return
self._was_analyzed = True
if self._elem_to_parse:
self._variable.type = parse_type(self._elem_to_parse, caller_context)
self._elem_to_parse = None
if self._variable.initialized:
assert self._initializedNotParsed
self._variable.expression = parse_expression(self._initializedNotParsed, caller_context)
self._initializedNotParsed = None
| VariableDeclarationSolc |
python | huggingface__transformers | src/transformers/models/falcon_h1/modeling_falcon_h1.py | {
"start": 10000,
"end": 16354
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: FalconH1Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[FalconH1Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| FalconH1RotaryEmbedding |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/test_responses_spec.py | {
"start": 407,
"end": 498
} | class ____(BaseSchema):
get_employee_role: int
get_employee_department: int
| ToolCalls |
python | kamyu104__LeetCode-Solutions | Python/bitwise-and-of-numbers-range.py | {
"start": 29,
"end": 232
} | class ____(object):
# @param m, an integer
# @param n, an integer
# @return an integer
def rangeBitwiseAnd(self, m, n):
while m < n:
n &= n - 1
return n
| Solution |
python | marshmallow-code__apispec | tests/schemas.py | {
"start": 1827,
"end": 1878
} | class ____(fields.String):
pass
| CustomStringField |
python | google__pytype | pytype/abstract/_typing.py | {
"start": 28029,
"end": 36157
} | class ____:
"""A late annotation.
A late annotation stores a string expression and a snapshot of the VM stack at
the point where the annotation was introduced. Once the expression is
resolved, the annotation pretends to be the resolved type; before that, it
pretends to be an unsolvable. This effect is achieved by delegating attribute
lookup with __getattribute__.
Note that for late annotation x, `isinstance(x, ...)` and `x.__class__` will
use the type that x is pretending to be; `type(x)` will reveal x's true type.
Use `x.is_late_annotation()` to check whether x is a late annotation.
"""
_RESOLVING: Any = object()
def __init__(
self, expr, stack, ctx: "context.Context", *, typing_imports=None
):
self.expr = expr
self.stack = stack
self.ctx = ctx
self.resolved = False
# Any new typing imports the annotation needs while resolving.
self._typing_imports = typing_imports or set()
self._type: _base.BaseValue = (
ctx.convert.unsolvable
) # the resolved type of `expr`
self._unresolved_instances = set()
self._resolved_instances = {}
# _attribute_names needs to be defined last! This contains the names of all
# of LateAnnotation's attributes, discovered by looking at
# LateAnnotation.__dict__ and self.__dict__. These names are used in
# __getattribute__ and __setattr__ to determine whether a given get/setattr
# call should operate on the LateAnnotation itself or its resolved type.
self._attribute_names = set(LateAnnotation.__dict__) | set(
super().__getattribute__("__dict__")
)
def flatten_expr(self):
"""Flattens the expression into a legal variable name if necessary.
Pytype stores parameterized recursive types in intermediate variables. If
self is such a type, this method flattens self.expr into a string that can
serve as a variable name. For example, 'MyRecursiveAlias[int, str]' is
flattened into '_MyRecursiveAlias_LBAR_int_COMMA_str_RBAR'.
Returns:
If self is a parameterized recursive type, a flattened version of
self.expr that is a legal variable name. Otherwise, self.expr unchanged.
"""
if "[" in self.expr and self.is_recursive():
# _DOT and _RBAR have no trailing underscore because they precede names
# that we already prefix an underscore to.
return "_" + self.expr.replace(".", "_DOT").replace(
"[", "_LBAR_"
).replace("]", "_RBAR").replace(", ", "_COMMA_")
return self.expr
def unflatten_expr(self):
"""Unflattens a flattened expression."""
if "_LBAR_" in self.expr:
mod, dot, rest = self.expr.rpartition(".")
# The [1:] slicing and trailing underscore in _DOT_ are to get rid of
# leading underscores added when flattening.
return (
mod
+ dot
+ rest[1:]
.replace("_DOT_", ".")
.replace("_LBAR_", "[")
.replace("_RBAR", "]")
.replace("_COMMA_", ", ")
)
return self.expr
def __repr__(self) -> str:
return "LateAnnotation({!r}, resolved={!r})".format(
self.expr, self._type if self.resolved else None
)
# __hash__ and __eq__ need to be explicitly defined for Python to use them in
# set/dict comparisons.
def __hash__(self) -> int:
return hash(self._type) if self.resolved else hash(self.expr)
def __eq__(self, other) -> bool:
return hash(self) == hash(other)
def __getattribute__(self, name):
# We use super().__getattribute__ directly for attribute access to avoid a
# performance penalty from this function recursively calling itself.
get = super().__getattribute__
if name == "_attribute_names" or name in get("_attribute_names"):
return get(name)
return get("_type").__getattribute__(name) # pytype: disable=attribute-error
def __setattr__(self, name, value):
if not hasattr(self, "_attribute_names") or name in self._attribute_names:
return super().__setattr__(name, value)
return self._type.__setattr__(name, value)
def __contains__(self, name):
return self.resolved and name in self._type
def resolve(
self,
node: "cfg.CFGNode",
f_globals: "_instances.LazyConcreteDict",
f_locals: "_instances.LazyConcreteDict",
) -> None:
"""Resolve the late annotation."""
if self.resolved:
return
# Sets resolved to a truthy value distinguishable from True so that
# 'if self.resolved' is True when self is partially resolved, but code that
# really needs to tell partially and fully resolved apart can do so.
self.resolved = LateAnnotation._RESOLVING
# Add implicit imports for typing, since we can have late annotations like
# `set[int]` which get converted to `typing.Set[int]`.
if self._typing_imports:
overlay = self.ctx.vm.import_module("typing", "typing", 0)
for v in self._typing_imports:
if v not in f_globals.members:
f_globals.members[v] = overlay.get_module(v).load_lazy_attribute(v)
var, errorlog = abstract_utils.eval_expr(
self.ctx, node, f_globals, f_locals, self.expr
)
if errorlog:
self.ctx.errorlog.copy_from(errorlog.errors, self.stack)
self._type = self.ctx.annotation_utils.extract_annotation(
node, var, None, self.stack
)
if self._type != self.ctx.convert.unsolvable:
# We may have tried to call __init__ on instances of this annotation.
# Since the annotation was unresolved at the time, we need to call
# __init__ again to define any instance attributes.
for instance in self._unresolved_instances:
if isinstance(instance.cls, Union):
# Having instance.cls be a Union type will crash in attribute.py.
# Setting it to Any picks up the annotation in another code path.
instance.cls = self.ctx.convert.unsolvable
else:
self.ctx.vm.reinitialize_if_initialized(node, instance)
self.resolved = True
log.info("Resolved late annotation %r to %r", self.expr, self._type)
def set_type(self, typ: _base.BaseValue) -> None:
# Used by annotation_utils.sub_one_annotation to substitute values into
# recursive aliases.
assert not self.resolved
self.resolved = True
self._type = typ
def to_variable(self, node: "cfg.CFGNode") -> "cfg.Variable":
if self.resolved:
return self._type.to_variable(node)
else:
return _base.BaseValue.to_variable(self, node) # pytype: disable=wrong-arg-types
def instantiate(
self,
node: "cfg.CFGNode",
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
) = None,
) -> "cfg.Variable":
"""Instantiate the pointed-to class, or record a placeholder instance."""
if self.resolved:
key = (node, _get_container_type_key(container))
if key not in self._resolved_instances:
self._resolved_instances[key] = self._type.instantiate(node, container)
return self._resolved_instances[key]
else:
instance = _instance_base.Instance(self, self.ctx)
self._unresolved_instances.add(instance)
return instance.to_variable(node)
def get_special_attribute(
self, node: "cfg.CFGNode", name: str, valself: "cfg.Variable | None"
) -> "cfg.Variable | None":
if name == "__getitem__" and not self.resolved:
container = _base.BaseValue.to_annotation_container(self) # pytype: disable=wrong-arg-types
return container.get_special_attribute(node, name, valself)
return self._type.get_special_attribute(node, name, valself)
def is_late_annotation(self) -> bool:
return True
def is_recursive(self) -> bool:
"""Check whether this is a recursive type."""
if not self.resolved:
return False
seen = {id(self)}
stack = [self._type]
while stack:
t = stack.pop()
if t.is_late_annotation():
if id(t) in seen:
return True
seen.add(id(t))
if isinstance(t, mixin.NestedAnnotation):
stack.extend(child for _, child in t.get_inner_types())
return False
| LateAnnotation |
python | pytorch__pytorch | test/jit/test_convert_activation.py | {
"start": 848,
"end": 4326
} | class ____(JitTestCase):
def test_check_no_type_promotion(self):
dtypes = [
torch.bool,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float32,
torch.float64,
]
# restore_mutation.h contains a mapping from activation operators
# to whether they allow type conversion. Use this checking to
# guard the mapping, and if any later change breaks the assumption
# we need to update the mapping correspondingly.
for activation, dtype in product(activations, dtypes):
inp = torch.normal(0, 5, size=(4, 4)).to(dtype)
try:
out = activation(inp)
self.assertEqual(dtype, out.dtype)
except RuntimeError:
# Skip the not implemented error
pass
def test_functional_to_inplace_activation(self):
for activation in activations:
def test_basic(x):
y = x + 1
z = activation(y)
return z
fn = torch.jit.script(test_basic)
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
FileCheck().check(f"aten::{activation.__name__}(").run(fn.graph)
self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not(f"aten::{activation.__name__}(").run(fn.graph)
FileCheck().check(f"aten::{activation.__name__}_").run(fn.graph)
inp = torch.rand([2, 2])
self.assertEqual(fn(inp), test_basic(inp))
def test_no_functional_to_inplace(self):
# inplace conversion should not happen because sigmoid may
# perform type conversion
def test1():
y = torch.ones([2, 2])
z = torch.sigmoid(y)
return z
fn = torch.jit.script(test1)
self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not("aten::sigmoid_").run(fn.graph)
# inplace conversion should not happen because y is alias
# the input x
def test2(x):
y = x[0]
z = torch.relu(y)
return z
fn = torch.jit.script(test2)
self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not("aten::relu_").run(fn.graph)
# inplace conversion should not happen because self.x is
# at the global scope
class Test3(nn.Module):
def __init__(self, x):
super().__init__()
self.x = x
def forward(self):
y = torch.relu(self.x)
return y
fn = torch.jit.script(Test3(torch.rand([2, 2])).eval())
self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not("aten::relu_").run(fn.graph)
@skipIfNoTorchVision
def test_resnet18_correctness(self):
model = torchvision.models.resnet18()
frozen_model = torch.jit.freeze(torch.jit.script(model.eval()))
(
N,
C,
H,
W,
) = (
10,
3,
224,
224,
)
inp = torch.randn(N, C, H, W)
self.run_pass("functional_to_inplace_activation", frozen_model.graph)
self.assertEqual(model(inp), frozen_model(inp))
| TestFunctionalToInplaceActivation |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/overloadImpl1.py | {
"start": 2038,
"end": 2496
} | class ____: ...
T_CD = TypeVar("T_CD", ClassC, ClassD)
@overload
def func7(cls: type[ClassC], var: int) -> ClassC: ...
@overload
def func7(cls: type[ClassD], var: str) -> ClassD: ...
def func7(cls: type[T_CD], var: int | str) -> T_CD:
return cls()
T_str = TypeVar("T_str", bound=str)
@overload
def func8(foo: int) -> int: ...
@overload
def func8(foo: T_str) -> tuple[T_str]: ...
def func8(foo: T_str | int) -> tuple[T_str] | int: ...
| ClassD |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_identity_config.py | {
"start": 345,
"end": 1571
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.superuser = self.create_user(is_superuser=True)
self.staff_user = self.create_user(is_staff=True)
self.slack_idp = self.create_identity_provider(type="slack", external_id="A")
self.github_idp = self.create_identity_provider(type="github", external_id="B")
self.google_idp = self.create_identity_provider(type="google", external_id="C")
self.org_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
self.login_as(self.user)
def _setup_identities(self) -> tuple[UserSocialAuth, Identity, AuthIdentity]:
social_obj = UserSocialAuth.objects.create(provider="github", user=self.user)
global_obj = Identity.objects.create(user=self.user, idp=self.github_idp)
org_obj = AuthIdentity.objects.create(user=self.user, auth_provider=self.org_provider)
return (social_obj, global_obj, org_obj)
def mock_is_login_provider_effect(provider_key: str) -> bool:
# Mimics behavior from getsentry repo
return provider_key in ("github", "vsts", "google")
@control_silo_test
| UserIdentityConfigTest |
python | ray-project__ray | rllib/offline/tests/test_json_reader.py | {
"start": 224,
"end": 1282
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_itr_batches(self):
"""Test that the json reader iterates over batches of rows correctly."""
rllib_dir = Path(__file__).parent.parent.parent.parent
print("rllib dir={}".format(rllib_dir))
data_file = os.path.join(
rllib_dir, "rllib/offline/tests/data/pendulum/large.json"
)
print("data_file={} exists={}".format(data_file, os.path.isfile(data_file)))
ioctx = IOContext(
config=(
AlgorithmConfig()
.training(train_batch_size=1200)
.offline_data(actions_in_input_normalized=True)
),
worker_index=0,
)
reader = JsonReader([data_file], ioctx)
assert len(reader.next()) == 1200
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestJsonReader |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/existing_high_priority_issue_handler.py | {
"start": 367,
"end": 869
} | class ____(DataConditionHandler[WorkflowEventData]):
group = DataConditionHandler.Group.WORKFLOW_TRIGGER
comparison_json_schema = {"type": "boolean"}
@staticmethod
def evaluate_value(event_data: WorkflowEventData, comparison: Any) -> bool:
state = event_data.group_state
if state is None or state["is_new"]:
return False
return bool(event_data.has_escalated) and event_data.group.priority == PriorityLevel.HIGH
| ExistingHighPriorityIssueConditionHandler |
python | pytorch__pytorch | tools/testing/target_determination/heuristics/filepath.py | {
"start": 2999,
"end": 4405
} | class ____(HeuristicInterface):
# Heuristic based on folders in the file path. Takes each folder of each
# changed file and attempts to find matches based on those folders
def __init__(self, **kwargs: dict[str, Any]) -> None:
super().__init__(**kwargs)
def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
try:
changed_files = query_changed_files()
except Exception as e:
warn(f"Can't query changed test files due to {e}")
changed_files = []
test_ratings = get_freq_dict(tests, changed_files)
test_ratings = {
TestRun(k): float(v) for (k, v) in test_ratings.items() if k in tests
}
return TestPrioritizations(
tests, normalize_ratings(test_ratings, 0.25, min_value=0.125)
)
if __name__ == "__main__":
# Quick thing so you can call the heuristic from the command line with a sha
import os
import sys
from tools.testing.discover_tests import TESTS
git_diff = f"git diff --name-only {sys.argv[1]} {sys.argv[1]}^"
changed_files = os.popen(git_diff).read().split("\n")
freq_dict = get_freq_dict(
TESTS, [x for x in changed_files if x != "" and not x.startswith("test")]
)
for k, v in sorted(freq_dict.items(), key=lambda x: x[1], reverse=False):
print(k, v)
print(changed_files)
| Filepath |
python | getsentry__sentry | tests/sentry/profiles/consumers/test_process.py | {
"start": 551,
"end": 3608
} | class ____(TestCase):
@staticmethod
def processing_factory() -> ProcessProfileStrategyFactory:
return ProcessProfileStrategyFactory()
@patch("sentry.profiles.consumers.process.factory.process_profile_task.delay")
def test_basic_profile_to_task(self, process_profile_task: MagicMock) -> None:
processing_strategy = self.processing_factory().create_with_partitions(
commit=Mock(), partitions={}
)
message_dict = {
"organization_id": 1,
"project_id": 1,
"key_id": 1,
"received": int(timezone.now().timestamp()),
"payload": json.dumps({"platform": "android", "profile": ""}),
}
payload = msgpack.packb(message_dict)
processing_strategy.submit(
Message(
BrokerValue(
KafkaPayload(
b"key",
payload,
[],
),
Partition(Topic("profiles"), 1),
1,
datetime.now(),
)
)
)
processing_strategy.poll()
processing_strategy.join(1)
processing_strategy.terminate()
process_profile_task.assert_called_with(
payload=b64encode(payload).decode("utf-8"),
sampled=True,
)
def test_adjust_instruction_addr_sample_format() -> None:
original_frames = [
{"instruction_addr": "0xdeadbeef"},
{"instruction_addr": "0xbeefdead"},
{"instruction_addr": "0xfeedface"},
]
profile: dict[str, Any] = {
"version": "1",
"platform": "cocoa",
"profile": {
"frames": original_frames.copy(),
"stacks": [[1, 0], [0, 1, 2]],
},
"debug_meta": {"images": []},
}
_, stacktraces, _ = _prepare_frames_from_profile(profile, profile["platform"])
assert profile["profile"]["stacks"] == [[3, 0], [4, 1, 2]]
frames = stacktraces[0]["frames"]
for i in range(3):
assert frames[i] == original_frames[i]
assert frames[3] == {"instruction_addr": "0xbeefdead", "adjust_instruction_addr": False}
assert frames[4] == {"instruction_addr": "0xdeadbeef", "adjust_instruction_addr": False}
def test_adjust_instruction_addr_original_format() -> None:
profile = {
"platform": "cocoa",
"sampled_profile": {
"samples": [
{
"frames": [
{"instruction_addr": "0xdeadbeef", "platform": "native"},
{"instruction_addr": "0xbeefdead", "platform": "native"},
],
}
]
},
"debug_meta": {"images": []},
}
_, stacktraces, _ = _prepare_frames_from_profile(profile, str(profile["platform"]))
frames = stacktraces[0]["frames"]
assert not frames[0]["adjust_instruction_addr"]
assert "adjust_instruction_addr" not in frames[1]
| TestProcessProfileConsumerStrategy |
python | huggingface__transformers | src/transformers/models/gemma3n/modular_gemma3n.py | {
"start": 84204,
"end": 88748
} | class ____(Gemma3Attention):
def __init__(self, config: Gemma3nTextConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.is_causal = True
del self.attn_logit_softcapping
self.scaling = 1.0
self.v_norm = Gemma3nRMSNorm(dim=config.head_dim, eps=config.rms_norm_eps, with_scale=False)
first_kv_shared_layer_idx = self.config.num_hidden_layers - self.config.num_kv_shared_layers
self.is_kv_shared_layer = layer_idx >= first_kv_shared_layer_idx > 0
prev_layers = config.layer_types[:first_kv_shared_layer_idx]
if self.is_kv_shared_layer:
# For shared layers, find the last non-shared layer of the same type before sharing starts
self.kv_shared_layer_index = len(prev_layers) - 1 - prev_layers[::-1].index(config.layer_types[layer_idx])
self.store_full_length_kv = False
else:
self.kv_shared_layer_index = None
# For non-shared layers, store full-length kv if this is the last non-shared layer of its type
self.store_full_length_kv = layer_idx == len(prev_layers) - 1 - prev_layers[::-1].index(
config.layer_types[layer_idx]
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: torch.Tensor = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.config.head_dim)
cos, sin = position_embeddings
query_states = self.q_proj(hidden_states).view(hidden_shape)
query_states = self.q_norm(query_states)
query_states = apply_rotary_pos_emb(query_states, cos, sin, unsqueeze_dim=2)
query_states = query_states.transpose(1, 2)
# For layers with shared KV (from kv sharing point onwards), we reuse the same keys/values states as the last non-sharing layer
if self.is_kv_shared_layer and past_key_values is not None:
key_states, value_states = past_key_values.shared_layers[self.kv_shared_layer_index]
# Device of past layer may be different from current one
key_states = key_states.to(query_states.device)
value_states = value_states.to(query_states.device)
else:
key_states = self.k_proj(hidden_states).view(hidden_shape)
key_states = self.k_norm(key_states)
key_states = apply_rotary_pos_emb(key_states, cos, sin, unsqueeze_dim=2)
key_states = key_states.transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape)
value_states = self.v_norm(value_states)
value_states = value_states.transpose(1, 2)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {
"sin": sin,
"cos": cos,
"cache_position": cache_position,
"sliding_window": self.sliding_window,
}
if not self.is_kv_shared_layer:
key_states, value_states = past_key_values.update(
key_states, value_states, self.layer_idx, cache_kwargs
)
if self.store_full_length_kv:
if not hasattr(past_key_values, "shared_layers"):
past_key_values.shared_layers = {}
past_key_values.shared_layers[self.layer_idx] = key_states, value_states
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=self.sliding_window,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Gemma3nTextAttention |
python | huggingface__transformers | src/transformers/models/aimv2/modeling_aimv2.py | {
"start": 7594,
"end": 9914
} | class ____(nn.Module):
def __init__(self, config: Aimv2TextConfig):
super().__init__()
embed_dim = config.hidden_size
self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
max_position_embedding = self.position_embedding.weight.shape[0]
if seq_length > max_position_embedding:
raise ValueError(
f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
f"{seq_length} and max_position_embeddings: {max_position_embedding}"
)
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = inputs_embeds + position_embeddings
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Aimv2TextEmbeddings |
python | doocs__leetcode | solution/3600-3699/3663.Find The Least Frequent Digit/Solution.py | {
"start": 0,
"end": 316
} | class ____:
def getLeastFrequentDigit(self, n: int) -> int:
cnt = [0] * 10
while n:
n, x = divmod(n, 10)
cnt[x] += 1
ans, f = 0, inf
for x, v in enumerate(cnt):
if 0 < v < f:
f = v
ans = x
return ans
| Solution |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/decorators.py | {
"start": 2318,
"end": 5036
} | class ____(PythonOperator):
def __init__(self, *, definition_name: str, uri: str | None = None, **kwargs) -> None:
super().__init__(**kwargs)
self._definition_name = definition_name
@classmethod
def from_definition(cls, definition: AssetDefinition | MultiAssetDefinition) -> Self:
_validate_asset_function_arguments(definition._function)
return cls(
task_id=definition._function.__name__,
inlets=[
Asset.ref(name=inlet_asset_name)
for inlet_asset_name, param in inspect.signature(definition._function).parameters.items()
if inlet_asset_name not in ("self", "context") and param.default is inspect.Parameter.empty
],
outlets=[v for _, v in definition.iter_assets()],
python_callable=definition._function,
definition_name=definition.name,
)
def _iter_kwargs(self, context: Mapping[str, Any]) -> Iterator[tuple[str, Any]]:
from airflow.sdk.execution_time.comms import ErrorResponse, GetAssetByName
from airflow.sdk.execution_time.task_runner import SUPERVISOR_COMMS
def _fetch_asset(name: str) -> Asset:
resp = SUPERVISOR_COMMS.send(GetAssetByName(name=name))
if resp is None:
raise RuntimeError("Empty non-error response received")
if isinstance(resp, ErrorResponse):
raise AirflowRuntimeError(resp)
return Asset(**resp.model_dump(exclude={"type"}))
value: Any
for key, param in inspect.signature(self.python_callable).parameters.items():
if param.default is not inspect.Parameter.empty:
value = param.default
elif key == "self":
value = _fetch_asset(self._definition_name)
elif key == "context":
value = context
else:
value = _fetch_asset(key)
yield key, value
def determine_kwargs(self, context: Mapping[str, Any]) -> Mapping[str, Any]:
return dict(self._iter_kwargs(context))
def _instantiate_task(definition: AssetDefinition | MultiAssetDefinition) -> None:
decorated_operator = cast("_TaskDecorator", definition._function)
if getattr(decorated_operator, "_airflow_is_task_decorator", False):
if "outlets" in decorated_operator.kwargs:
raise TypeError("@task decorator with 'outlets' argument is not supported in @asset")
decorated_operator.kwargs["outlets"] = [v for _, v in definition.iter_assets()]
decorated_operator()
else:
_AssetMainOperator.from_definition(definition)
@attrs.define(kw_only=True)
| _AssetMainOperator |
python | pytorch__pytorch | torch/_inductor/cpu_vec_isa.py | {
"start": 1109,
"end": 5412
} | class ____:
_bit_width: int
_macro: list[str]
_arch_flags: str
_dtype_nelements: dict[torch.dtype, int]
# Note [Checking for Vectorized Support in Inductor]
# TorchInductor CPU vectorization reuses PyTorch vectorization utility functions
# Hence, TorchInductor would depend on Sleef* to accelerate mathematical functions
# like exp, pow, sin, cos and etc.
# But PyTorch and TorchInductor might use different compilers to build code. If
# PyTorch uses gcc-7/g++-7 to build the release package, the libtorch_cpu.so
# will not expose the Sleef* AVX512 symbols since gcc-7/g++-7 cannot pass
# avx512 check in CMake - FindAVX.cmake. But TorchInductor install the latest
# gcc/g++ compiler by default while it could support the AVX512 compilation.
# Therefore, there would be a conflict sleef version between PyTorch and
# TorchInductor. Hence, we dry-compile the following code to check whether current
# HW platform and PyTorch both could support AVX512 or AVX2. And suppose ARM
# also needs the logic
# In fbcode however, we are using the same compiler for pytorch and for inductor codegen,
# making the runtime check unnecessary.
_avx_code = """
#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_ZVECTOR) || defined(CPU_CAPABILITY_NEON) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_SVE)
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#endif
alignas(64) float in_out_ptr0[16] = {0.0};
extern "C" void __avx_chk_kernel() {
auto tmp0 = at::vec::Vectorized<float>(1);
auto tmp1 = tmp0.exp();
tmp1.store(in_out_ptr0);
}
""" # noqa: B950
_avx_py_load = """
import torch
from ctypes import cdll
cdll.LoadLibrary("__lib_path__")
"""
def bit_width(self) -> int:
return self._bit_width
def nelements(self, dtype: torch.dtype = torch.float) -> int:
return self._dtype_nelements[dtype]
def build_macro(self) -> list[str]:
return self._macro
def build_arch_flags(self) -> str:
return self._arch_flags
def __hash__(self) -> int:
return hash(str(self))
def check_build(self, code: str) -> bool:
from torch._inductor.codecache import get_lock_dir, LOCK_TIMEOUT, write
from torch._inductor.cpp_builder import (
CppBuilder,
CppTorchOptions,
normalize_path_separator,
)
key, input_path = write(
code,
"cpp",
extra=_get_isa_dry_compile_fingerprint(self._arch_flags),
)
from torch.utils._filelock import FileLock
lock_dir = get_lock_dir()
lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
with lock:
output_dir = os.path.dirname(input_path)
buid_options = CppTorchOptions(vec_isa=self, warning_all=False)
x86_isa_help_builder = CppBuilder(
key,
[input_path],
buid_options,
output_dir,
)
try:
# Check if the output file exist, and compile when not.
output_path = normalize_path_separator(
x86_isa_help_builder.get_target_file_path()
)
if not os.path.isfile(output_path):
x86_isa_help_builder.build()
# Check build result
subprocess.check_call(
[
sys.executable,
"-c",
VecISA._avx_py_load.replace("__lib_path__", output_path),
],
cwd=output_dir,
stderr=subprocess.DEVNULL,
env=python_subprocess_env(),
)
except Exception:
return False
return True
def __bool__(self) -> bool:
return self.__bool__impl(config.cpp.vec_isa_ok)
@functools.cache # noqa: B019
def __bool__impl(self, vec_isa_ok) -> bool:
if vec_isa_ok is not None:
return vec_isa_ok
if config.is_fbcode():
return True
return self.check_build(VecISA._avx_code)
@dataclasses.dataclass
| VecISA |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 20182,
"end": 20288
} | class ____(BaseModel, extra="forbid"):
context: "ContextInput" = Field(..., description="")
| ContextQuery |
python | doocs__leetcode | solution/0200-0299/0235.Lowest Common Ancestor of a Binary Search Tree/Solution.py | {
"start": 164,
"end": 514
} | class ____:
def lowestCommonAncestor(
self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode'
) -> 'TreeNode':
while 1:
if root.val < min(p.val, q.val):
root = root.right
elif root.val > max(p.val, q.val):
root = root.left
else:
return root
| Solution |
python | pennersr__django-allauth | allauth/headless/contrib/rest_framework/authentication.py | {
"start": 361,
"end": 1125
} | class ____(authentication.BaseAuthentication):
"""
This authentication class uses the X-Session-Token that django-allauth
is using for authentication purposes.
"""
def authenticate(self, request: HttpRequest):
token = self.get_session_token(request)
if token:
return authenticate_by_x_session_token(token)
return None
def get_session_token(self, request: HttpRequest) -> typing.Optional[str]:
"""
Returns the session token for the given request, by looking up the
``X-Session-Token`` header. Override this if you want to extract the token
from e.g. the ``Authorization`` header.
"""
return request.headers.get("X-Session-Token")
| XSessionTokenAuthentication |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 43542,
"end": 47191
} | class ____(FunctionElement[_T]):
r"""Describe a named SQL function.
The :class:`.Function` object is typically generated from the
:data:`.func` generation object.
:param \*clauses: list of column expressions that form the arguments
of the SQL function call.
:param type\_: optional :class:`.TypeEngine` datatype object that will be
used as the return value of the column expression generated by this
function call.
:param packagenames: a string which indicates package prefix names
to be prepended to the function name when the SQL is generated.
The :data:`.func` generator creates these when it is called using
dotted format, e.g.::
func.mypackage.some_function(col1, col2)
.. seealso::
:ref:`tutorial_functions` - in the :ref:`unified_tutorial`
:data:`.func` - namespace which produces registered or ad-hoc
:class:`.Function` instances.
:class:`.GenericFunction` - allows creation of registered function
types.
"""
__visit_name__ = "function"
_traverse_internals = FunctionElement._traverse_internals + [
("packagenames", InternalTraversal.dp_plain_obj),
("name", InternalTraversal.dp_string),
("type", InternalTraversal.dp_type),
]
name: str
identifier: str
type: TypeEngine[_T]
"""A :class:`_types.TypeEngine` object which refers to the SQL return
type represented by this SQL function.
This datatype may be configured when generating a
:class:`_functions.Function` object by passing the
:paramref:`_functions.Function.type_` parameter, e.g.::
>>> select(func.lower("some VALUE", type_=String))
The small number of built-in classes of :class:`_functions.Function` come
with a built-in datatype that's appropriate to the class of function and
its arguments. For functions that aren't known, the type defaults to the
"null type".
"""
@overload
def __init__(
self,
name: str,
*clauses: _ColumnExpressionOrLiteralArgument[_T],
type_: None = ...,
packagenames: Optional[Tuple[str, ...]] = ...,
) -> None: ...
@overload
def __init__(
self,
name: str,
*clauses: _ColumnExpressionOrLiteralArgument[Any],
type_: _TypeEngineArgument[_T] = ...,
packagenames: Optional[Tuple[str, ...]] = ...,
) -> None: ...
def __init__(
self,
name: str,
*clauses: _ColumnExpressionOrLiteralArgument[Any],
type_: Optional[_TypeEngineArgument[_T]] = None,
packagenames: Optional[Tuple[str, ...]] = None,
) -> None:
"""Construct a :class:`.Function`.
The :data:`.func` construct is normally used to construct
new :class:`.Function` instances.
"""
self.packagenames = packagenames or ()
self.name = name
# if type is None, we get NULLTYPE, which is our _T. But I don't
# know how to get the overloads to express that correctly
self.type = type_api.to_instance(type_) # type: ignore
FunctionElement.__init__(self, *clauses)
def _bind_param(
self,
operator: OperatorType,
obj: Any,
type_: Optional[TypeEngine[_T]] = None,
expanding: bool = False,
**kw: Any,
) -> BindParameter[_T]:
return BindParameter(
self.name,
obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
type_=type_,
unique=True,
expanding=expanding,
**kw,
)
| Function |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.