language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-linnworks/source_linnworks/streams.py | {
"start": 6178,
"end": 9039
} | class ____(LinnworksGenericPagedResult, IncrementalLinnworksStream):
# https://apps.linnworks.net/Api/Method/ProcessedOrders-SearchProcessedOrders
# Response: SearchProcessedOrdersResponse https://apps.linnworks.net/Api/Class/API_Linnworks-Controllers-ProcessedOrders-Responses-SearchProcessedOrdersResponse
# Allows 150 calls per minute
primary_key = "nOrderId"
cursor_field = "dProcessedOn"
page_size = 500
use_cache = True
def path(self, **kwargs) -> str:
return "/api/ProcessedOrders/SearchProcessedOrders"
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
if not stream_state:
stream_state = {}
from_date = pendulum.parse(stream_state.get(self.cursor_field, self.start_date))
end_date = max(from_date, pendulum.tomorrow("UTC"))
date_diff = end_date - from_date
if date_diff.years > 0:
interval = pendulum.duration(months=1)
elif date_diff.months > 0:
interval = pendulum.duration(weeks=1)
elif date_diff.weeks > 0:
interval = pendulum.duration(days=1)
else:
interval = pendulum.duration(hours=1)
while True:
to_date = min(from_date + interval, end_date)
yield {"FromDate": from_date.isoformat(), "ToDate": to_date.isoformat()}
from_date = to_date
if from_date >= end_date:
break
def request_body_data(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
request = {
"DateField": "processed",
"FromDate": stream_slice["FromDate"],
"ToDate": stream_slice["ToDate"],
"PageNumber": 1 if not next_page_token else next_page_token["PageNumber"],
"ResultsPerPage": self.page_size,
"SearchSorting": {"SortField": "dProcessedOn", "SortDirection": "ASC"},
}
return {
"request": json.dumps(request, separators=(",", ":")),
}
def paged_result(self, response: requests.Response) -> Mapping[str, Any]:
return response.json()["ProcessedOrders"]
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
for record in self.paged_result(response)["Data"]:
yield record
def request_cache(self) -> Cassette:
try:
os.remove(self.cache_filename)
except FileNotFoundError:
pass
return vcr.use_cassette(
self.cache_filename,
record_mode="new_episodes",
serializer="yaml",
match_on=["method", "scheme", "host", "port", "path", "query", "body"],
)
| ProcessedOrders |
python | django-debug-toolbar__django-debug-toolbar | tests/test_integration_async.py | {
"start": 1282,
"end": 9595
} | class ____(BaseTestCase):
_is_async = True
def test_show_toolbar(self):
"""
Just to verify that show_toolbar() works with an ASGIRequest too
"""
self.assertTrue(show_toolbar(self.request))
async def test_show_toolbar_INTERNAL_IPS(self):
with self.settings(INTERNAL_IPS=[]):
self.assertFalse(show_toolbar(self.request))
@patch("socket.gethostbyname", return_value="127.0.0.255")
async def test_show_toolbar_docker(self, mocked_gethostbyname):
with self.settings(INTERNAL_IPS=[]):
# Is true because REMOTE_ADDR is 127.0.0.1 and the 255
# is shifted to be 1.
self.assertFalse(show_toolbar(self.request))
self.assertTrue(show_toolbar_with_docker(self.request))
mocked_gethostbyname.assert_called_once_with("host.docker.internal")
async def test_not_iterating_over_INTERNAL_IPS(self):
"""
Verify that the middleware does not iterate over INTERNAL_IPS in some way.
Some people use iptools.IpRangeList for their INTERNAL_IPS. This is a class
that can quickly answer the question if the setting contain a certain IP address,
but iterating over this object will drain all performance / blow up.
"""
class FailOnIteration:
def __iter__(self):
raise RuntimeError(
"The testcase failed: the code should not have iterated over INTERNAL_IPS"
)
def __contains__(self, x):
return True
with self.settings(INTERNAL_IPS=FailOnIteration()):
response = await self.async_client.get("/regular/basic/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "djDebug") # toolbar
async def test_middleware_response_insertion(self):
async def get_response(request):
return regular_view(request, "İ")
response = await DebugToolbarMiddleware(get_response)(self.request)
# check toolbar insertion before "</body>"
self.assertContains(response, "</div>\n</body>")
async def test_middleware_no_injection_when_encoded(self):
async def get_response(request):
response = HttpResponse("<html><body></body></html>")
response["Content-Encoding"] = "something"
return response
response = await DebugToolbarMiddleware(get_response)(self.request)
self.assertEqual(response.content, b"<html><body></body></html>")
async def test_cache_page(self):
# Clear the cache before testing the views. Other tests that use cached_view
# may run earlier and cause fewer cache calls.
cache.clear()
response = await self.async_client.get("/cached_view/")
self.assertEqual(len(response.toolbar.get_panel_by_id("CachePanel").calls), 3)
response = await self.async_client.get("/cached_view/")
self.assertEqual(len(response.toolbar.get_panel_by_id("CachePanel").calls), 2)
@override_settings(ROOT_URLCONF="tests.urls_use_package_urls")
async def test_include_package_urls(self):
"""Test urlsconf that uses the debug_toolbar.urls in the include call"""
# Clear the cache before testing the views. Other tests that use cached_view
# may run earlier and cause fewer cache calls.
cache.clear()
response = await self.async_client.get("/cached_view/")
self.assertEqual(len(response.toolbar.get_panel_by_id("CachePanel").calls), 3)
response = await self.async_client.get("/cached_view/")
self.assertEqual(len(response.toolbar.get_panel_by_id("CachePanel").calls), 2)
async def test_low_level_cache_view(self):
"""Test cases when low level caching API is used within a request."""
response = await self.async_client.get("/cached_low_level_view/")
self.assertEqual(len(response.toolbar.get_panel_by_id("CachePanel").calls), 2)
response = await self.async_client.get("/cached_low_level_view/")
self.assertEqual(len(response.toolbar.get_panel_by_id("CachePanel").calls), 1)
async def test_cache_disable_instrumentation(self):
"""
Verify that middleware cache usages before and after
DebugToolbarMiddleware are not counted.
"""
self.assertIsNone(cache.set("UseCacheAfterToolbar.before", None))
self.assertIsNone(cache.set("UseCacheAfterToolbar.after", None))
response = await self.async_client.get("/execute_sql/")
self.assertEqual(cache.get("UseCacheAfterToolbar.before"), 1)
self.assertEqual(cache.get("UseCacheAfterToolbar.after"), 1)
self.assertEqual(len(response.toolbar.get_panel_by_id("CachePanel").calls), 0)
async def test_is_toolbar_request(self):
request = arf.get("/__debug__/render_panel/")
self.assertTrue(self.toolbar.is_toolbar_request(request))
request = arf.get("/invalid/__debug__/render_panel/")
self.assertFalse(self.toolbar.is_toolbar_request(request))
request = arf.get("/render_panel/")
self.assertFalse(self.toolbar.is_toolbar_request(request))
@override_settings(ROOT_URLCONF="tests.urls_invalid")
async def test_is_toolbar_request_without_djdt_urls(self):
"""Test cases when the toolbar urls aren't configured."""
request = arf.get("/__debug__/render_panel/")
self.assertFalse(self.toolbar.is_toolbar_request(request))
request = arf.get("/render_panel/")
self.assertFalse(self.toolbar.is_toolbar_request(request))
@override_settings(ROOT_URLCONF="tests.urls_invalid")
async def test_is_toolbar_request_override_request_urlconf(self):
"""Test cases when the toolbar URL is configured on the request."""
request = arf.get("/__debug__/render_panel/")
self.assertFalse(self.toolbar.is_toolbar_request(request))
# Verify overriding the urlconf on the request is valid.
request.urlconf = "tests.urls"
self.assertTrue(self.toolbar.is_toolbar_request(request))
async def test_is_toolbar_request_with_script_prefix(self):
"""
Test cases when Django is running under a path prefix, such as via the
FORCE_SCRIPT_NAME setting.
"""
request = arf.get("/__debug__/render_panel/", SCRIPT_NAME="/path/")
self.assertTrue(self.toolbar.is_toolbar_request(request))
request = arf.get("/invalid/__debug__/render_panel/", SCRIPT_NAME="/path/")
self.assertFalse(self.toolbar.is_toolbar_request(request))
request = arf.get("/render_panel/", SCRIPT_NAME="/path/")
self.assertFalse(self.toolbar.is_toolbar_request(self.request))
async def test_data_gone(self):
response = await self.async_client.get(
"/__debug__/render_panel/?request_id=GONE&panel_id=RequestPanel"
)
self.assertIn("Please reload the page and retry.", response.json()["content"])
async def test_sql_page(self):
response = await self.async_client.get("/execute_sql/")
self.assertEqual(
len(response.toolbar.get_panel_by_id("SQLPanel").get_stats()["queries"]), 1
)
async def test_async_sql_page(self):
response = await self.async_client.get("/async_execute_sql/")
self.assertEqual(
len(response.toolbar.get_panel_by_id("SQLPanel").get_stats()["queries"]), 2
)
# Concurrent database queries are not fully supported by Django's backend with
# current integrated database drivers like psycopg2
# (considering postgresql as an example) and
# support for async drivers like psycopg3 isn't integrated yet.
# As a result, regardless of ASGI/async or WSGI/sync or any other attempts to make
# concurrent database queries like tests/views/async_db_concurrent,
# Django will still execute them synchronously.
# Check out the following links for more information:
# https://forum.djangoproject.com/t/are-concurrent-database-queries-in-asgi-a-thing/24136/2
# https://github.com/django-commons/django-debug-toolbar/issues/1828
# Work that is done so far for asynchrounous database backend
# https://github.com/django/deps/blob/main/accepted/0009-async.rst#the-orm
@override_settings(DEBUG=True)
| DebugToolbarTestCase |
python | airbytehq__airbyte | airbyte-integrations/bases/base-normalization/normalization/transform_catalog/table_name_registry.py | {
"start": 336,
"end": 788
} | class ____:
"""
A record of names collected by the TableNameRegistry
"""
def __init__(self, intermediate_schema: str, schema: str, json_path: List[str], stream_name: str, table_name: str):
self.intermediate_schema: str = intermediate_schema
self.schema: str = schema
self.json_path: List[str] = json_path
self.stream_name: str = stream_name
self.table_name: str = table_name
| NormalizedNameMetadata |
python | getsentry__sentry | src/sentry/flags/providers.py | {
"start": 2002,
"end": 2159
} | class ____(Exception):
"""The request body could not be deserialized."""
def __init__(self, errors):
self.errors = errors
| DeserializationError |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 37903,
"end": 38150
} | class ____(BoringModel):
def configure_gradient_clipping(self, optimizer, gradient_clip_val=None, gradient_clip_algorithm=None):
if self.current_epoch == 1:
raise RuntimeError("Trouble!")
| TroubledModelConfigureGradienClipping |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/utils/pod_manager.py | {
"start": 7882,
"end": 8017
} | class ____(AirflowException):
"""When pod does not leave the ``Pending`` phase within specified timeout."""
| PodLaunchTimeoutException |
python | django__django | tests/i18n/models.py | {
"start": 216,
"end": 541
} | class ____(models.Model):
name = models.CharField(max_length=50)
date_added = models.DateTimeField(default=datetime(1799, 1, 31, 23, 59, 59, 0))
cents_paid = models.DecimalField(max_digits=4, decimal_places=2)
products_delivered = models.IntegerField()
class Meta:
verbose_name = _("Company")
| Company |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 1835,
"end": 3406
} | class ____(TestCase, RowDeprecationTestMixin):
def setUp(self):
self.widget = widgets.BooleanWidget()
def test_clean(self):
self.assertTrue(self.widget.clean("1"))
self.assertTrue(self.widget.clean(1))
self.assertTrue(self.widget.clean("TRUE"))
self.assertTrue(self.widget.clean("True"))
self.assertTrue(self.widget.clean("true"))
self.assertFalse(self.widget.clean("0"))
self.assertFalse(self.widget.clean(0))
self.assertFalse(self.widget.clean("FALSE"))
self.assertFalse(self.widget.clean("False"))
self.assertFalse(self.widget.clean("false"))
self.assertEqual(self.widget.clean(""), None)
self.assertEqual(self.widget.clean("NONE"), None)
self.assertEqual(self.widget.clean("None"), None)
self.assertEqual(self.widget.clean("none"), None)
self.assertEqual(self.widget.clean("NULL"), None)
self.assertEqual(self.widget.clean("null"), None)
def test_render(self):
self.assertEqual(self.widget.render(True), "1")
self.assertEqual(self.widget.render(False), "0")
self.assertEqual(self.widget.render(None), "")
def test_render_coerce_to_string_is_False(self):
self.widget = widgets.BooleanWidget(coerce_to_string=False)
self.assertTrue(self.widget.render(True))
self.assertFalse(self.widget.render(False))
self.assertIsNone(self.widget.render(None))
def test_render_invalid_type(self):
self.assertEqual(self.widget.render("a"), "")
| BooleanWidgetTest |
python | crytic__slither | slither/tools/upgradeability/checks/functions_ids.py | {
"start": 1095,
"end": 3275
} | class ____(AbstractCheck):
ARGUMENT = "function-id-collision"
IMPACT = CheckClassification.HIGH
HELP = "Functions ids collision"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#functions-ids-collisions"
WIKI_TITLE = "Functions ids collisions"
# region wiki_description
WIKI_DESCRIPTION = """
Detect function id collision between the contract and the proxy.
"""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Contract{
function gsf() public {
// ...
}
}
contract Proxy{
function tgeo() public {
// ...
}
}
```
`Proxy.tgeo()` and `Contract.gsf()` have the same function id (0x67e43e43).
As a result, `Proxy.tgeo()` will shadow Contract.gsf()`.
"""
# endregion wiki_exploit_scenario
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Rename the function. Avoid public functions in the proxy.
"""
# endregion wiki_recommendation
REQUIRE_CONTRACT = True
REQUIRE_PROXY = True
def _check(self):
signatures_implem = get_signatures(self.contract)
signatures_proxy = get_signatures(self.proxy)
signatures_ids_implem = {get_function_id(s): s for s in signatures_implem}
signatures_ids_proxy = {get_function_id(s): s for s in signatures_proxy}
results = []
for (k, _) in signatures_ids_implem.items():
if k in signatures_ids_proxy:
if signatures_ids_implem[k] != signatures_ids_proxy[k]:
implem_function = _get_function_or_variable(
self.contract, signatures_ids_implem[k]
)
proxy_function = _get_function_or_variable(self.proxy, signatures_ids_proxy[k])
info = [
"Function id collision found: ",
implem_function,
" ",
proxy_function,
"\n",
]
json = self.generate_result(info)
results.append(json)
return results
| IDCollision |
python | kamyu104__LeetCode-Solutions | Python/campus-bikes-ii.py | {
"start": 257,
"end": 1277
} | class ____(object): # this is slower than Solution2 in python
def assignBikes(self, workers, bikes):
"""
:type workers: List[List[int]]
:type bikes: List[List[int]]
:rtype: int
"""
def manhattan(p1, p2):
return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])
dp = [[float("inf")]*((1<<len(bikes))) for _ in xrange(2)]
dp[0][0] = 0
for i in xrange(len(workers)):
dp[(i+1)%2] = [float("inf")] * ((1<<len(bikes)))
for j in xrange(len(bikes)):
for taken in xrange((1<<len(bikes))):
if taken & (1<<j):
continue
dp[(i+1)%2][taken|(1<<j)] = \
min(dp[(i+1)%2][taken|(1<<j)],
dp[i%2][taken] +
manhattan(workers[i], bikes[j]))
return min(dp[len(workers)%2])
# Time: O((w * b * 2^b) * log(w * b * 2^b))
# Space: O(w * b * 2^b)
import heapq
| Solution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 1809,
"end": 2039
} | class ____(graphene.Interface):
label = graphene.String()
description = graphene.String()
metadataEntries = non_null_list(GrapheneMetadataEntry)
class Meta:
name = "DisplayableEvent"
| GrapheneDisplayableEvent |
python | doocs__leetcode | solution/0300-0399/0308.Range Sum Query 2D - Mutable/Solution2.py | {
"start": 1295,
"end": 1923
} | class ____:
def __init__(self, matrix: List[List[int]]):
self.trees = [SegmentTree(row) for row in matrix]
def update(self, row: int, col: int, val: int) -> None:
tree = self.trees[row]
tree.modify(1, col + 1, val)
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
return sum(
self.trees[row].query(1, col1 + 1, col2 + 1)
for row in range(row1, row2 + 1)
)
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# obj.update(row,col,val)
# param_2 = obj.sumRegion(row1,col1,row2,col2)
| NumMatrix |
python | hyperopt__hyperopt | hyperopt/tests/integration/test_mongoexp.py | {
"start": 13431,
"end": 14195
} | class ____:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# -- assert that the test raises a ReserveTimeout within 5 seconds
@pytest.mark.timeout(10) # XXX: this needs a suspiciously long timeout
@with_mongo_trials
def test_main_worker(trials):
with pytest.raises(ReserveTimeout):
options = FakeOptions(
max_jobs=1,
# XXX: sync this with TempMongo
mongo=as_mongo_str("localhost:22334/foodb"),
reserve_timeout=1,
poll_interval=0.5,
workdir=None,
exp_key="foo",
last_job_timeout=None,
)
# -- check that it runs
# and that the reserve timeout is respected
main_worker_helper(options, ())
| FakeOptions |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_builtins/A003.py | {
"start": 0,
"end": 332
} | class ____:
ImportError = 4
id: int
dir = "/"
def __init__(self):
self.float = 5 # is fine
self.id = 10
self.dir = "."
def int(self):
pass
def str(self):
pass
def method_usage(self) -> str:
pass
def attribute_usage(self) -> id:
pass
| MyClass |
python | walkccc__LeetCode | solutions/1591. Strange Printer II/1591.py | {
"start": 85,
"end": 1347
} | class ____:
def isPrintable(self, targetGrid: list[list[int]]) -> bool:
MAX_COLOR = 60
m = len(targetGrid)
n = len(targetGrid[0])
# graph[u] := {v1, v2} means v1 and v2 cover u
graph = [set() for _ in range(MAX_COLOR + 1)]
for color in range(1, MAX_COLOR + 1):
# Get the rectangle of the current color.
minI = m
minJ = n
maxI = -1
maxJ = -1
for i in range(m):
for j in range(n):
if targetGrid[i][j] == color:
minI = min(minI, i)
minJ = min(minJ, j)
maxI = max(maxI, i)
maxJ = max(maxJ, j)
# Add any color covering the current as the children.
for i in range(minI, maxI + 1):
for j in range(minJ, maxJ + 1):
if targetGrid[i][j] != color:
graph[color].add(targetGrid[i][j])
states = [State.INIT] * (MAX_COLOR + 1)
def hasCycle(u: int) -> bool:
if states[u] == State.VISITING:
return True
if states[u] == State.VISITED:
return False
states[u] = State.VISITING
if any(hasCycle(v) for v in graph[u]):
return True
states[u] = State.VISITED
return False
return not (any(hasCycle(i) for i in range(1, MAX_COLOR + 1)))
| Solution |
python | django__django | tests/model_fields/test_promises.py | {
"start": 592,
"end": 5427
} | class ____(SimpleTestCase):
def test_AutoField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
AutoField(primary_key=True).get_prep_value(lazy_func()), int
)
def test_BinaryField(self):
lazy_func = lazy(lambda: b"", bytes)
self.assertIsInstance(BinaryField().get_prep_value(lazy_func()), bytes)
def test_BooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(BooleanField().get_prep_value(lazy_func()), bool)
def test_CharField(self):
lazy_func = lazy(lambda: "", str)
self.assertIsInstance(CharField().get_prep_value(lazy_func()), str)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(CharField().get_prep_value(lazy_func()), str)
def test_DateField(self):
lazy_func = lazy(lambda: datetime.date.today(), datetime.date)
self.assertIsInstance(DateField().get_prep_value(lazy_func()), datetime.date)
def test_DateTimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now(), datetime.datetime)
self.assertIsInstance(
DateTimeField().get_prep_value(lazy_func()), datetime.datetime
)
def test_DecimalField(self):
lazy_func = lazy(lambda: Decimal("1.2"), Decimal)
self.assertIsInstance(DecimalField().get_prep_value(lazy_func()), Decimal)
def test_EmailField(self):
lazy_func = lazy(lambda: "mailbox@domain.com", str)
self.assertIsInstance(EmailField().get_prep_value(lazy_func()), str)
def test_FileField(self):
lazy_func = lazy(lambda: "filename.ext", str)
self.assertIsInstance(FileField().get_prep_value(lazy_func()), str)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(FileField().get_prep_value(lazy_func()), str)
def test_FilePathField(self):
lazy_func = lazy(lambda: "tests.py", str)
self.assertIsInstance(FilePathField().get_prep_value(lazy_func()), str)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(FilePathField().get_prep_value(lazy_func()), str)
def test_FloatField(self):
lazy_func = lazy(lambda: 1.2, float)
self.assertIsInstance(FloatField().get_prep_value(lazy_func()), float)
def test_ImageField(self):
lazy_func = lazy(lambda: "filename.ext", str)
self.assertIsInstance(ImageField().get_prep_value(lazy_func()), str)
def test_IntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(IntegerField().get_prep_value(lazy_func()), int)
def test_IPAddressField(self):
lazy_func = lazy(lambda: "127.0.0.1", str)
self.assertIsInstance(IPAddressField().get_prep_value(lazy_func()), str)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(IPAddressField().get_prep_value(lazy_func()), str)
def test_GenericIPAddressField(self):
lazy_func = lazy(lambda: "127.0.0.1", str)
self.assertIsInstance(GenericIPAddressField().get_prep_value(lazy_func()), str)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(GenericIPAddressField().get_prep_value(lazy_func()), str)
def test_PositiveIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(PositiveIntegerField().get_prep_value(lazy_func()), int)
def test_PositiveSmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveSmallIntegerField().get_prep_value(lazy_func()), int
)
def test_PositiveBigIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveBigIntegerField().get_prep_value(lazy_func()), int
)
def test_SlugField(self):
lazy_func = lazy(lambda: "slug", str)
self.assertIsInstance(SlugField().get_prep_value(lazy_func()), str)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(SlugField().get_prep_value(lazy_func()), str)
def test_SmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(SmallIntegerField().get_prep_value(lazy_func()), int)
def test_TextField(self):
lazy_func = lazy(lambda: "Abc", str)
self.assertIsInstance(TextField().get_prep_value(lazy_func()), str)
lazy_func = lazy(lambda: 0, int)
self.assertIsInstance(TextField().get_prep_value(lazy_func()), str)
def test_TimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now().time(), datetime.time)
self.assertIsInstance(TimeField().get_prep_value(lazy_func()), datetime.time)
def test_URLField(self):
lazy_func = lazy(lambda: "http://domain.com", str)
self.assertIsInstance(URLField().get_prep_value(lazy_func()), str)
| PromiseTest |
python | graphql-python__graphene | graphene/utils/tests/test_dataloader.py | {
"start": 522,
"end": 804
} | class ____(ObjectType):
name = String()
sibling = Field(lambda: CharacterType)
async def resolve_sibling(character, info):
if character["sibling"]:
return await info.context.character_loader.load(character["sibling"])
return None
| CharacterType |
python | huggingface__transformers | src/transformers/models/gemma/configuration_gemma.py | {
"start": 1295,
"end": 8674
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma-7B.
e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GemmaModel`]
hidden_size (`int`, *optional*, defaults to 3072):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 24576):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 28):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 16):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 256):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The legacy activation function. It is overwritten by the `hidden_activation`.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
use_bidirectional_attention (`bool`, *optional*):
If True, the model will attend to all text tokens instead of using a causal mask.
```python
>>> from transformers import GemmaModel, GemmaConfig
>>> # Initializing a Gemma gemma-7b style configuration
>>> configuration = GemmaConfig()
>>> # Initializing a model from the gemma-7b style configuration
>>> model = GemmaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "gemma"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 256000,
hidden_size: Optional[int] = 3072,
intermediate_size: Optional[int] = 24576,
num_hidden_layers: Optional[int] = 28,
num_attention_heads: Optional[int] = 16,
num_key_value_heads: Optional[int] = 16,
head_dim: Optional[int] = 256,
hidden_act: Optional[str] = "gelu_pytorch_tanh",
max_position_embeddings: Optional[int] = 8192,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
use_bidirectional_attention: Optional[bool] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.head_dim = head_dim
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.use_bidirectional_attention = use_bidirectional_attention
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["GemmaConfig"]
| GemmaConfig |
python | django-guardian__django-guardian | guardian/testapp/tests/test_shortcuts.py | {
"start": 7390,
"end": 10015
} | class ____(ObjectPermissionTestCase):
"""
Tests assignment of permission to multiple users or groups
"""
def setUp(self):
super().setUp()
self.users_list = jim, bob = [
User.objects.create_user(username="jim"),
User.objects.create_user(username="bob"),
]
self.groups_list = jim_group, bob_group = [
Group.objects.create(name="jimgroup"),
Group.objects.create(name="bobgroup"),
]
jim_group.user_set.add(jim)
bob_group.user_set.add(bob)
self.users_qs = User.objects.exclude(username="AnonymousUser")
self.groups_qs = Group.objects.all()
def test_assign_to_many_users_queryset(self):
assign_perm("add_contenttype", self.users_qs, self.ctype)
assign_perm(self.get_permission("delete_contenttype"), self.users_qs, self.ctype)
for user in self.users_list:
self.assertTrue(user.has_perm("add_contenttype", self.ctype))
self.assertTrue(user.has_perm("delete_contenttype", self.ctype))
def test_assign_to_many_users_list(self):
assign_perm("add_contenttype", self.users_list, self.ctype)
assign_perm(self.get_permission("delete_contenttype"), self.users_list, self.ctype)
for user in self.users_list:
self.assertTrue(user.has_perm("add_contenttype", self.ctype))
self.assertTrue(user.has_perm("delete_contenttype", self.ctype))
def test_assign_to_many_groups_queryset(self):
assign_perm("add_contenttype", self.groups_qs, self.ctype)
assign_perm(self.get_permission("delete_contenttype"), self.groups_qs, self.ctype)
for user in self.users_list:
self.assertTrue(user.has_perm("add_contenttype", self.ctype))
self.assertTrue(user.has_perm("delete_contenttype", self.ctype))
def test_assign_to_many_groups_list(self):
assign_perm("add_contenttype", self.groups_list, self.ctype)
assign_perm(self.get_permission("delete_contenttype"), self.groups_list, self.ctype)
for user in self.users_list:
self.assertTrue(user.has_perm("add_contenttype", self.ctype))
self.assertTrue(user.has_perm("delete_contenttype", self.ctype))
def test_assign_to_multiple_identity_and_obj(self):
with self.assertRaises(MultipleIdentityAndObjectError):
assign_perm("add_contenttype", self.users_list, self.ctype_qset)
with self.assertRaises(MultipleIdentityAndObjectError):
assign_perm("add_contenttype", self.users_qs, self.ctype_qset)
| MultipleIdentitiesOperationsTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 278894,
"end": 279221
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(DeploymentRequest, graphql_name="node")
| DeploymentRequestEdge |
python | pydata__xarray | asv_bench/benchmarks/reindexing.py | {
"start": 1238,
"end": 1382
} | class ____(Reindex):
def setup(self):
requires_dask()
super().setup()
self.ds = self.ds.chunk({"time": 100})
| ReindexDask |
python | explosion__spaCy | spacy/lang/bn/__init__.py | {
"start": 540,
"end": 1177
} | class ____(Language):
lang = "bn"
Defaults = BengaliDefaults
@Bengali.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "rule",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return Lemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
__all__ = ["Bengali"]
| Bengali |
python | pytorch__pytorch | torch/distributed/_tools/runtime_estimator.py | {
"start": 697,
"end": 16729
} | class ____(TorchDispatchMode):
"""
Estimates the GPU runtime in milliseconds using various estimation methods under the ``FakeTensorMode``.
This class provides a ``TorchDispatchMode`` based context manager that can be used to estimate the eager
runtime of PyTorch functions. It supports two estimation modes, benchmarking (`operator-level-benchmark`) and
roofline cost modeling (`operator-level-cost-model`).
For modules executed under this context manager, it aggregates the forward and backward operation runtimes
and also records their execution orders.
Attributes:
mod_runtimes (Dict[str, Dict[str, float]]): A dictionary of module runtimes. The key to the outer dictionary
is the fully qualified name (FQN) of the module. For each module the forward and backward runtimes of the
operations are aggregated in the inner dictionary keyed by 'fw' and 'bw'.
mod_fw_pre_order (List[str]): List of module FQNs in pre-forward execution order.
mod_bw_pre_order (List[str]): List of module FQNs in pre-backward execution order.
mod_fw_post_order (List[str]): List of module FQNs in post-forward execution order.
mod_bw_post_order (List[str]): List of module FQNs in post-backward execution order.
total_runtime (float): The total estimated runtime in milliseconds.
Note:
1) The benchmarking estimate mode will execute kernels on GPU and assumes that every operation can run in
isolation without causing an OOM error. It is also designed to be used only under ``FakeTensorMode``.
2) Currently wrapper tensor sub-classes such as ``DTensor`` won't produce correct estimates. We plan to support
them in future PRs.
3) We only estimate the compute time, if your code has communication, it will not be considered. Again, we will
support this in future PRs.
Example usage:
.. code-block:: python
runtime_estimator = RuntimeEstimator()
with FakeTensorMode():
module = ...
optimizer = ...
inp = ...
with runtime_estimator(estimate_mode_type="operator-level-cost-model"):
loss = module(inp)
loss.backward()
optimizer.step()
optimizer.zero_grad()
runtime_estimator.display_modulewise_stats()
"""
_no_fallback_kernel: set[torch._ops._OpNamespace] = set()
fake_mode: FakeTensorMode
def __init__(self) -> None:
super().__init__()
self._estimate: Callable
self._estimate_mode_type: str
self._mod_tracker = ModTracker()
self.mod_runtimes: dict[str, dict[str, float]] = defaultdict(
lambda: defaultdict(lambda: 0.0)
)
self.mod_fw_pre_order: list[str] = []
self.mod_bw_pre_order: list[str] = []
self.mod_fw_post_order: list[str] = []
self.mod_bw_post_order: list[str] = []
self.total_runtime: float = 0.0
# Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_subclasses/fake_tensor.py#L1969 # noqa: PGH004,B950
# NB: returns fake tensors
@classmethod
def _maybe_run_and_benchmark_fallback_kernel( # type: ignore[no-untyped-def]
cls,
func,
args,
kwargs,
orig_not_implemented_exception,
):
"""
Runs and benchmarks a fallback kernel for a given function.
Args:
func (Callable): The function to benchmark.
args (Tuple): The arguments to pass to the function.
kwargs (Dict[str, Any]): The keyword arguments to pass to the function.
orig_not_implemented_exception (Exception): The original exception to raise if the fallback kernel
is not implemented.
Returns:
Tuple[Any, float]: A tuple containing the result of the function and
the mean operation time in milliseconds.
"""
# these should all be supported, just to be safe
# avoid fallback for operators which inplace modify metadata
# because the input fake tensors would be umodified
if torch.Tag.inplace_view in func.tags: # type: ignore[attr-defined]
raise orig_not_implemented_exception
inp_impls = {}
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
# Don't use in_kernel_invocation_manager(fake_mode) as we want to do
# REAL compute (not with meta device)
with no_dispatch():
def to_real_tensor(e): # type: ignore[no-untyped-def]
if cls.fake_mode.is_our_fake(e):
if e.dtype in _FLOAT_TYPES:
out = torch.rand_like(e, device=e.fake_device)
else:
out = torch.ones_like(e, device=e.fake_device)
if e.is_sparse:
out._coalesced_(e.is_coalesced())
inp_impls[id(out)] = e
return out
return e
flat_args = [to_real_tensor(a) for a in flat_args]
args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
r = func(*args, **kwargs)
warmup_iters, actual_iters = 2, 3
for _ in range(warmup_iters):
func(*args, **kwargs)
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record(torch.cuda.current_stream())
for _ in range(actual_iters):
func(*args, **kwargs)
end_event.record(torch.cuda.current_stream())
torch.cuda.synchronize()
cuda_time = start_event.elapsed_time(end_event)
mean_op_time = cuda_time / actual_iters
storages = set()
for e in flat_args:
if isinstance(e, torch.Tensor):
if not e.is_sparse:
storages.add(e._typed_storage()._cdata)
# TODO: also check metadata change on inputs
# proper aliasing/metadata relationship between outputs and inputs will
# not be set up, bc of conversion to device, unless we can reuse an
# input impl
def map_out(e): # type: ignore[no-untyped-def]
if id(e) not in inp_impls and (
isinstance(e, torch.Tensor)
and not e.is_sparse
and e._typed_storage()._cdata in storages
):
raise orig_not_implemented_exception
if isinstance(e, torch.Tensor):
if id(e) in inp_impls:
return inp_impls[id(e)]
else:
return cls.fake_mode.fake_tensor_converter.from_real_tensor(
cls.fake_mode, e
)
else:
return e
return (pytree.tree_map(map_out, r), mean_op_time)
@classmethod
def _benchmark_estimate(cls, func, args, kwargs) -> tuple[Any, float]: # type: ignore[no-untyped-def]
"""
Estimates the runtime of a function using benchmarking.
Args:
func: The function to estimate.
args: The arguments to pass to the function.
kwargs: The keyword arguments to pass to the function.
res: The result of the function.
Returns:
Tuple[Any, float]: A tuple containing the result of the function and
the mean operation time in milliseconds.
"""
assert isinstance(cls.fake_mode, FakeTensorMode), (
"Initialize/Assign FakeTensorMode before using this function"
)
mean_op_time = 0.0
if func._overloadpacket not in _VIEW_OPS:
try:
res, mean_op_time = cls._maybe_run_and_benchmark_fallback_kernel(
func,
args,
kwargs,
NotImplementedError,
)
return (res, mean_op_time)
except NotImplementedError:
cls._no_fallback_kernel.add(func._overloadpacket)
res = func(*args, **kwargs or {})
return (res, mean_op_time)
# Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_inductor/scheduler.py#L589 # noqa: PGH004,B950
@classmethod
def _roofline_estimate(cls, func, args, kwargs) -> tuple[Any, float]: # type: ignore[no-untyped-def]
"""
Estimates the runtime of a function using a roofline cost model.
Args:
func: The function to estimate.
args: The arguments to pass to the function.
kwargs: The keyword arguments to pass to the function.
out: The output of the function.
Returns:
Tuple[Any, float]: A tuple containing the result of the function and
the mean operation time in milliseconds.
"""
assert torch.cuda.is_available(), (
"Roofline estimation needs to access CUDA capabilities to make estimations"
)
# Roofline Cost Model Explanation
# The roofline cost model estimates the execution time of an operator based on
# the device's empirical maximum FLOPs/sec (pi) and device DRAM bandwidth (beta).
# Variables:
# - pi: Maximum empirical FLOPs/sec of the device
# - beta: Maximum empirical device DRAM bandwidth (bytes/sec) of the device
# - I: Arithmetic intensity of the operator (FLOPs/bytes)
# - op_flops: FLOPs required by the operator
# - op_bytes: Bytes transferred to and from DRAM for the operator
# Calculation Steps:
# 1. Calculate arithmetic intensity: I = op_flops / op_bytes
# 2. Calculate estimated FLOPs/sec: est_flops_sec = min(pi, beta * I)
# 3. Calculate estimated operator time: estimated_op_time = op_flops / est_flops_sec
# This simplifies to: estimated_op_time = max(op_flops / pi, op_flops / (beta * I))
# Further simplifying: estimated_op_time = max(op_flops / pi, op_bytes / beta)
# Simplified Formulas:
# - compute_time = op_flops / pi
# - transfer_time = op_bytes / beta
# - estimated_op_time = max(compute_time, transfer_time)
kwargs = kwargs if kwargs else {}
out = func(*args, **kwargs)
op_time = 0.0
func_packet = func._overloadpacket
if func_packet not in _IGNORE_OPS:
flat_args_kwargs, args_spec = pytree.tree_flatten((args, kwargs))
flat_outs, out_spec = pytree.tree_flatten(out)
transfer_time = get_transfer_time(flat_args_kwargs, flat_outs)
out_dtypes = {
t.dtype
for t in flat_outs
if isinstance(t, torch.Tensor) and t.dtype in _FLOAT_TYPES
}
args, kwargs = pytree.tree_unflatten(flat_args_kwargs, args_spec)
out = pytree.tree_unflatten(flat_outs, out_spec)
compute_time = get_compute_time(func_packet, args, kwargs, out, out_dtypes)
# We get the estimated time as the max of the transfer time and
# compute time. We divide by 1e6 to get the time in ms
op_time = max(transfer_time, compute_time) / 1e6
return (out, op_time)
def display_modulewise_stats(self, depth: int = 2) -> None:
"""
Displays module-wise statistics collected by ``RuntimeEstimator``.
Prints the pre-forward and pre-backward execution orders.
Displays the module-wise forward and backward runtimes in milliseconds.
Args:
depth (int): The maximum depth of module hierarchy to display (default to 2).
"""
print("Pre-Forward Execution Order: ")
for mod_fqn in self.mod_fw_pre_order:
mod_depth = mod_fqn.count(".") + 1
if mod_depth > depth:
continue
print(mod_fqn)
print("Pre-Backward Execution Order: ")
for mod_fqn in self.mod_bw_pre_order:
mod_depth = mod_fqn.count(".") + 1
if mod_depth > depth:
continue
print(mod_fqn)
for mod_fqn, runtimes in self.mod_runtimes.items():
mod_depth = mod_fqn.count(".") + 1
if mod_depth > depth:
continue
print(
f"{mod_fqn} fw: {runtimes.get('fw', 0.0):.3f}ms bw: {runtimes.get('bw', 0.0):.3f}ms"
)
def __torch_dispatch__(self, func, types, args=..., kwargs=None): # type: ignore[no-untyped-def]
# TODO: @sanketpurandare: Flatten tensors by desugaring the tensor subclasses
# TODO: @sanketpurandare: Add logic for incorporating communication time
res, op_time = self._estimate(func, args, kwargs)
for par in self._mod_tracker.parents:
if self._mod_tracker.is_bw:
self.mod_runtimes[par]["bw"] += op_time
else:
self.mod_runtimes[par]["fw"] += op_time
self.total_runtime += op_time
return res
def __call__(self, estimate_mode_type: str) -> Self:
"""
Sets the estimate mode type.
Currently supported modes:
- "operator-level-benchmark": Estimates runtime using operator benchmarking.
- "operator-level-cost-model": Estimates runtime using roofline cost model.
Args:
estimate_mode_type (str): The type of estimate mode to use.
Returns:
RuntimeEstimator: The runtime estimator instance.
Raises:
NotImplementedError: If the estimate mode type is not supported.
"""
if estimate_mode_type == "operator-level-benchmark":
self._estimate = RuntimeEstimator._benchmark_estimate
elif estimate_mode_type == "operator-level-cost-model":
self._estimate = RuntimeEstimator._roofline_estimate
else:
raise NotImplementedError(
f"estimate_mode_type {estimate_mode_type} not supported"
)
self._estimate_mode_type = estimate_mode_type
return self
def __enter__(self) -> Self:
fake_mode = active_fake_mode()
assert isinstance(fake_mode, FakeTensorMode), (
"No FakeTensorMode found, designed to used under FakeTensorMode"
)
RuntimeEstimator.fake_mode = fake_mode
self.total_runtime = 0.0
self.mod_runtimes = defaultdict(lambda: defaultdict(lambda: 0.0))
self.mod_fw_pre_order.clear()
self.mod_bw_pre_order.clear()
self.mod_fw_post_order.clear()
self.mod_bw_post_order.clear()
self._mod_tracker.register_user_hooks(
pre_fw_hook=lambda mod, inp: self.mod_fw_pre_order.append(
self._mod_tracker.get_known_fqn(mod)
),
pre_bw_hook=lambda mod, g_out: self.mod_bw_pre_order.append(
self._mod_tracker.get_known_fqn(mod)
),
post_fw_hook=lambda mod, inp, out: self.mod_fw_post_order.append(
self._mod_tracker.get_known_fqn(mod)
),
post_bw_hook=lambda mod, g_inp: self.mod_bw_post_order.append(
self._mod_tracker.get_known_fqn(mod)
),
)
self._mod_tracker.__enter__()
super().__enter__()
return self
# pyrefly: ignore [bad-override]
def __exit__(self, *args: Any) -> None:
print(
f"Estimated ({self._estimate_mode_type})"
f"total_time: {self.total_runtime:.3f} ms"
)
if len(self._no_fallback_kernel) > 0:
print("no_fallback_kernel: ", list(self._no_fallback_kernel))
super().__exit__(*args)
self._mod_tracker.clear_user_hooks()
self._mod_tracker.__exit__()
| RuntimeEstimator |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 25032,
"end": 25587
} | class ____(unittest.TestCase):
def test_range_search(self):
# test for https://github.com/facebookresearch/faiss/issues/1889
d = 256
nq = 16
nb = 1000000
# faiss.cvar.distance_compute_blas_threshold = 10
faiss.omp_set_num_threads(1)
index = faiss.IndexFlatL2(d)
xb = np.zeros((nb, d), dtype="float32")
index.add(xb)
xq = np.zeros((nq, d), dtype="float32")
lims, D, I = index.range_search(xq, 1.0)
assert len(D) == len(xb) * len(xq)
| TestLargeRangeSearch |
python | fsspec__filesystem_spec | fsspec/spec.py | {
"start": 652,
"end": 3090
} | class ____(type):
"""
Metaclass for caching file system instances.
Notes
-----
Instances are cached according to
* The values of the class attributes listed in `_extra_tokenize_attributes`
* The arguments passed to ``__init__``.
This creates an additional reference to the filesystem, which prevents the
filesystem from being garbage collected when all *user* references go away.
A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also*
be made for a filesystem instance to be garbage collected.
"""
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
# Note: we intentionally create a reference here, to avoid garbage
# collecting instances when all other references are gone. To really
# delete a FileSystem, the cache must be cleared.
if conf.get("weakref_instance_cache"): # pragma: no cover
# debug option for analysing fork/spawn conditions
cls._cache = weakref.WeakValueDictionary()
else:
cls._cache = {}
cls._pid = os.getpid()
def __call__(cls, *args, **kwargs):
kwargs = apply_config(cls, kwargs)
extra_tokens = tuple(
getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes
)
strip_tokenize_options = {
k: kwargs.pop(k) for k in cls._strip_tokenize_options if k in kwargs
}
token = tokenize(
cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs
)
skip = kwargs.pop("skip_instance_cache", False)
if os.getpid() != cls._pid:
cls._cache.clear()
cls._pid = os.getpid()
if not skip and cls.cachable and token in cls._cache:
cls._latest = token
return cls._cache[token]
else:
obj = super().__call__(*args, **kwargs, **strip_tokenize_options)
# Setting _fs_token here causes some static linters to complain.
obj._fs_token_ = token
obj.storage_args = args
obj.storage_options = kwargs
if obj.async_impl and obj.mirror_sync_methods:
from .asyn import mirror_sync_methods
mirror_sync_methods(obj)
if cls.cachable and not skip:
cls._latest = token
cls._cache[token] = obj
return obj
| _Cached |
python | RaRe-Technologies__gensim | gensim/corpora/mmcorpus.py | {
"start": 427,
"end": 4260
} | class ____(matutils.MmReader, IndexedCorpus):
"""Corpus serialized using the `sparse coordinate Matrix Market format
<https://math.nist.gov/MatrixMarket/formats.html>`_.
Wrap a term-document matrix on disk (in matrix-market format), and present it
as an object which supports iteration over the matrix rows (~documents).
Notes
-----
The file is read into memory one document at a time, not the whole matrix at once,
unlike e.g. `scipy.io.mmread` and other implementations. This allows you to **process corpora which are larger
than the available RAM**, in a streamed manner.
Example
--------
.. sourcecode:: pycon
>>> from gensim.corpora.mmcorpus import MmCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = MmCorpus(datapath('test_mmcorpus_with_index.mm'))
>>> for document in corpus:
... pass
"""
def __init__(self, fname):
"""
Parameters
----------
fname : {str, file-like object}
Path to file in MM format or a file-like object that supports `seek()`
(e.g. a compressed file opened by `smart_open <https://github.com/RaRe-Technologies/smart_open>`_).
"""
# avoid calling super(), too confusing
IndexedCorpus.__init__(self, fname)
matutils.MmReader.__init__(self, fname)
def __iter__(self):
"""Iterate through all documents.
Yields
------
list of (int, numeric)
Document in the `sparse Gensim bag-of-words format <intro.rst#core-concepts>`__.
Notes
------
The total number of vectors returned is always equal to the number of rows specified in the header.
Empty documents are inserted and yielded where appropriate, even if they are not explicitly stored in the
(sparse) Matrix Market file.
"""
for doc_id, doc in super(MmCorpus, self).__iter__():
yield doc # get rid of doc id, return the sparse vector only
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False):
"""Save a corpus to disk in the sparse coordinate Matrix Market format.
Parameters
----------
fname : str
Path to file.
corpus : iterable of list of (int, number)
Corpus in Bow format.
id2word : dict of (int, str), optional
Mapping between word_id -> word. Used to retrieve the total vocabulary size if provided.
Otherwise, the total vocabulary size is estimated based on the highest feature id encountered in `corpus`.
progress_cnt : int, optional
How often to report (log) progress.
metadata : bool, optional
Writes out additional metadata?
Warnings
--------
This function is automatically called by :class:`~gensim.corpora.mmcorpus.MmCorpus.serialize`, don't
call it directly, call :class:`~gensim.corpora.mmcorpus.MmCorpus.serialize` instead.
Example
-------
.. sourcecode:: pycon
>>> from gensim.corpora.mmcorpus import MmCorpus
>>> from gensim.test.utils import datapath
>>>
>>> corpus = MmCorpus(datapath('test_mmcorpus_with_index.mm'))
>>>
>>> MmCorpus.save_corpus("random", corpus) # Do not do it, use `serialize` instead.
[97, 121, 169, 201, 225, 249, 258, 276, 303]
"""
logger.info("storing corpus in Matrix Market format to %s", fname)
num_terms = len(id2word) if id2word is not None else None
return matutils.MmWriter.write_corpus(
fname, corpus, num_terms=num_terms, index=True, progress_cnt=progress_cnt, metadata=metadata
)
| MmCorpus |
python | getsentry__sentry | tests/sentry/monitors/migrations/test_0009_backfill_monitor_detectors.py | {
"start": 601,
"end": 2383
} | class ____(TestMigrations):
migrate_from = "0008_fix_processing_error_keys"
migrate_to = "0009_backfill_monitor_detectors"
app = "monitors"
connection = "secondary"
def setup_initial_state(self) -> None:
self.no_detector = self.create_monitor()
self.has_detector = self.create_monitor()
self.invalid_project = self.create_monitor(project=Project(id=40000000000))
self.invalid_team = self.create_monitor(owner_team_id=4560090495334, owner_user_id=None)
self.invalid_status = self.create_monitor(status=ObjectStatus.PENDING_DELETION)
ensure_cron_detector(self.has_detector)
assert get_detector_for_monitor(self.no_detector) is None
assert get_detector_for_monitor(self.invalid_project) is None
assert get_detector_for_monitor(self.invalid_status) is None
self.existing_detector = get_detector_for_monitor(self.has_detector)
def test(self) -> None:
assert get_detector_for_monitor(self.invalid_project) is None
assert get_detector_for_monitor(self.invalid_team) is None
assert get_detector_for_monitor(self.invalid_status) is None
new_detector = get_detector_for_monitor(self.no_detector)
assert new_detector is not None
assert self.existing_detector is not None
assert new_detector.id != self.existing_detector.id
new_existing_detector = get_detector_for_monitor(self.has_detector)
assert (
new_existing_detector is not None
and new_existing_detector.id == self.existing_detector.id
)
assert DataSourceDetector.objects.all().count() == 2
assert DataSource.objects.all().count() == 2
assert Detector.objects.all().count() == 2
| BackfillMonitorDetectorsTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/transfers/http_to_s3.py | {
"start": 1298,
"end": 7559
} | class ____(BaseOperator):
"""
Calls an endpoint on an HTTP system to execute an action and store the result in S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:HttpToS3Operator`
:param http_conn_id: The :ref:`http connection<howto/connection:http>` to run
the operator against
:param endpoint: The relative part of the full url. (templated)
:param method: The HTTP method to use, default = "POST"
:param data: The data to pass. POST-data in POST/PUT and params
in the URL for a GET request. (templated)
:param headers: The HTTP headers to be added to the GET request
:param response_check: A check against the 'requests' response object.
The callable takes the response object as the first positional argument
and optionally any number of keyword arguments available in the context dictionary.
It should return True for 'pass' and False otherwise.
:param response_filter: A function allowing you to manipulate the response
text. e.g response_filter=lambda response: json.loads(response.text).
The callable takes the response object as the first positional argument
and optionally any number of keyword arguments available in the context dictionary.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:param log_response: Log the response (default: False)
:param auth_type: The auth type for the service
:param tcp_keep_alive: Enable TCP Keep Alive for the connection.
:param tcp_keep_alive_idle: The TCP Keep Alive Idle parameter (corresponds to ``socket.TCP_KEEPIDLE``).
:param tcp_keep_alive_count: The TCP Keep Alive count parameter (corresponds to ``socket.TCP_KEEPCNT``)
:param tcp_keep_alive_interval: The TCP Keep Alive interval parameter (corresponds to
``socket.TCP_KEEPINTVL``)
:param s3_bucket: Name of the S3 bucket where to save the object. (templated)
It should be omitted when ``s3_key`` is provided as a full s3:// url.
:param s3_key: The key of the object to be created. (templated)
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit ``s3_bucket``.
:param replace: If True, it will overwrite the key if it already exists
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:param aws_conn_id: Connection id of the S3 connection to use
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- False: do not validate SSL certificates. SSL will still be used,
but SSL certificates will not be
verified.
- path/to/cert/bundle.pem: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
"""
template_fields: Sequence[str] = ("http_conn_id", "endpoint", "data", "headers", "s3_bucket", "s3_key")
template_fields_renderers = {"headers": "json", "data": "py"}
template_ext: Sequence[str] = ()
ui_color = "#f4a460"
def __init__(
self,
*,
endpoint: str | None = None,
method: str = "GET",
data: Any = None,
headers: dict[str, str] | None = None,
extra_options: dict[str, Any] | None = None,
http_conn_id: str = "http_default",
log_response: bool = False,
auth_type: type[AuthBase] | None = None,
tcp_keep_alive: bool = True,
tcp_keep_alive_idle: int = 120,
tcp_keep_alive_count: int = 20,
tcp_keep_alive_interval: int = 30,
s3_bucket: str | None = None,
s3_key: str,
replace: bool = False,
encrypt: bool = False,
acl_policy: str | None = None,
aws_conn_id: str | None = "aws_default",
verify: str | bool | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.http_conn_id = http_conn_id
self.method = method
self.endpoint = endpoint
self.headers = headers or {}
self.data = data or {}
self.extra_options = extra_options or {}
self.log_response = log_response
self.auth_type = auth_type
self.tcp_keep_alive = tcp_keep_alive
self.tcp_keep_alive_idle = tcp_keep_alive_idle
self.tcp_keep_alive_count = tcp_keep_alive_count
self.tcp_keep_alive_interval = tcp_keep_alive_interval
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.replace = replace
self.encrypt = encrypt
self.acl_policy = acl_policy
self.aws_conn_id = aws_conn_id
self.verify = verify
@cached_property
def http_hook(self) -> HttpHook:
"""Create and return an HttpHook."""
return HttpHook(
self.method,
http_conn_id=self.http_conn_id,
auth_type=self.auth_type,
tcp_keep_alive=self.tcp_keep_alive,
tcp_keep_alive_idle=self.tcp_keep_alive_idle,
tcp_keep_alive_count=self.tcp_keep_alive_count,
tcp_keep_alive_interval=self.tcp_keep_alive_interval,
)
@cached_property
def s3_hook(self) -> S3Hook:
"""Create and return an S3Hook."""
return S3Hook(
aws_conn_id=self.aws_conn_id,
verify=self.verify,
)
def execute(self, context: Context):
self.log.info("Calling HTTP method")
response = self.http_hook.run(self.endpoint, self.data, self.headers, self.extra_options)
self.s3_hook.load_bytes(
response.content,
self.s3_key,
self.s3_bucket,
self.replace,
self.encrypt,
self.acl_policy,
)
| HttpToS3Operator |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 41724,
"end": 42449
} | class ____(BaseModel):
auth_flow_type: Optional[AuthFlowType] = Field(None, description="The type of auth to use", title="Auth flow type")
predicate_key: Optional[List[str]] = Field(
None,
description="JSON path to a field in the connectorSpecification that should exist for the advanced auth to be applicable.",
examples=[["credentials", "auth_type"]],
title="Predicate key",
)
predicate_value: Optional[str] = Field(
None,
description="Value of the predicate_key fields for the advanced auth to be applicable.",
examples=["Oauth"],
title="Predicate value",
)
oauth_config_specification: Optional[OAuthConfigSpecification] = None
| AuthFlow |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_alabama_zip.py | {
"start": 742,
"end": 1743
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_alabama_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_alabama_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidAlabamaZip |
python | pytorch__pytorch | test/test_cuda.py | {
"start": 265713,
"end": 283486
} | class ____(TestCase):
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel(self):
# Simple vector addition kernel
kernel_source = """
__global__ void add_tensors(const float* a, const float* b, float* c, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
c[i] = a[i] + b[i];
}
"""
# Compile the kernel
from torch.cuda import _compile_kernel
add_kernel = _compile_kernel(kernel_source, "add_tensors")
# Prepare data
N = 1024
a = torch.rand(N, device="cuda")
b = torch.rand(N, device="cuda")
c = torch.empty_like(a)
# Calculate grid and block dimensions
threads_per_block = 256
blocks_per_grid = (N + threads_per_block - 1) // threads_per_block
# Launch kernel
add_kernel(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[a, b, c, N],
)
# Verify results
expected = a + b
self.assertEqual(c, expected)
# Test with different tensor types
a_int = torch.randint(0, 100, (N,), device="cuda", dtype=torch.int32)
b_int = torch.randint(0, 100, (N,), device="cuda", dtype=torch.int32)
c_int = torch.empty_like(a_int)
# Integer addition kernel
int_kernel_source = """
__global__ void add_int_tensors(const int* a, const int* b, int* c, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
c[i] = a[i] + b[i];
}
"""
from torch.cuda import _compile_kernel
add_int_kernel = _compile_kernel(int_kernel_source, "add_int_tensors")
# Launch kernel
add_int_kernel(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[a_int, b_int, c_int, N],
)
# Verify results
expected_int = a_int + b_int
self.assertEqual(c_int, expected_int)
# Test with header code
scale_kernel_source = """
#define SCALE_FACTOR 2.0f
__device__ float scale_value(float val) {
return val * SCALE_FACTOR;
}
__global__ void scale_tensors(const float* input, float* output, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
output[i] = scale_value(input[i]);
}
"""
scale_kernel = _compile_kernel(scale_kernel_source, "scale_tensors")
input_tensor = torch.rand(N, device="cuda")
output_tensor = torch.empty_like(input_tensor)
scale_kernel(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[input_tensor, output_tensor, N],
)
# Verify scaling
expected_scaled = input_tensor * 2.0
self.assertEqual(output_tensor, expected_scaled)
# Test error handling with invalid kernel
invalid_kernel_source = """
__global__ void invalid_kernel(float* a) {
undeclared_variable = 10; // This will cause a compilation error
}
"""
with self.assertRaises(RuntimeError):
_compile_kernel(invalid_kernel_source, "invalid_kernel")
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_large_shared_memory(self):
kernel_source = """
__global__ void large_shared_memory_kernel(const float* input, float* output, int n) {
extern __shared__ float shared_data[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Load data into shared memory
if (idx < n) {
shared_data[tid] = input[idx];
} else {
shared_data[tid] = 0.0f;
}
__syncthreads();
// Perform reduction in shared memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
shared_data[tid] += shared_data[tid + stride];
}
__syncthreads();
}
// Write result
if (tid == 0) {
output[blockIdx.x] = shared_data[0];
}
}
"""
from torch.cuda import _compile_kernel, get_device_properties
kernel = _compile_kernel(kernel_source, "large_shared_memory_kernel")
threads_per_block = 1024 # 1024 threads * 4 bytes = 4KB, but we'll request 64KB
shared_mem_size = 64 * 1024 # 64KB
kernel.set_shared_memory_config(shared_mem_size)
N = 4096
input_data = torch.ones(N, device="cuda", dtype=torch.float32)
output_data = torch.zeros(4, device="cuda", dtype=torch.float32) # 4 blocks
kernel(
grid=(4, 1, 1),
block=(threads_per_block, 1, 1),
args=[input_data, output_data, N],
shared_mem=shared_mem_size,
)
# Each block should sum 1024 ones = 1024
expected = torch.full((4,), 1024.0, dtype=torch.float32)
self.assertEqual(output_data.cpu(), expected)
# Test error handling with more than supported shared memory size
if torch.version.hip:
max_smem = (
65536
if get_device_properties().gcnArchName not in ["gfx950"]
else 160 * 1024
)
else:
max_smem = get_device_properties().shared_memory_per_block_optin
excessive_shared_mem = max_smem * 2
with self.assertRaises(RuntimeError):
kernel.set_shared_memory_config(excessive_shared_mem)
@skipIfRocmArch(MI300_ARCH)
@tf32_on_and_off(0.005)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_advanced(self):
# Test matrix multiplication
matmul_kernel_source = """
__global__ void matrix_multiply(const float* A, const float* B, float* C, int M, int N, int K) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < M && col < N) {
float sum = 0.0f;
for (int i = 0; i < K; i++) {
sum += A[row * K + i] * B[i * N + col];
}
C[row * N + col] = sum;
}
}
"""
from torch.cuda import _compile_kernel
matmul_kernel = _compile_kernel(matmul_kernel_source, "matrix_multiply")
# Matrix dimensions
M, K, N = 64, 32, 48
# Create matrices
A = torch.rand((M, K), device="cuda")
B = torch.rand((K, N), device="cuda")
C = torch.zeros((M, N), device="cuda")
# Calculate grid and block dimensions
block_dim = (16, 16, 1)
grid_dim = (
(N + block_dim[0] - 1) // block_dim[0],
(M + block_dim[1] - 1) // block_dim[1],
1,
)
# Launch kernel
matmul_kernel(
grid=grid_dim,
block=block_dim,
args=[A.contiguous(), B.contiguous(), C, M, N, K],
)
# Verify results
expected = torch.matmul(A, B)
self.assertEqual(C, expected)
# Test with different compute capability if specified
device_props = torch.cuda.get_device_properties(torch.cuda.current_device())
if not torch.version.hip:
compute_cap = f"{device_props.major}{device_props.minor}"
else:
compute_cap = f"{device_props.gcnArchName}"
# Recompile with explicit compute capability
matmul_kernel_explicit = _compile_kernel(
matmul_kernel_source, "matrix_multiply", compute_capability=compute_cap
)
C_explicit = torch.zeros((M, N), device="cuda")
# Launch kernel
matmul_kernel_explicit(
grid=grid_dim,
block=block_dim,
args=[A.contiguous(), B.contiguous(), C_explicit, M, N, K],
)
# Verify results
self.assertEqual(C_explicit, expected)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_as_custom_op(self):
# Define a simple vector addition kernel
kernel_source = """
__global__ void vector_add(const float* a, const float* b, float* c, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
c[idx] = a[idx] + b[idx];
}
}
"""
@torch.library.custom_op("test_compile_kernel::vector_add", mutates_args=())
def vector_add_op(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
from torch.cuda import _compile_kernel
# Validate that tensors are 1-dimensional and have the same size
torch._check(
a.dim() == 1,
lambda: f"Expected tensor 'a' to be 1-dimensional, but got {a.dim()} dimensions",
)
torch._check(
b.dim() == 1,
lambda: f"Expected tensor 'b' to be 1-dimensional, but got {b.dim()} dimensions",
)
torch._check(
a.size() == b.size(),
lambda: f"Expected tensors to have the same size, but got a.size()={a.size()} and b.size()={b.size()}",
)
compiled_kernel = _compile_kernel(kernel_source, "vector_add")
c = torch.empty_like(a)
n = a.numel()
threads_per_block = 256
blocks_per_grid = (n + threads_per_block - 1) // threads_per_block
compiled_kernel(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[a, b, c, n],
)
return c
@vector_add_op.register_fake
def _(a, b):
return torch.empty_like(a)
device = torch.device("cuda:0")
size = (1024,)
a = torch.randn(size, device=device, dtype=torch.float32)
b = torch.randn(size, device=device, dtype=torch.float32)
result = vector_add_op(a, b)
expected = a + b
torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-5)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_custom_op_validation(self):
kernel_source = """
__global__ void add_scalar(const float* input, float* output, double scalar, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
output[idx] = input[idx] + scalar;
}
}
"""
@torch.library.custom_op("test_compile_kernel::add_scalar", mutates_args=())
def add_scalar_op(input_tensor: torch.Tensor, scalar: float) -> torch.Tensor:
from torch.cuda import _compile_kernel
compiled_kernel = _compile_kernel(kernel_source, "add_scalar")
output = torch.empty_like(input_tensor)
n = input_tensor.numel()
threads_per_block = 256
blocks_per_grid = (n + threads_per_block - 1) // threads_per_block
compiled_kernel(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[input_tensor, output, scalar, n],
)
return output
@add_scalar_op.register_fake
def _(input_tensor, scalar):
return torch.empty_like(input_tensor)
# Test with opcheck
device = torch.device("cuda:0")
input_data = torch.randn((64,), device=device, dtype=torch.float32)
scalar_val = 3.14
# Run opcheck validation
torch.library.opcheck(add_scalar_op, (input_data, scalar_val), {})
# Also test the actual functionality
result = add_scalar_op(input_data, scalar_val)
expected = input_data + scalar_val
torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-5)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_double_precision(self):
"""Test that Python floats are correctly handled as doubles in kernels."""
kernel_source = """
__global__ void test_double_precision(double* output, double value, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
output[idx] = value;
}
}
"""
from torch.cuda import _compile_kernel
compiled_kernel = _compile_kernel(kernel_source, "test_double_precision")
# Test with high precision value that would lose precision if cast to float32
# float32 has 7 digits of precision, so we use a value with 15 digits
high_precision_value = 1.23456789012345
n = 10
output = torch.zeros(n, device="cuda", dtype=torch.float64)
compiled_kernel(
grid=(1, 1, 1),
block=(256, 1, 1),
args=[output, high_precision_value, n],
)
# Verify high precision is preserved (would fail with old float32 casting)
expected = torch.full(
(n,), high_precision_value, device="cuda", dtype=torch.float64
)
torch.testing.assert_close(output, expected, rtol=1e-14, atol=1e-14)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_cuda_headers(self):
"""Test that kernels can include and use CUDA headers like cuda_fp16.h."""
kernel_source = """
#ifndef __HIPCC__
#include <cuda_fp16.h>
#endif
extern "C"
__global__ void half_precision_kernel(__half* output, double input_value, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
output[idx] = __float2half((float)input_value);
}
}
"""
from torch.cuda import _compile_kernel
compiled_kernel = _compile_kernel(kernel_source, "half_precision_kernel")
n = 100
test_value = 3.14159
output = torch.zeros(n, device="cuda", dtype=torch.float16)
compiled_kernel(
grid=(1, 1, 1),
block=(256, 1, 1),
args=[output, test_value, n],
)
expected = torch.full((n,), test_value, device="cuda", dtype=torch.float16)
torch.testing.assert_close(output, expected, rtol=1e-3, atol=1e-3)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_template(self):
kernel_source = """
template<typename T>
__global__ void add_tensors(const T* a, const T* b, T* c, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
c[i] = a[i] + b[i];
}
"""
# Compile the kernel
from torch.cuda import _compile_kernel
add_kernel_float = _compile_kernel(kernel_source, "add_tensors<float>")
# Prepare data
N = 1024
a = torch.rand(N, device="cuda")
b = torch.rand(N, device="cuda")
c = torch.empty_like(a)
# Calculate grid and block dimensions
threads_per_block = 256
blocks_per_grid = (N + threads_per_block - 1) // threads_per_block
# Launch kernel
add_kernel_float(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[a, b, c, N],
)
# Verify results
expected = a + b
self.assertEqual(c, expected)
# do again with different dtype
add_kernel_int = _compile_kernel(kernel_source, "add_tensors<int>")
# Prepare data
N = 1024
a = torch.randint(-1000, 1000, size=(N,), dtype=torch.int, device="cuda")
b = torch.randint(-1000, 1000, size=(N,), dtype=torch.int, device="cuda")
c = torch.empty_like(a)
# Calculate grid and block dimensions
threads_per_block = 256
blocks_per_grid = (N + threads_per_block - 1) // threads_per_block
# Launch kernel
add_kernel_int(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[a, b, c, N],
)
# Verify results
expected = a + b
self.assertEqual(c, expected)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_compile_kernel_dlpack(self):
"""Test that compile_kernel works with tensors created via DLPack."""
kernel_source = """
__global__ void add_tensors(const float* a, const float* b, float* c, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n)
c[i] = a[i] + b[i];
}
"""
from torch.cuda import _compile_kernel
add_kernel = _compile_kernel(kernel_source, "add_tensors")
N = 512
a = torch.rand(N, device="cuda", dtype=torch.float32)
b = torch.rand(N, device="cuda", dtype=torch.float32)
a_dlpack = torch.utils.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(a))
b_dlpack = torch.utils.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(b))
c = torch.empty_like(a)
threads_per_block = 256
blocks_per_grid = (N + threads_per_block - 1) // threads_per_block
add_kernel(
grid=(blocks_per_grid, 1, 1),
block=(threads_per_block, 1, 1),
args=[a_dlpack, b_dlpack, c, N],
)
self.assertEqual(c, a + b)
a_dlpack[0] = 42.0
self.assertEqual(a[0].item(), 42.0, "DLPack tensors should share memory")
@unittest.skipIf(not TEST_CUDA, "CUDA not available, skipping tests")
| TestCompileKernel |
python | Netflix__metaflow | metaflow/plugins/aws/step_functions/step_functions.py | {
"start": 52358,
"end": 53822
} | class ____(object):
def __init__(self, name):
self.name = name
tree = lambda: defaultdict(tree)
self.payload = tree()
self.payload["Type"] = "Map"
self.payload["MaxConcurrency"] = 0
def iterator(self, workflow):
self.payload["Iterator"] = workflow.payload
return self
def next(self, state):
self.payload["Next"] = state
return self
def items_path(self, items_path):
self.payload["ItemsPath"] = items_path
return self
def parameter(self, name, value):
self.payload["Parameters"][name] = value
return self
def max_concurrency(self, max_concurrency):
self.payload["MaxConcurrency"] = max_concurrency
return self
def output_path(self, output_path):
self.payload["OutputPath"] = output_path
return self
def result_path(self, result_path):
self.payload["ResultPath"] = result_path
return self
def item_reader(self, item_reader):
self.payload["ItemReader"] = item_reader.payload
return self
def result_writer(self, bucket, prefix):
if bucket is not None and prefix is not None:
self.payload["ResultWriter"] = {
"Resource": "arn:aws:states:::s3:putObject",
"Parameters": {
"Bucket": bucket,
"Prefix": prefix,
},
}
return self
| Map |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/channel_shuffle_test.py | {
"start": 807,
"end": 1626
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, batch_size, channels_per_group, height, width, groups, channel_last):
channels = channels_per_group * groups
data_shape = (batch_size, channels, height, width)
input_data = torch.rand(data_shape)
if channel_last:
input_data = input_data.contiguous(memory_format=torch.channels_last)
self.inputs = {"input_data": input_data, "groups": groups}
self.set_module_name("channel_shuffle")
def forward(self, input_data, groups: int):
return torch.channel_shuffle(input_data, groups)
op_bench.generate_pt_test(
channel_shuffle_short_configs + channel_shuffle_long_configs,
ChannelSHuffleBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| ChannelSHuffleBenchmark |
python | pytorch__pytorch | torch/_inductor/codegen/simd_kernel_features.py | {
"start": 17818,
"end": 18267
} | class ____:
"""Memory usage stats for single loop in the generated kernel"""
# load/store ops
count_per_thread: int = 0
bytes_per_thread: int = 0
def __add__(self, other: typing.Self) -> StatsForLoop:
return StatsForLoop(
count_per_thread=self.count_per_thread + other.count_per_thread,
bytes_per_thread=self.bytes_per_thread + other.bytes_per_thread,
)
@dataclasses.dataclass
| StatsForLoop |
python | keon__algorithms | tests/test_strings.py | {
"start": 8177,
"end": 8582
} | class ____(unittest.TestCase):
"""[summary]
Test for the file merge_string_checker.py
Arguments:
unittest {[type]} -- [description]
"""
def test_is_merge_recursive(self):
self.assertTrue(is_merge_recursive("codewars", "cdw", "oears"))
def test_is_merge_iterative(self):
self.assertTrue(is_merge_iterative("codewars", "cdw", "oears"))
| TestMergeStringChecker |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/supers.py | {
"start": 1153,
"end": 2127
} | class ____(B, A):
def g1(self):
super().f1()
def g2(self):
super(E, self).f1()
def g3(self):
super(B, self).f1()
def attribute_B_not_overwritten():
B().g1()
def attribute_B_overwritten():
b = B()
b.attribute = "1"
b.g1()
def B_overwrite_both():
b = B()
b.g2()
def B_overwrite_partial():
b = B()
b.g3()
def B_standard():
b = B()
b.g4()
def attribute_C_not_overwritten():
C().g1()
def attribute_C_overwritten():
c = C()
c.attribute = "1"
c.g1()
def attribute_D_not_overwritten():
d = D()
d.g1()
def attribute_D_overwritten():
d = D()
d.attribute = "1"
d.g1()
def attribute_E_not_overwritten():
e = E()
e.g1()
e.g2()
def attribute_E_not_overwritten_RCE():
# TODO(T108231862): Support Diamond Inheritance.
e = E()
e.g3()
def attribute_E_overwritten():
e = E()
e.attribute = "1"
e.g1()
e.g2()
e.g3()
| E |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 865746,
"end": 866128
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("PublicKey", graphql_name="node")
"""The item at the end of the edge."""
| PublicKeyEdge |
python | ansible__ansible | lib/ansible/modules/user.py | {
"start": 70680,
"end": 76288
} | class ____(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.uid_min is not None:
cmd.append('-K')
cmd.append('UID_MIN=' + str(self.uid_min))
if self.uid_max is not None:
cmd.append('-K')
cmd.append('UID_MAX=' + str(self.uid_max))
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(names_only=True)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.inactive is not None:
cmd.append('-f')
cmd.append(self.inactive)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
if self.password_lock and not info[1].startswith('*LOCKED*'):
cmd.append('-C yes')
elif self.password_lock is False and info[1].startswith('*LOCKED*'):
cmd.append('-C no')
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
| NetBSDUser |
python | astropy__astropy | astropy/table/tests/test_table.py | {
"start": 53763,
"end": 54069
} | class ____:
def test_set_meta(self, table_types):
d = table_types.Table(names=("a", "b"))
d.meta["a"] = 1
d.meta["b"] = 1
d.meta["c"] = 1
d.meta["d"] = 1
assert list(d.meta.keys()) == ["a", "b", "c", "d"]
@pytest.mark.usefixtures("table_types")
| TestSetMeta |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_visual.py | {
"start": 12195,
"end": 13880
} | class ____:
def test_valid(self) -> None:
prop = bcpv.MarkerType()
for typ in MarkerType:
assert prop.is_valid(typ)
def test_invalid(self) -> None:
prop = bcpv.MarkerType()
assert not prop.is_valid(None)
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0)
assert not prop.is_valid(1)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid("")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
assert not prop.is_valid("string")
assert not prop.is_valid([1, 2, 3])
assert not prop.is_valid([1, 2, 3.0])
def test_has_ref(self) -> None:
prop = bcpv.MarkerType()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpv.MarkerType()
assert str(prop).startswith("MarkerType(")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpv, ALL)
| Test_MarkerType |
python | doocs__leetcode | lcof/面试题05. 替换空格/Solution2.py | {
"start": 0,
"end": 173
} | class ____:
def replaceSpace(self, s: str) -> str:
ans = []
for c in s:
ans.append('%20' if c == ' ' else c)
return ''.join(ans)
| Solution |
python | jina-ai__jina | jina/serve/runtimes/gateway/async_request_response_handling.py | {
"start": 868,
"end": 12442
} | class ____(MonitoringRequestMixin):
"""
Class that handles the requests arriving to the gateway and the result extracted from the requests future.
:param metrics_registry: optional metrics registry for prometheus used if we need to expose metrics from the executor or from the data request handler
:param runtime_name: optional runtime_name that will be registered during monitoring
"""
def __init__(
self,
metrics_registry: Optional['CollectorRegistry'] = None,
meter: Optional['Meter'] = None,
runtime_name: Optional[str] = None,
logger: Optional[JinaLogger] = None,
):
super().__init__(metrics_registry, meter, runtime_name)
self._endpoint_discovery_finished = False
self._gathering_endpoints = False
self.logger = logger or JinaLogger(self.__class__.__name__)
def handle_request(
self, graph: 'TopologyGraph', connection_pool: 'GrpcConnectionPool'
) -> Callable[['Request'], 'Tuple[Future, Optional[Future]]']:
"""
Function that handles the requests arriving to the gateway. This will be passed to the streamer.
:param graph: The TopologyGraph of the Flow.
:param connection_pool: The connection pool to be used to send messages to specific nodes of the graph
:return: Return a Function that given a Request will return a Future from where to extract the response
"""
async def gather_endpoints(request_graph):
if not self._endpoint_discovery_finished:
self._gathering_endpoints = True
try:
_ = await request_graph._get_all_endpoints(connection_pool)
except InternalNetworkError as err:
err_code = err.code()
if err_code == grpc.StatusCode.UNAVAILABLE:
err._details = (
err.details()
+ f' |Gateway: Communication error while gathering endpoints with deployment at address(es) {err.dest_addr}. Head or worker(s) may be down.'
)
raise err
else:
raise
except Exception as exc:
self.logger.error(f' Error gathering endpoints: {exc}')
raise exc
self._endpoint_discovery_finished = True
def _handle_request(
request: 'Request', return_type: Type[DocumentArray]
) -> 'Tuple[Future, Optional[Future]]':
self._update_start_request_metrics(request)
# important that the gateway needs to have an instance of the graph per request
request_graph = copy.deepcopy(graph)
responding_tasks = []
floating_tasks = []
endpoint = request.header.exec_endpoint
r = request.routes.add()
r.executor = 'gateway'
r.start_time.GetCurrentTime()
# If the request is targeting a specific deployment, we can send directly to the deployment instead of
# querying the graph
num_outgoing_nodes = len(request_graph.origin_nodes)
has_specific_params = False
request_input_parameters = request.parameters
for key in request_input_parameters:
if _is_param_for_specific_executor(key):
has_specific_params = True
break
target_executor = request.header.target_executor
# reset it in case we send to an external gateway
request.header.target_executor = ''
exec_endpoint = request.header.exec_endpoint
gather_endpoints_task = None
if not self._endpoint_discovery_finished and not self._gathering_endpoints:
gather_endpoints_task = asyncio.create_task(
gather_endpoints(request_graph)
)
init_task = None
request_doc_ids = []
if graph.has_filter_conditions:
if not docarray_v2:
request_doc_ids = request.data.docs[
:, 'id'
] # used to maintain order of docs that are filtered by executors
else:
init_task = gather_endpoints_task
from docarray import DocList
from docarray.base_doc import AnyDoc
prev_doc_array_cls = request.data.document_array_cls
request.data.document_array_cls = DocList[AnyDoc]
request_doc_ids = request.data.docs.id
request.data._loaded_doc_array = None
request.data.document_array_cls = prev_doc_array_cls
else:
init_task = None
for origin_node in request_graph.origin_nodes:
leaf_tasks = origin_node.get_leaf_req_response_tasks(
connection_pool=connection_pool,
request_to_send=request,
previous_task=None,
endpoint=endpoint,
target_executor_pattern=target_executor or None,
request_input_parameters=request_input_parameters,
request_input_has_specific_params=has_specific_params,
copy_request_at_send=num_outgoing_nodes > 1 and has_specific_params,
init_task=init_task,
return_type=return_type,
)
# Every origin node returns a set of tasks that are the ones corresponding to the leafs of each of their
# subtrees that unwrap all the previous tasks. It starts like a chain of waiting for tasks from previous
# nodes
responding_tasks.extend([task for ret, task in leaf_tasks if ret])
floating_tasks.extend([task for ret, task in leaf_tasks if not ret])
def _sort_response_docs(response):
# sort response docs according to their order in the initial request
def sort_by_request_order(doc):
if doc.id in request_doc_ids:
return request_doc_ids.index(doc.id)
else:
return len(request_doc_ids) # put new/unknown docs at the end
sorted_docs = sorted(response.data.docs, key=sort_by_request_order)
response.data.docs = DocumentArray(sorted_docs)
async def _process_results_at_end_gateway(
tasks: List[asyncio.Task], request_graph: TopologyGraph
) -> asyncio.Future:
try:
partial_responses = await asyncio.gather(*tasks)
except Exception:
# update here failed request
self._update_end_failed_requests_metrics()
raise
partial_responses, metadatas = zip(*partial_responses)
filtered_partial_responses = list(
filter(lambda x: x is not None, partial_responses)
)
response = filtered_partial_responses[0]
# JoanFM: to keep the docs_map feature, need to add the routes in the WorkerRuntime but clear it here
# so that routes are properly done. not very clean but refactoring would be costly for such a small
# thing, `docs_map` reuses routes potentially not in the best way but works for now
for i in reversed(range(len(response.routes))):
if response.routes[i].executor != GATEWAY_NAME:
del response.routes[i]
request_graph.add_routes(response)
if graph.has_filter_conditions:
_sort_response_docs(response)
collect_results = request_graph.collect_all_results()
resp_params = response.parameters
if len(collect_results) > 0:
resp_params[WorkerRequestHandler._KEY_RESULT] = collect_results
response.parameters = resp_params
return response
# In case of empty topologies
if not responding_tasks:
r.end_time.GetCurrentTime()
future = asyncio.Future()
future.set_result((request, {}))
responding_tasks.append(future)
return (
asyncio.ensure_future(
_process_results_at_end_gateway(responding_tasks, request_graph)
),
(
asyncio.ensure_future(asyncio.gather(*floating_tasks))
if len(floating_tasks) > 0
else None
),
)
return _handle_request
def handle_single_document_request(
self, graph: 'TopologyGraph', connection_pool: 'GrpcConnectionPool'
) -> Callable[['Request', Type[DocumentArray]], 'AsyncGenerator']:
"""
Function that handles the requests arriving to the gateway. This will be passed to the streamer.
:param graph: The TopologyGraph of the Flow.
:param connection_pool: The connection pool to be used to send messages to specific nodes of the graph
:return: Return a Function that given a Request will return a Future from where to extract the response
"""
async def _handle_request(
request: 'Request', return_type: Type[DocumentArray] = DocumentArray
) -> 'Tuple[Future, Optional[Future]]':
self._update_start_request_metrics(request)
# important that the gateway needs to have an instance of the graph per request
request_graph = copy.deepcopy(graph)
r = request.routes.add()
r.executor = 'gateway'
r.start_time.GetCurrentTime()
# If the request is targeting a specific deployment, we can send directly to the deployment instead of
# querying the graph
# reset it in case we send to an external gateway
exec_endpoint = request.header.exec_endpoint
node = request_graph.all_nodes[
0
] # this assumes there is only one Executor behind this Gateway
async for resp in node.stream_single_doc(
request=request,
connection_pool=connection_pool,
endpoint=exec_endpoint,
return_type=return_type,
):
yield resp
return _handle_request
def handle_result(self) -> Callable[['Request'], 'Request']:
"""
Function that handles the result when extracted from the request future
:return: Return a Function that returns a request to be returned to the client
"""
def _handle_result(result: 'Request'):
"""
Function that handles the result when extracted from the request future
:param result: The result returned to the gateway. It extracts the request to be returned to the client
:return: Returns a request to be returned to the client
"""
for route in result.routes:
if route.executor == GATEWAY_NAME:
route.end_time.GetCurrentTime()
self._update_end_request_metrics(result)
return result
return _handle_result
| AsyncRequestResponseHandler |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-increasing-subsequence-to-be-removed.py | {
"start": 102,
"end": 630
} | class ____(object):
def minOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def longest_non_increasing_subsequence(arr):
result = []
for x in arr:
right = bisect.bisect_right(result, -x)
if right == len(result):
result.append(-x)
else:
result[right] = -x
return len(result)
return longest_non_increasing_subsequence(nums)
| Solution |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 17986,
"end": 18720
} | class ____:
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2, r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2, r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01), np.log(10), 3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r, result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
@KDTreeTest
| count_neighbors_consistency |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_shift.py | {
"start": 248,
"end": 29521
} | class ____:
def test_shift_axis1_with_valid_fill_value_one_array(self):
# Case with axis=1 that does not go through the "len(arrays)>1" path
# in DataFrame.shift
data = np.random.default_rng(2).standard_normal((5, 3))
df = DataFrame(data)
res = df.shift(axis=1, periods=1, fill_value=12345)
expected = df.T.shift(periods=1, fill_value=12345).T
tm.assert_frame_equal(res, expected)
# same but with a 1D ExtensionArray backing it
df2 = df[[0]].astype("Float64")
res2 = df2.shift(axis=1, periods=1, fill_value=12345)
expected2 = DataFrame([12345] * 5, dtype="Float64")
tm.assert_frame_equal(res2, expected2)
def test_shift_disallow_freq_and_fill_value(self, frame_or_series):
# Can't pass both!
obj = frame_or_series(
np.random.default_rng(2).standard_normal(5),
index=date_range("1/1/2000", periods=5, freq="h"),
)
msg = "Passing a 'freq' together with a 'fill_value'"
with pytest.raises(ValueError, match=msg):
obj.shift(1, fill_value=1, freq="h")
if frame_or_series is DataFrame:
obj.columns = date_range("1/1/2000", periods=1, freq="h")
with pytest.raises(ValueError, match=msg):
obj.shift(1, axis=1, fill_value=1, freq="h")
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data, frame_or_series):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = frame_or_series(input_data).shift(1)
if frame_or_series is not Series:
# need to explicitly specify columns in the empty case
expected = frame_or_series(
output_data,
index=range(len(output_data)),
columns=range(1),
dtype="float64",
)
else:
expected = frame_or_series(output_data, dtype="float64")
tm.assert_equal(result, expected)
def test_shift_mismatched_freq(self, frame_or_series):
ts = frame_or_series(
np.random.default_rng(2).standard_normal(5),
index=date_range("1/1/2000", periods=5, freq="h"),
)
result = ts.shift(1, freq="5min")
exp_index = ts.index.shift(1, freq="5min")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4h")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
@pytest.mark.parametrize(
"obj",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="h"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, obj, shift_size, frame_or_series):
# GH#22397
if frame_or_series is not Series:
obj = obj.to_frame()
assert obj.shift(shift_size) is not obj
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_int(self, datetime_frame, frame_or_series):
ts = tm.get_obj(datetime_frame, frame_or_series).astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_equal(shifted, expected)
@pytest.mark.parametrize("dtype", ["int32", "int64"])
def test_shift_32bit_take(self, frame_or_series, dtype):
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
arr = np.arange(5, dtype=dtype)
s1 = frame_or_series(arr, index=index)
p = arr[1]
result = s1.shift(periods=p)
expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods, frame_or_series):
# GH#21275
obj = frame_or_series(
range(periods),
index=date_range("2016-1-1 00:00:00", periods=periods, freq="h"),
)
result = obj.shift(1, "2h")
expected = frame_or_series(
range(periods),
index=date_range("2016-1-1 02:00:00", periods=periods, freq="h"),
)
tm.assert_equal(result, expected)
def test_shift_dst(self, frame_or_series):
# GH#13926
dates = date_range(
"2016-11-06", freq="h", periods=10, tz="US/Eastern", unit="ns"
)
obj = frame_or_series(dates)
res = obj.shift(0)
tm.assert_equal(res, obj)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
res = obj.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = frame_or_series(exp_vals)
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
@pytest.mark.parametrize("ex", [10, -10, 20, -20])
def test_shift_dst_beyond(self, frame_or_series, ex):
# GH#13926
dates = date_range(
"2016-11-06", freq="h", periods=10, tz="US/Eastern", unit="ns"
)
obj = frame_or_series(dates)
res = obj.shift(ex)
exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_equal(res, exp)
assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]"
def test_shift_by_zero(self, datetime_frame, frame_or_series):
# shift by 0
obj = tm.get_obj(datetime_frame, frame_or_series)
unshifted = obj.shift(0)
tm.assert_equal(unshifted, obj)
def test_shift(self, datetime_frame):
# naive shift
ser = datetime_frame["A"]
shifted = datetime_frame.shift(5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(5)
tm.assert_series_equal(shifted["A"], shifted_ser)
shifted = datetime_frame.shift(-5)
tm.assert_index_equal(shifted.index, datetime_frame.index)
shifted_ser = ser.shift(-5)
tm.assert_series_equal(shifted["A"], shifted_ser)
unshifted = datetime_frame.shift(5).shift(-5)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_frame.values[:-5]
)
unshifted_ser = ser.shift(5).shift(-5)
tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5])
def test_shift_by_offset(self, datetime_frame, frame_or_series):
# shift by DateOffset
obj = tm.get_obj(datetime_frame, frame_or_series)
offset = offsets.BDay()
shifted = obj.shift(5, freq=offset)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset)
tm.assert_equal(unshifted, obj)
shifted2 = obj.shift(5, freq="B")
tm.assert_equal(shifted, shifted2)
unshifted = obj.shift(0, freq=offset)
tm.assert_equal(unshifted, obj)
d = obj.index[0]
shifted_d = d + offset * 5
if frame_or_series is DataFrame:
tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False)
else:
tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d])
def test_shift_with_periodindex(self, frame_or_series):
# Shifting with PeriodIndex
ps = DataFrame(
np.arange(4, dtype=float), index=pd.period_range("2020-01-01", periods=4)
)
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
if frame_or_series is DataFrame:
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
else:
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "D")
shifted3 = ps.shift(1, offsets.Day())
tm.assert_equal(shifted2, shifted3)
tm.assert_equal(ps, shifted2.shift(-1, "D"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="W")
# legacy support
shifted4 = ps.shift(1, freq="D")
tm.assert_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=offsets.Day())
tm.assert_equal(shifted5, shifted4)
def test_shift_other_axis(self):
# shift other axis
# GH#6371
df = DataFrame(np.random.default_rng(2).random((10, 5)))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
def test_shift_named_axis(self):
# shift named axis
df = DataFrame(np.random.default_rng(2).random((10, 5)))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_other_axis_with_freq(self, datetime_frame):
obj = datetime_frame.T
offset = offsets.BDay()
# GH#47039
shifted = obj.shift(5, freq=offset, axis=1)
assert len(shifted) == len(obj)
unshifted = shifted.shift(-5, freq=offset, axis=1)
tm.assert_equal(unshifted, obj)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical1(self, frame_or_series):
# GH#9416
obj = frame_or_series(["a", "b", "c", "d"], dtype="category")
rt = obj.shift(1).shift(-1)
tm.assert_equal(obj.iloc[:-1], rt.dropna())
def get_cat_values(ndframe):
# For Series we could just do ._values; for DataFrame
# we may be able to do this if we ever have 2D Categoricals
return ndframe._mgr.blocks[0].values
cat = get_cat_values(obj)
sp1 = obj.shift(1)
tm.assert_index_equal(obj.index, sp1.index)
assert np.all(get_cat_values(sp1).codes[:1] == -1)
assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:])
sn2 = obj.shift(-2)
tm.assert_index_equal(obj.index, sn2.index)
assert np.all(get_cat_values(sn2).codes[-2:] == -1)
assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2])
tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories)
tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_categorical_fill_value(self, frame_or_series):
ts = frame_or_series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = frame_or_series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = r"Cannot setitem on a Categorical with a new category \(f\)"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_fill_value(self, frame_or_series):
# GH#24128
dti = date_range("1/1/2000", periods=5, freq="h")
ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti)
exp = frame_or_series([0.0, 1.0, 2.0, 3.0, 4.0], index=dti)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_equal(result, exp)
exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti)
result = ts.shift(2, fill_value=0.0)
tm.assert_equal(result, exp)
ts = frame_or_series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert tm.get_dtype(res) == tm.get_dtype(ts)
# retain integer dtype
obj = frame_or_series([1, 2, 3, 4, 5], index=dti)
exp = frame_or_series([0, 1, 2, 3, 4], index=dti)
result = obj.shift(1, fill_value=0)
tm.assert_equal(result, exp)
exp = frame_or_series([0, 0, 1, 2, 3], index=dti)
result = obj.shift(2, fill_value=0)
tm.assert_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.default_rng(2).standard_normal((20, 5))
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self):
# GH#35488
df1 = DataFrame(np.random.default_rng(2).integers(1000, size=(5, 3)))
df2 = DataFrame(np.random.default_rng(2).integers(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
# Explicit cast to float to avoid implicit cast when setting nan.
# Column names aren't unique, so directly calling `expected.astype` won't work.
expected = expected.pipe(
lambda df: df.set_axis(range(df.shape[1]), axis=1)
.astype({0: "float", 1: "float"})
.set_axis(df.columns, axis=1)
)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
# Explicit cast to float to avoid implicit cast when setting nan.
# Column names aren't unique, so directly calling `expected.astype` won't work.
expected = expected.pipe(
lambda df: df.set_axis(range(df.shape[1]), axis=1)
.astype({3: "float", 4: "float"})
.set_axis(df.columns, axis=1)
)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
def test_shift_axis1_multiple_blocks_with_int_fill(self):
# GH#42719
rng = np.random.default_rng(2)
df1 = DataFrame(rng.integers(1000, size=(5, 3), dtype=int))
df2 = DataFrame(rng.integers(1000, size=(5, 2), dtype=int))
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([-1, -1, 0, 1], axis=1)
expected.iloc[:, :2] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1)
result = df3.shift(-2, axis=1, fill_value=np.int_(0))
assert len(df3._mgr.blocks) == 2
expected = df3.take([2, 3, -1, -1], axis=1)
expected.iloc[:, -2:] = np.int_(0)
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
def test_period_index_frame_shift_with_freq(self, frame_or_series):
ps = DataFrame(range(4), index=pd.period_range("2020-01-01", periods=4))
ps = tm.get_obj(ps, frame_or_series)
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(unshifted, ps)
shifted2 = ps.shift(freq="D")
tm.assert_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.Day())
tm.assert_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame, frame_or_series):
dtobj = tm.get_obj(datetime_frame, frame_or_series)
shifted = dtobj.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(dtobj, unshifted)
shifted2 = dtobj.shift(freq=dtobj.index.freq)
tm.assert_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
inferred_ts = tm.get_obj(inferred_ts, frame_or_series)
shifted = inferred_ts.shift(1, freq="infer")
expected = dtobj.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_equal(unshifted, inferred_ts)
def test_period_index_frame_shift_with_freq_error(self, frame_or_series):
ps = DataFrame(range(4), index=pd.period_range("2020-01-01", periods=4))
ps = tm.get_obj(ps, frame_or_series)
msg = "Given freq M does not match PeriodIndex freq D"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="M")
def test_datetime_frame_shift_with_freq_error(
self, datetime_frame, frame_or_series
):
dtobj = tm.get_obj(datetime_frame, frame_or_series)
no_freq = dtobj.iloc[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.shift(freq="infer")
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
with pytest.raises(TypeError, match="value should be a"):
ser.shift(1, fill_value=0)
df = ser.to_frame()
with pytest.raises(TypeError, match="value should be a"):
df.shift(1, fill_value=0)
# axis = 1
df2 = DataFrame({"A": ser, "B": ser})
df2._consolidate_inplace()
result = df2.shift(1, axis=1, fill_value=0)
expected = DataFrame({"A": [0, 0], "B": df2["A"]})
tm.assert_frame_equal(result, expected)
# same thing but not consolidated; pre-2.0 we got different behavior
df3 = DataFrame({"A": ser})
df3["B"] = ser
assert len(df3._mgr.blocks) == 2
result = df3.shift(1, axis=1, fill_value=0)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_cat",
[
pytest.param(
True,
marks=pytest.mark.xfail(
reason="_can_hold_element incorrectly always returns True"
),
),
False,
],
)
@pytest.mark.parametrize(
"vals",
[
date_range("2020-01-01", periods=2),
date_range("2020-01-01", periods=2, tz="US/Pacific"),
pd.period_range("2020-01-01", periods=2, freq="D"),
pd.timedelta_range("2020 Days", periods=2, freq="D"),
pd.interval_range(0, 3, periods=2),
pytest.param(
pd.array([1, 2], dtype="Int64"),
marks=pytest.mark.xfail(
reason="_can_hold_element incorrectly always returns True"
),
),
pytest.param(
pd.array([1, 2], dtype="Float32"),
marks=pytest.mark.xfail(
reason="_can_hold_element incorrectly always returns True"
),
),
],
ids=lambda x: str(x.dtype),
)
def test_shift_dt64values_axis1_invalid_fill(self, vals, as_cat):
# GH#44564
ser = Series(vals)
if as_cat:
ser = ser.astype("category")
df = DataFrame({"A": ser})
result = df.shift(-1, axis=1, fill_value="foo")
expected = DataFrame({"A": ["foo", "foo"]})
tm.assert_frame_equal(result, expected)
# same thing but multiple blocks
df2 = DataFrame({"A": ser, "B": ser})
df2._consolidate_inplace()
result = df2.shift(-1, axis=1, fill_value="foo")
expected = DataFrame({"A": df2["B"], "B": ["foo", "foo"]})
tm.assert_frame_equal(result, expected)
# same thing but not consolidated
df3 = DataFrame({"A": ser})
df3["B"] = ser
assert len(df3._mgr.blocks) == 2
result = df3.shift(-1, axis=1, fill_value="foo")
tm.assert_frame_equal(result, expected)
def test_shift_axis1_categorical_columns(self):
# GH#38434
ci = CategoricalIndex(["a", "b", "c"])
df = DataFrame(
{"a": [1, 3], "b": [2, 4], "c": [5, 6]}, index=ci[:-1], columns=ci
)
result = df.shift(axis=1)
expected = DataFrame(
{"a": [np.nan, np.nan], "b": [1, 3], "c": [2, 4]}, index=ci[:-1], columns=ci
)
tm.assert_frame_equal(result, expected)
# periods != 1
result = df.shift(2, axis=1)
expected = DataFrame(
{"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 3]},
index=ci[:-1],
columns=ci,
)
tm.assert_frame_equal(result, expected)
def test_shift_axis1_many_periods(self):
# GH#44978 periods > len(columns)
df = DataFrame(np.random.default_rng(2).random((5, 3)))
shifted = df.shift(6, axis=1, fill_value=None)
expected = df * np.nan
tm.assert_frame_equal(shifted, expected)
shifted2 = df.shift(-6, axis=1, fill_value=None)
tm.assert_frame_equal(shifted2, expected)
def test_shift_with_offsets_freq(self):
df = DataFrame({"x": [1, 2, 3]}, index=date_range("2000", periods=3))
shifted = df.shift(freq="1MS")
expected = DataFrame(
{"x": [1, 2, 3]},
index=date_range(start="02/01/2000", end="02/01/2000", periods=3),
)
tm.assert_frame_equal(shifted, expected)
def test_shift_with_iterable_basic_functionality(self):
# GH#44424
data = {"a": [1, 2, 3], "b": [4, 5, 6]}
shifts = [0, 1, 2]
df = DataFrame(data)
shifted = df.shift(shifts)
expected = DataFrame(
{
"a_0": [1, 2, 3],
"b_0": [4, 5, 6],
"a_1": [np.nan, 1.0, 2.0],
"b_1": [np.nan, 4.0, 5.0],
"a_2": [np.nan, np.nan, 1.0],
"b_2": [np.nan, np.nan, 4.0],
}
)
tm.assert_frame_equal(expected, shifted)
def test_shift_with_iterable_series(self):
# GH#44424
data = {"a": [1, 2, 3]}
shifts = [0, 1, 2]
df = DataFrame(data)
s = df["a"]
tm.assert_frame_equal(s.shift(shifts), df.shift(shifts))
def test_shift_with_iterable_freq_and_fill_value(self):
# GH#44424
df = DataFrame(
np.random.default_rng(2).standard_normal(5),
index=date_range("1/1/2000", periods=5, freq="h"),
)
tm.assert_frame_equal(
# rename because shift with an iterable leads to str column names
df.shift([1], fill_value=1).rename(columns=lambda x: int(x[0])),
df.shift(1, fill_value=1),
)
tm.assert_frame_equal(
df.shift([1], freq="h").rename(columns=lambda x: int(x[0])),
df.shift(1, freq="h"),
)
def test_shift_with_iterable_check_other_arguments(self):
# GH#44424
data = {"a": [1, 2], "b": [4, 5]}
shifts = [0, 1]
df = DataFrame(data)
# test suffix
shifted = df[["a"]].shift(shifts, suffix="_suffix")
expected = DataFrame({"a_suffix_0": [1, 2], "a_suffix_1": [np.nan, 1.0]})
tm.assert_frame_equal(shifted, expected)
# check bad inputs when doing multiple shifts
msg = "If `periods` contains multiple shifts, `axis` cannot be 1."
with pytest.raises(ValueError, match=msg):
df.shift(shifts, axis=1)
msg = "Periods must be integer, but s is <class 'str'>."
with pytest.raises(TypeError, match=msg):
df.shift(["s"])
msg = "If `periods` is an iterable, it cannot be empty."
with pytest.raises(ValueError, match=msg):
df.shift([])
msg = "Cannot specify `suffix` if `periods` is an int."
with pytest.raises(ValueError, match=msg):
df.shift(1, suffix="fails")
def test_shift_axis_one_empty(self):
# GH#57301
df = DataFrame()
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, df)
def test_shift_with_offsets_freq_empty(self):
# GH#60102
dates = date_range("2020-01-01", periods=3, freq="D")
offset = offsets.Day()
shifted_dates = dates + offset
df = DataFrame(index=dates)
df_shifted = DataFrame(index=shifted_dates)
result = df.shift(freq=offset)
tm.assert_frame_equal(result, df_shifted)
def test_series_shift_interval_preserves_closed(self):
# GH#60389
ser = Series(
[pd.Interval(1, 2, closed="right"), pd.Interval(2, 3, closed="right")]
)
result = ser.shift(1)
expected = Series([np.nan, pd.Interval(1, 2, closed="right")])
tm.assert_series_equal(result, expected)
def test_shift_invalid_fill_value_deprecation(self):
# GH#53802
df = DataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
}
)
msg = "shifting with a fill value that cannot"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
df.shift(1, fill_value="foo")
with tm.assert_produces_warning(Pandas4Warning, match=msg):
df["a"].shift(1, fill_value="foo")
with tm.assert_produces_warning(Pandas4Warning, match=msg):
df["b"].shift(1, fill_value="foo")
# An incompatible null value
with tm.assert_produces_warning(Pandas4Warning, match=msg):
df.shift(1, fill_value=NaT)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
df["a"].shift(1, fill_value=NaT)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
df["b"].shift(1, fill_value=NaT)
def test_shift_dt_index_multiple_periods_unsorted(self):
# https://github.com/pandas-dev/pandas/pull/62843
values = date_range("1/1/2000", periods=4, freq="D")
df = DataFrame({"a": [1, 2]}, index=[values[1], values[0]])
result = df.shift(periods=[1, 2], freq="D")
expected = DataFrame(
{
"a_1": [1.0, 2.0, np.nan],
"a_2": [2.0, np.nan, 1.0],
},
index=[values[2], values[1], values[3]],
)
tm.assert_frame_equal(result, expected)
| TestDataFrameShift |
python | getsentry__sentry | src/social_auth/backends/__init__.py | {
"start": 2150,
"end": 6663
} | class ____:
"""A django.contrib.auth backend that authenticates the user based on
a authentication provider response"""
name = "" # provider name, it's stored in database
supports_inactive_user = False
def authenticate(self, request, *args, **kwargs):
"""Authenticate user using social credentials
Authentication is made if this is the correct backend, backend
verification is made by kwargs inspection for current backend
name presence.
"""
# Validate backend and arguments. Require that the Social Auth
# response be passed in as a keyword argument, to make sure we
# don't match the username/password calling conventions of
# authenticate.
if not (self.name and kwargs.get(self.name) and "response" in kwargs):
return None
response = kwargs.get("response")
pipeline = PIPELINE
kwargs = kwargs.copy()
kwargs["backend"] = self
if "pipeline_index" in kwargs:
pipeline = pipeline[kwargs["pipeline_index"] :]
else:
kwargs["details"] = self.get_user_details(response)
kwargs["uid"] = self.get_user_id(kwargs["details"], response)
kwargs["is_new"] = False
out = self.pipeline(pipeline, request, *args, **kwargs)
if not isinstance(out, dict):
return out
social_user = out.get("social_user")
if social_user:
# define user.social_user attribute to track current social
# account
user = social_user.user
user.social_user = social_user
user.is_new = out.get("is_new")
return user
def pipeline(self, pipeline, request, *args, **kwargs):
"""Pipeline"""
out = kwargs.copy()
if "pipeline_index" in kwargs:
base_index = int(kwargs["pipeline_index"])
else:
base_index = 0
for idx, name in enumerate(pipeline):
out["pipeline_index"] = base_index + idx
mod_name, func_name = name.rsplit(".", 1)
mod = __import__(mod_name, {}, {}, [func_name])
func = getattr(mod, func_name)
try:
result: dict[str, Any] = {}
if func_name == "save_status_to_session":
result = func(request, *args, **out) or {}
else:
result = func(*args, **out) or {}
except StopPipeline:
# Clean partial pipeline on stop
if "request" in kwargs:
clean_partial_pipeline(kwargs["request"])
break
if isinstance(result, dict):
out.update(result)
else:
return result
# clean the partial pipeline at the end of the process
if "request" in kwargs:
clean_partial_pipeline(kwargs["request"])
return out
def extra_data(self, user, uid, response, details):
"""Return default blank user extra data"""
return {}
def get_user_id(self, details, response):
"""Must return a unique ID from values returned on details"""
raise NotImplementedError("Implement in subclass")
def get_user_details(self, response):
"""Must return user details in a know internal struct:
{'username': <username if any>,
'email': <user email if any>,
'fullname': <user full name if any>,
'first_name': <user first name if any>,
'last_name': <user last name if any>}
"""
raise NotImplementedError("Implement in subclass")
@classmethod
def tokens(cls, instance):
"""Return the tokens needed to authenticate the access to any API the
service might provide. The return value will be a dictionary with the
token type name as key and the token value.
instance must be a UserSocialAuth instance.
"""
if instance.extra_data and "access_token" in instance.extra_data:
return {"access_token": instance.extra_data["access_token"]}
else:
return {}
def get_user(self, user_id):
"""
Return user with given ID from the User model used by this backend.
This is called by django.contrib.auth.middleware.
"""
user = user_service.get_user(user_id=user_id)
if user and user.is_active:
return user
return None
| SocialAuthBackend |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 141851,
"end": 144559
} | class ____(ColumnElement[_T]):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. Most modern SQL backends now support window functions.
"""
__visit_name__ = "over"
_traverse_internals: _TraverseInternalsType = [
("element", InternalTraversal.dp_clauseelement),
("order_by", InternalTraversal.dp_clauseelement),
("partition_by", InternalTraversal.dp_clauseelement),
("range_", InternalTraversal.dp_clauseelement),
("rows", InternalTraversal.dp_clauseelement),
("groups", InternalTraversal.dp_clauseelement),
]
order_by: Optional[ClauseList] = None
partition_by: Optional[ClauseList] = None
element: ColumnElement[_T]
"""The underlying expression object to which this :class:`.Over`
object refers."""
range_: FrameClause | None
rows: FrameClause | None
groups: FrameClause | None
def __init__(
self,
element: ColumnElement[_T],
partition_by: Optional[_ByArgument] = None,
order_by: Optional[_ByArgument] = None,
range_: _FrameIntTuple | FrameClause | None = None,
rows: _FrameIntTuple | FrameClause | None = None,
groups: _FrameIntTuple | FrameClause | None = None,
):
self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by), _literal_as_text_role=roles.ByOfRole
)
if partition_by is not None:
self.partition_by = ClauseList(
*util.to_list(partition_by),
_literal_as_text_role=roles.ByOfRole,
)
if sum(item is not None for item in (range_, rows, groups)) > 1:
raise exc.ArgumentError(
"only one of 'rows', 'range_', or 'groups' may be provided"
)
else:
self.range_ = FrameClause._parse(range_, coerce_int=False)
self.rows = FrameClause._parse(rows, coerce_int=True)
self.groups = FrameClause._parse(groups, coerce_int=True)
if not TYPE_CHECKING:
@util.memoized_property
def type(self) -> TypeEngine[_T]: # noqa: A001
return self.element.type
@util.ro_non_memoized_property
def _from_objects(self) -> List[FromClause]:
return list(
itertools.chain(
*[
c._from_objects
for c in (self.element, self.partition_by, self.order_by)
if c is not None
]
)
)
| Over |
python | scrapy__scrapy | tests/test_webclient.py | {
"start": 6009,
"end": 6276
} | class ____(resource.Resource):
out_encoding = "cp1251"
def render(self, request):
body = to_unicode(request.content.read())
request.setHeader(b"content-encoding", self.out_encoding)
return body.encode(self.out_encoding)
| EncodingResource |
python | django-haystack__django-haystack | test_haystack/test_views.py | {
"start": 7218,
"end": 9192
} | class ____(TestCase):
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_search_no_query(self):
response = self.client.get(reverse("haystack_faceted_search"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["facets"], {})
def test_empty_results(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict("")
fsv.form = fsv.build_form()
self.assertTrue(isinstance(fsv.get_results(), EmptySearchQuerySet))
def test_default_form(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict("")
fsv.form = fsv.build_form()
self.assertTrue(isinstance(fsv.form, FacetedSearchForm))
def test_list_selected_facets(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict("")
fsv.form = fsv.build_form()
self.assertEqual(fsv.form.selected_facets, [])
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict(
"selected_facets=author:daniel&selected_facets=author:chris"
)
fsv.form = fsv.build_form()
self.assertEqual(fsv.form.selected_facets, ["author:daniel", "author:chris"])
| FacetedSearchViewTestCase |
python | django__django | tests/serializers/test_yaml.py | {
"start": 5048,
"end": 5495
} | class ____(
SerializersTransactionTestBase, TransactionTestCase
):
serializer_name = "yaml"
fwd_ref_str = """- model: serializers.article
pk: 1
fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
- model: serializers.category
pk: 1
fields:
name: Reference
- model: serializers.author
pk: 1
fields:
name: Agnes"""
| YamlSerializerTransactionTestCase |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_float.py | {
"start": 668,
"end": 3291
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
import fractions
import operator
import os
import random
import sys
import struct
import time
import unittest
from test import support
VALID_UNDERSCORE_LITERALS = [
'0_0_0',
'4_2',
'1_0000_0000',
'0b1001_0100',
'0xffff_ffff',
'0o5_7_7',
'1_00_00.5',
'1_00_00.5e5',
'1_00_00e5_1',
'1e1_0',
'.1_4',
'.1_4e1',
'0b_0',
'0x_f',
'0o_5',
'1_00_00j',
'1_00_00.5j',
'1_00_00e5_1j',
'.1_4j',
'(1_2.5+3_3j)',
'(.5_6j)',
]
INVALID_UNDERSCORE_LITERALS = [
# Trailing underscores:
'0_',
'42_',
'1.4j_',
'0x_',
'0b1_',
'0xf_',
'0o5_',
'0 if 1_Else 1',
# Underscores in the base selector:
'0_b0',
'0_xf',
'0_o5',
# Old-style octal, still disallowed:
'0_7',
'09_99',
# Multiple consecutive underscores:
'4_______2',
'0.1__4',
'0.1__4j',
'0b1001__0100',
'0xffff__ffff',
'0x___',
'0o5__77',
'1e1__0',
'1e1__0j',
# Underscore right before a dot:
'1_.4',
'1_.4j',
# Underscore right after a dot:
'1._4',
'1._4j',
'._5',
'._5j',
# Underscore right after a sign:
'1.0e+_1',
'1.0e+_1j',
# Underscore right before j:
'1.4_j',
'1.4e5_j',
# Underscore right before e:
'1_e1',
'1.4_e1',
'1.4_e1j',
# Underscore right after e:
'1e_1',
'1.4e_1',
'1.4e_1j',
# Complex cases with parens:
'(1+1.5_j_)',
'(1+1.5_j)',
]
from math import isinf, isnan, copysign, ldexp
import math
try:
import _testcapi
except ImportError:
_testcapi = None
INF = float("inf")
NAN = float("nan")
#locate file with float format test values
test_dir = os.path.dirname(__file__) or os.curdir
format_testfile = os.path.join(test_dir, 'mathdata', 'formatfloat_testcases.txt')
| RedirectImportFinder |
python | ansible__ansible | lib/ansible/module_utils/six/__init__.py | {
"start": 7443,
"end": 12583
} | class ____(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections", "IterableUserDict", "UserDict"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
| _MovedItems |
python | pytorch__pytorch | tools/experimental/torchfuzz/codegen.py | {
"start": 272,
"end": 7703
} | class ____:
def __init__(self, supported_ops, check):
self.supported_ops = supported_ops
self.check = check
def supported_dtypes(self):
"""Return list of supported dtypes for this template."""
return [
torch.float32,
torch.float64,
torch.float16,
torch.bfloat16,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.bool,
]
def spec_distribution(self):
"""
Define the distribution for generating random Specs.
Returns:
Dict with keys:
- 'tensor_prob': Probability of generating TensorSpec (0.0 to 1.0)
- 'scalar_prob': Probability of generating ScalarSpec (0.0 to 1.0)
- 'allow_tensors': Whether TensorSpec generation is allowed (boolean)
- 'allow_scalars': Whether ScalarSpec generation is allowed (boolean)
"""
return {
"tensor_prob": 0.8,
"scalar_prob": 0.2,
"allow_tensors": True,
"allow_scalars": True,
}
def fuzz_spec_custom(self):
"""
Generate a random Spec based on this template's distribution preferences.
Returns:
Spec: Either a TensorSpec or ScalarSpec according to template's distribution
"""
import random
from torchfuzz.tensor_fuzzer import fuzz_torch_tensor_type
# Get template's distribution configuration
distribution = self.spec_distribution()
# Get random dtype based on template
dtype = fuzz_torch_tensor_type("default")
# Validate distribution configuration
allow_tensors = distribution.get("allow_tensors", True)
allow_scalars = distribution.get("allow_scalars", True)
if not allow_tensors and not allow_scalars:
raise ValueError("Template must allow at least one of tensors or scalars")
# Determine which type to generate
if not allow_scalars:
# Only tensors allowed
return self._generate_tensor_spec(dtype)
elif not allow_tensors:
# Only scalars allowed
return self._generate_scalar_spec(dtype)
else:
# Both allowed, use probability distribution
tensor_prob = distribution.get("tensor_prob", 0.8)
if random.random() < tensor_prob:
return self._generate_tensor_spec(dtype)
else:
return self._generate_scalar_spec(dtype)
def _generate_tensor_spec(self, dtype):
"""Generate a TensorSpec with the given dtype."""
from torchfuzz.tensor_fuzzer import (
fuzz_tensor_size,
fuzz_valid_stride,
TensorSpec,
)
size = fuzz_tensor_size()
stride = fuzz_valid_stride(size)
return TensorSpec(size=size, stride=stride, dtype=dtype)
def _generate_scalar_spec(self, dtype):
"""Generate a ScalarSpec with the given dtype."""
from torchfuzz.tensor_fuzzer import ScalarSpec
return ScalarSpec(dtype=dtype)
def args_codegen(self, arg_operations):
"""Generate argument creation code for default template."""
code_lines = []
# Add sentinel tensor that ensures gradient computation
code_lines.extend(
[
"# Sentinel tensor to ensure gradient computation",
"sentinel = torch.tensor(1.0, requires_grad=True)",
"",
]
)
if arg_operations:
for i, (node_id, spec) in enumerate(arg_operations):
arg_name = f"arg_{i}"
if isinstance(spec, ScalarSpec):
dtype_str = f"torch.{spec.dtype}".replace("torch.torch.", "torch.")
if spec.dtype in [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
# For integer scalars, use randint to avoid always getting 0
code_lines.append(
f"{arg_name} = int(torch.randint(5, 30, ()).item())"
)
elif spec.dtype == torch.bool:
# For boolean scalars, use randint and cast to bool
code_lines.append(
f"{arg_name} = bool(torch.randint(0, 2, ()).item())"
)
else:
# For float scalars, use randn
code_lines.append(
f"{arg_name} = float(torch.randn((), dtype={dtype_str}).item())"
)
elif isinstance(spec, TensorSpec):
size_str = str(spec.size)
dtype_str = f"torch.{spec.dtype}".replace("torch.torch.", "torch.")
# Calculate storage size needed for the strided tensor
if spec.size:
# Calculate the maximum index that will be accessed
max_offset = 0
for dim_size, stride in zip(spec.size, spec.stride):
if dim_size > 1:
max_offset += (dim_size - 1) * abs(stride)
storage_size = max_offset + 1
else:
storage_size = 1
stride_str = str(spec.stride)
# Special handling for integer tensors which might be used as indices
if spec.dtype in [
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
# For integer tensors, generate valid indices with headroom for arithmetic
# Use smaller range [5, 30] to allow for multiplication and other operations
# This prevents indices from becoming too large after arithmetic
min_val = (
5 # Minimum to avoid negative results after subtraction
)
max_val = (
30 # Maximum to avoid out-of-bounds after multiplication
)
code_lines.append(
f"{arg_name} = torch.as_strided(torch.randint({min_val}, {max_val}, ({storage_size},)).to({dtype_str}), {size_str}, {stride_str})"
)
elif spec.dtype == torch.bool:
# For boolean tensors, use randint to generate True/False values
# Using randn().to(bool) would yield almost all True due to non-zero floats
code_lines.append(
f"{arg_name} = torch.as_strided(torch.randint(0, 2, ({storage_size},), dtype=torch.int8).bool(), {size_str}, {stride_str})"
)
else:
code_lines.append(
f"{arg_name} = torch.as_strided(torch.randn({storage_size}).to({dtype_str}), {size_str}, {stride_str})"
)
return code_lines
| FuzzTemplate |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 32037,
"end": 32879
} | class ____(WebSocketBaseTestCase):
def get_app(self):
return Application([("/", EchoHandler)], websocket_max_message_size=1024)
@gen_test
def test_large_message(self):
ws = yield self.ws_connect("/")
# Write a message that is allowed.
msg = "a" * 1024
ws.write_message(msg)
resp = yield ws.read_message()
self.assertEqual(resp, msg)
# Write a message that is too large.
ws.write_message(msg + "b")
resp = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(resp, None)
self.assertEqual(ws.close_code, 1009)
self.assertEqual(ws.close_reason, "message too big")
# TODO: Needs tests of messages split over multiple
# continuation frames.
| MaxMessageSizeTest |
python | numba__numba | numba/core/typed_passes.py | {
"start": 21176,
"end": 22269
} | class ____(LoweringPass):
_name = "nopython_backend"
def __init__(self):
LoweringPass.__init__(self)
def run_pass(self, state):
"""
Back-end: Generate LLVM IR from Numba IR, compile to machine code
"""
lowered = state['cr']
signature = typing.signature(state.return_type, *state.args)
from numba.core.compiler import compile_result
state.cr = compile_result(
typing_context=state.typingctx,
target_context=state.targetctx,
entry_point=lowered.cfunc,
typing_error=state.status.fail_reason,
type_annotation=state.type_annotation,
library=state.library,
call_helper=lowered.call_helper,
signature=signature,
objectmode=False,
lifted=state.lifted,
fndesc=lowered.fndesc,
environment=lowered.env,
metadata=state.metadata,
reload_init=state.reload_init,
)
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| NoPythonBackend |
python | getsentry__sentry | src/sentry/auth/providers/saml2/forms.py | {
"start": 1801,
"end": 1946
} | class ____(forms.Form):
metadata_xml = forms.CharField(label="Metadata XML", widget=forms.Textarea)
processor = process_xml
| XMLMetadataForm |
python | wandb__wandb | wandb/sdk/launch/inputs/internal.py | {
"start": 706,
"end": 1586
} | class ____:
"""Singleton for managing temporary directories for configuration files.
Any configuration files designated as inputs to a launch job are copied to
a temporary directory. This singleton manages the temporary directory and
provides paths to the configuration files.
"""
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self):
if not hasattr(self, "_tmp_dir"):
self._tmp_dir = tempfile.mkdtemp()
self._configs_dir = os.path.join(self._tmp_dir, LAUNCH_MANAGED_CONFIGS_DIR)
os.mkdir(self._configs_dir)
@property
def tmp_dir(self):
return pathlib.Path(self._tmp_dir)
@property
def configs_dir(self):
return pathlib.Path(self._configs_dir)
| ConfigTmpDir |
python | sphinx-doc__sphinx | tests/test_ext_napoleon/test_ext_napoleon.py | {
"start": 1982,
"end": 2590
} | class ____:
def test_modify_in_place(self) -> None:
lines = [
'Summary line.',
'',
'Args:',
' arg1: arg1 description',
]
app = mock.Mock()
app.config = Config()
_process_docstring(
app,
'class',
'SampleClass',
SampleClass,
mock.Mock(),
lines,
)
expected = [
'Summary line.',
'',
':param arg1: arg1 description',
'',
]
assert lines == expected
| TestProcessDocstring |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/pool_test.py | {
"start": 4728,
"end": 14559
} | class ____(test.TestCase):
def _test(self, input_shape, **kwargs):
# Use negative numbers to make sure there isn't any zero padding getting
# used.
x = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
y1 = pool_direct(input=x, **kwargs)
y2 = nn_ops.pool(input=x, **kwargs)
self.assertAllClose(y1, self.evaluate(y2), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
self._test(
input_shape=[1, 1, 10, 1],
window_shape=[1, 3],
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=[1, 2])
def testPool1D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 2], [2, 10, 2]]:
for window_shape in [[1], [2], [3]]:
if padding != "SAME":
for dilation_rate in [[1], [2], [3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2], [3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testPool2D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 2], [2, 10, 9, 2]]:
for window_shape in [[1, 1], [2, 1], [2, 3]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [1, 2], [2, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testPool3D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
for input_shape in [[2, 9, 10, 11, 2], [2, 10, 9, 11, 2]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 3, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [1, 2, 2],
[2, 3, 3]]:
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [1, 2, 2], [2, 3, 3]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
def testPoolNC(self):
if test.is_gpu_available(cuda_only=True):
# "NC*" format is currently only supported on CUDA.
with self.session():
for padding in ["SAME", "VALID"]:
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[1],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 9],
window_shape=[2],
padding=padding,
pooling_type="MAX",
strides=[2],
dilation_rate=[1],
data_format="NCW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2],
dilation_rate=[1, 1],
data_format="NCHW")
self._test(
input_shape=[2, 2, 7, 5, 3],
window_shape=[2, 2, 2],
padding=padding,
pooling_type="MAX",
strides=[1, 2, 1],
dilation_rate=[1, 1, 1],
data_format="NCDHW")
self._test(
input_shape=[2, 2, 7, 9],
window_shape=[2, 2],
padding="VALID",
pooling_type="MAX",
strides=[1, 1],
dilation_rate=[2, 2],
data_format="NCHW")
def _test_gradient(self, input_shape, **kwargs):
x_val = -np.arange(
np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
output = nn_ops.pool(input=x, **kwargs)
y_shape = output.get_shape().as_list()
err = gradient_checker.compute_gradient_error([x], [input_shape],
output,
y_shape,
x_init_value=[x_val])
err_tolerance = 1e-2
self.assertLess(err, err_tolerance)
@test_util.run_deprecated_v1
def testGradient1D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 5, 2], [1, 4, 1]]:
for window_shape in [[1], [2]]:
if padding != "SAME":
for dilation_rate in [[1], [2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
@test_util.run_deprecated_v1
def testGradient2D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 4, 5, 2], [1, 5, 4, 1]]:
for window_shape in [[1, 1], [2, 1], [2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
@test_util.run_deprecated_v1
def testGradient3D(self):
with self.session(use_gpu=test.is_gpu_available()):
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[1, 3, 5, 4, 1], [1, 5, 4, 3, 1]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
if __name__ == "__main__":
test.main()
| PoolingTest |
python | apache__airflow | devel-common/src/sphinx_exts/substitution_extensions.py | {
"start": 1935,
"end": 4210
} | class ____(SphinxTransform):
"""Substitute ``|variables|`` in code and code-block nodes"""
# Run before we highlight the code!
default_priority = HighlightLanguageTransform.default_priority - 1
def apply(self, **kwargs: Any) -> None:
def condition(node):
return isinstance(node, (nodes.literal_block, nodes.literal))
for node in self.document.traverse(condition):
# Guard: Only process Element nodes with a truthy substitution option
if not (isinstance(node, nodes.Element) and node.attributes.get(_SUBSTITUTION_OPTION_NAME)):
continue
# Some nodes don't have a direct document property, so walk up until we find it
document = node.document
parent = node.parent
while document is None:
parent = parent.parent
document = parent.document
substitution_defs = document.substitution_defs
for child in node.children:
old_child = child
# Only substitute for Text nodes
if isinstance(child, nodes.Text):
new_text = str(child)
for name, value in substitution_defs.items():
replacement = value.astext()
new_text = new_text.replace(f"|{name}|", replacement)
# Only replace if the text actually changed
if new_text != str(child):
child_new = nodes.Text(new_text)
node.replace(old_child, child_new)
# For non-Text nodes, do not replace
# The highlighter checks this -- without this, it will refuse to apply highlighting
node.rawsource = node.astext()
def substitution_code_role(*args, **kwargs) -> tuple[list, list[Any]]:
"""Decorate an inline code so that SubstitutionCodeBlockTransform will notice it"""
[node], system_messages = code_role(*args, **kwargs)
node[_SUBSTITUTION_OPTION_NAME] = True # type: ignore[index]
return [node], system_messages
substitution_code_role.options = { # type: ignore
"class": directives.class_option,
"language": directives.unchanged,
}
| SubstitutionCodeBlockTransform |
python | boto__boto3 | tests/unit/docs/test_utils.py | {
"start": 691,
"end": 1760
} | class ____(unittest.TestCase):
def test_target_is_single_resource(self):
param = Parameter('InstanceId', 'response')
ignore_params = get_resource_ignore_params([param])
assert ignore_params == ['InstanceId']
def test_target_is_multiple_resources(self):
param = Parameter('InstanceIds[]', 'response')
ignore_params = get_resource_ignore_params([param])
assert ignore_params == ['InstanceIds']
def test_target_is_element_of_multiple_resources(self):
param = Parameter('InstanceIds[0]', 'response')
ignore_params = get_resource_ignore_params([param])
assert ignore_params == ['InstanceIds']
def test_target_is_nested_param(self):
param = Parameter('Filters[0].Name', 'response')
ignore_params = get_resource_ignore_params([param])
assert ignore_params == ['Filters']
param = Parameter('Filters[0].Values[0]', 'response')
ignore_params = get_resource_ignore_params([param])
assert ignore_params == ['Filters']
| TestGetResourceIgnoreParams |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 53570,
"end": 55432
} | class ____:
# TODO: These all belong upstream
def test_invalid_properties(self):
with self.assertRaises(exceptions.SchemaError):
self.Validator.check_schema({"properties": 12})
def test_minItems_invalid_string(self):
with self.assertRaises(exceptions.SchemaError):
# needs to be an integer
self.Validator.check_schema({"minItems": "1"})
def test_enum_allows_empty_arrays(self):
"""
Technically, all the spec says is they SHOULD have elements, not MUST.
(As of Draft 6. Previous drafts do say MUST).
See #529.
"""
if self.Validator in {
validators.Draft3Validator,
validators.Draft4Validator,
}:
with self.assertRaises(exceptions.SchemaError):
self.Validator.check_schema({"enum": []})
else:
self.Validator.check_schema({"enum": []})
def test_enum_allows_non_unique_items(self):
"""
Technically, all the spec says is they SHOULD be unique, not MUST.
(As of Draft 6. Previous drafts do say MUST).
See #529.
"""
if self.Validator in {
validators.Draft3Validator,
validators.Draft4Validator,
}:
with self.assertRaises(exceptions.SchemaError):
self.Validator.check_schema({"enum": [12, 12]})
else:
self.Validator.check_schema({"enum": [12, 12]})
def test_schema_with_invalid_regex(self):
with self.assertRaises(exceptions.SchemaError):
self.Validator.check_schema({"pattern": "*notaregex"})
def test_schema_with_invalid_regex_with_disabled_format_validation(self):
self.Validator.check_schema(
{"pattern": "*notaregex"},
format_checker=None,
)
| MetaSchemaTestsMixin |
python | spack__spack | lib/spack/spack/vendor/jinja2/ext.py | {
"start": 21881,
"end": 22235
} | class ____(Extension):
def __init__(self, environment: Environment) -> None:
super().__init__(environment)
warnings.warn(
"The 'autoescape' extension is deprecated and will be"
" removed in Jinja 3.1. This is built in now.",
DeprecationWarning,
stacklevel=3,
)
| AutoEscapeExtension |
python | marshmallow-code__marshmallow | src/marshmallow/schema.py | {
"start": 8072,
"end": 49955
} | class ____(metaclass=SchemaMeta):
"""Base schema class with which to define schemas.
Example usage:
.. code-block:: python
import datetime as dt
from dataclasses import dataclass
from marshmallow import Schema, fields
@dataclass
class Album:
title: str
release_date: dt.date
class AlbumSchema(Schema):
title = fields.Str()
release_date = fields.Date()
album = Album("Beggars Banquet", dt.date(1968, 12, 6))
schema = AlbumSchema()
data = schema.dump(album)
data # {'release_date': '1968-12-06', 'title': 'Beggars Banquet'}
:param only: Whitelist of the declared fields to select when
instantiating the Schema. If None, all fields are used. Nested fields
can be represented with dot delimiters.
:param exclude: Blacklist of the declared fields to exclude
when instantiating the Schema. If a field appears in both `only` and
`exclude`, it is not used. Nested fields can be represented with dot
delimiters.
:param many: Should be set to `True` if ``obj`` is a collection
so that the object will be serialized to a list.
:param load_only: Fields to skip during serialization (write-only fields)
:param dump_only: Fields to skip during deserialization (read-only fields)
:param partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
.. versionchanged:: 3.0.0
Remove ``prefix`` parameter.
.. versionchanged:: 4.0.0
Remove ``context`` parameter.
"""
TYPE_MAPPING: dict[type, type[Field]] = {
str: ma_fields.String,
bytes: ma_fields.String,
dt.datetime: ma_fields.DateTime,
float: ma_fields.Float,
bool: ma_fields.Boolean,
tuple: ma_fields.Raw,
list: ma_fields.Raw,
set: ma_fields.Raw,
int: ma_fields.Integer,
uuid.UUID: ma_fields.UUID,
dt.time: ma_fields.Time,
dt.date: ma_fields.Date,
dt.timedelta: ma_fields.TimeDelta,
decimal.Decimal: ma_fields.Decimal,
}
#: Overrides for default schema-level error messages
error_messages: dict[str, str] = {}
_default_error_messages: dict[str, str] = {
"type": "Invalid input type.",
"unknown": "Unknown field.",
}
OPTIONS_CLASS: type = SchemaOpts
set_class = OrderedSet
dict_class: type[dict] = dict
"""`dict` type to return when serializing."""
# These get set by SchemaMeta
opts: typing.Any
_declared_fields: dict[str, Field] = {}
_hooks: dict[str, list[tuple[str, bool, dict]]] = {}
class Meta:
"""Options object for a Schema.
Example usage: ::
from marshmallow import Schema
class MySchema(Schema):
class Meta:
fields = ("id", "email", "date_created")
exclude = ("password", "secret_attribute")
.. admonition:: A note on type checking
Type checkers will only check the attributes of the `Meta <marshmallow.Schema.Meta>`
class if you explicitly subclass `marshmallow.Schema.Meta`.
.. code-block:: python
from marshmallow import Schema
class MySchema(Schema):
# Not checked by type checkers
class Meta:
additional = True
class MySchema2(Schema):
# Type checkers will check attributes
class Meta(Schema.Opts):
additional = True # Incompatible types in assignment
.. versionremoved:: 3.0.0b7 Remove ``strict``.
.. versionadded:: 3.0.0b12 Add `unknown`.
.. versionchanged:: 3.0.0b17 Rename ``dateformat`` to `datetimeformat`.
.. versionadded:: 3.9.0 Add `timeformat`.
.. versionchanged:: 3.26.0 Deprecate ``ordered``. Field order is preserved by default.
.. versionremoved:: 4.0.0 Remove ``ordered``.
"""
fields: typing.ClassVar[tuple[str, ...] | list[str]]
"""Fields to include in the (de)serialized result"""
additional: typing.ClassVar[tuple[str, ...] | list[str]]
"""Fields to include in addition to the explicitly declared fields.
`additional <marshmallow.Schema.Meta.additional>` and `fields <marshmallow.Schema.Meta.fields>`
are mutually-exclusive options.
"""
include: typing.ClassVar[dict[str, Field]]
"""Dictionary of additional fields to include in the schema. It is
usually better to define fields as class variables, but you may need to
use this option, e.g., if your fields are Python keywords.
"""
exclude: typing.ClassVar[tuple[str, ...] | list[str]]
"""Fields to exclude in the serialized result.
Nested fields can be represented with dot delimiters.
"""
many: typing.ClassVar[bool]
"""Whether data should be (de)serialized as a collection by default."""
dateformat: typing.ClassVar[str]
"""Default format for `Date <marshmallow.fields.Date>` fields."""
datetimeformat: typing.ClassVar[str]
"""Default format for `DateTime <marshmallow.fields.DateTime>` fields."""
timeformat: typing.ClassVar[str]
"""Default format for `Time <marshmallow.fields.Time>` fields."""
# FIXME: Use a more constrained type here.
# ClassVar[RenderModule] doesn't work.
render_module: typing.Any
""" Module to use for `loads <marshmallow.Schema.loads>` and `dumps <marshmallow.Schema.dumps>`.
Defaults to `json` from the standard library.
"""
index_errors: typing.ClassVar[bool]
"""If `True`, errors dictionaries will include the index of invalid items in a collection."""
load_only: typing.ClassVar[tuple[str, ...] | list[str]]
"""Fields to exclude from serialized results"""
dump_only: typing.ClassVar[tuple[str, ...] | list[str]]
"""Fields to exclude from serialized results"""
unknown: typing.ClassVar[types.UnknownOption]
"""Whether to exclude, include, or raise an error for unknown fields in the data.
Use `EXCLUDE`, `INCLUDE` or `RAISE`.
"""
register: typing.ClassVar[bool]
"""Whether to register the `Schema <marshmallow.Schema>` with marshmallow's internal
class registry. Must be `True` if you intend to refer to this `Schema <marshmallow.Schema>`
by class name in `Nested` fields. Only set this to `False` when memory
usage is critical. Defaults to `True`.
"""
def __init__(
    self,
    *,
    only: types.StrSequenceOrSet | None = None,
    exclude: types.StrSequenceOrSet = (),
    many: bool | None = None,
    load_only: types.StrSequenceOrSet = (),
    dump_only: types.StrSequenceOrSet = (),
    partial: bool | types.StrSequenceOrSet | None = None,
    unknown: types.UnknownOption | None = None,
):
    """Construct the schema, merging constructor arguments with ``Meta`` options.

    Explicit keyword arguments generally take precedence over the
    corresponding ``Meta`` options; ``exclude`` is unioned with
    ``Meta.exclude`` instead.
    """
    # Raise error if only or exclude is passed as string, not list of strings
    if only is not None and not is_collection(only):
        raise StringNotCollectionError('"only" should be a list of strings')
    if not is_collection(exclude):
        raise StringNotCollectionError('"exclude" should be a list of strings')
    # copy declared fields from metaclass
    self.declared_fields = copy.deepcopy(self._declared_fields)
    self.many = self.opts.many if many is None else many
    self.only = only
    # Constructor-level exclude is additive with the Meta-level option.
    self.exclude: set[typing.Any] | typing.MutableSet[typing.Any] = set(
        self.opts.exclude
    ) | set(exclude)
    # NOTE(review): unlike `exclude`, a non-empty constructor argument here
    # fully replaces the Meta option (``or``, not union).
    self.load_only = set(load_only) or set(self.opts.load_only)
    self.dump_only = set(dump_only) or set(self.opts.dump_only)
    self.partial = partial
    self.unknown: types.UnknownOption = (
        self.opts.unknown if unknown is None else unknown
    )
    self._normalize_nested_options()
    #: Dictionary mapping field_names -> :class:`Field` objects
    self.fields: dict[str, Field] = {}
    self.load_fields: dict[str, Field] = {}
    self.dump_fields: dict[str, Field] = {}
    self._init_fields()
    # Merge error messages across the MRO (base-most first) so subclasses
    # can override individual messages; instance-level messages win last.
    messages = {}
    messages.update(self._default_error_messages)
    for cls in reversed(self.__class__.__mro__):
        messages.update(getattr(cls, "error_messages", {}))
    messages.update(self.error_messages or {})
    self.error_messages = messages
def __repr__(self) -> str:
    """Return a debug representation including the ``many`` flag."""
    cls_name = type(self).__name__
    return "<{}(many={})>".format(cls_name, self.many)
@classmethod
def from_dict(
    cls,
    fields: dict[str, Field],
    *,
    name: str = "GeneratedSchema",
) -> type[Schema]:
    """Generate a `Schema <marshmallow.Schema>` class given a dictionary of fields.

    .. code-block:: python

        from marshmallow import Schema, fields

        PersonSchema = Schema.from_dict({"name": fields.Str()})
        print(PersonSchema().load({"name": "David"}))  # => {'name': 'David'}

    Generated schemas are not added to the class registry and therefore cannot
    be referred to by name in `Nested` fields.

    :param fields: Dictionary mapping field names to field instances.
    :param name: Optional name for the class, which will appear in
        the ``repr`` for the class.

    .. versionadded:: 3.0.0
    """
    base_meta = getattr(cls, "Meta", object)
    # Generated schemas always opt out of the class registry.
    generated_meta = type("GeneratedMeta", (base_meta,), {"register": False})
    namespace = dict(fields)
    namespace["Meta"] = generated_meta
    return type(name, (cls,), namespace)
##### Override-able methods #####
def handle_error(
    self, error: ValidationError, data: typing.Any, *, many: bool, **kwargs
):
    """Custom error handler function for the schema.

    No-op by default; override to log or otherwise react to the error
    before it is raised to the caller.

    :param error: The `ValidationError` raised during (de)serialization.
    :param data: The original input data.
    :param many: Value of ``many`` on dump or load.
    :param partial: Value of ``partial`` on load.

    .. versionchanged:: 3.0.0rc9
        Receives `many` and `partial` (on deserialization) as keyword arguments.
    """
def get_attribute(self, obj: typing.Any, attr: str, default: typing.Any):
    """Defines how to pull values from an object to serialize.

    Delegates to ``get_value``; override to customize attribute access.

    .. versionchanged:: 3.0.0a1
        Changed position of ``obj`` and ``attr``.
    """
    value = get_value(obj, attr, default)
    return value
##### Serialization/Deserialization API #####
@staticmethod
def _call_and_store(getter_func, data, *, field_name, error_store, index=None):
    """Call ``getter_func`` with ``data``, recording any `ValidationError`.

    :param getter_func: Function producing the serialized/deserialized
        value from ``data``.
    :param data: The data passed to ``getter_func``.
    :param field_name: Field name under which errors are stored.
    :param error_store: Structure collecting validation errors.
    :param index: Index of the item being validated, if validating a
        collection, otherwise `None`.
    """
    try:
        result = getter_func(data)
    except ValidationError as error:
        error_store.store_error(error.messages, field_name, index=index)
        # A failed Nested field keeps its partially marshalled data on its
        # ``valid_data`` attribute; fall back to ``missing`` when absent.
        result = error.valid_data or missing
    return result
def _serialize(self, obj: typing.Any, *, many: bool = False):
    """Serialize ``obj`` into a ``dict_class`` mapping (or a list of them).

    :param obj: The object(s) to serialize.
    :param many: `True` if ``obj`` should be serialized as a collection.
    :return: A dictionary of the serialized data
    """
    if many and obj is not None:
        # Serialize each item of the collection individually.
        return [self._serialize(item, many=False) for item in obj]
    result = self.dict_class()
    for attr_name, field_obj in self.dump_fields.items():
        value = field_obj.serialize(attr_name, obj, accessor=self.get_attribute)
        if value is missing:
            # The field chose not to emit a value; omit the key entirely.
            continue
        if field_obj.data_key is not None:
            result[field_obj.data_key] = value
        else:
            result[attr_name] = value
    return result
def dump(self, obj: typing.Any, *, many: bool | None = None):
    """Serialize an object to native Python data types according to this
    Schema's fields.

    :param obj: The object to serialize.
    :param many: Whether to serialize `obj` as a collection. If `None`, the value
        for `self.many` is used.
    :return: Serialized data

    .. versionchanged:: 3.0.0b7
        This method returns the serialized data rather than a ``(data, errors)`` tuple.
        A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
        if ``obj`` is invalid.
    .. versionchanged:: 3.0.0rc9
        Validation no longer occurs upon serialization.
    """
    use_many = self.many if many is None else bool(many)
    processed_obj = obj
    # pre_dump hooks run only when registered.
    if self._hooks[PRE_DUMP]:
        processed_obj = self._invoke_dump_processors(
            PRE_DUMP, obj, many=use_many, original_data=obj
        )
    result = self._serialize(processed_obj, many=use_many)
    # post_dump hooks see the serialized result plus the original object.
    if self._hooks[POST_DUMP]:
        result = self._invoke_dump_processors(
            POST_DUMP, result, many=use_many, original_data=obj
        )
    return result
def dumps(self, obj: typing.Any, *args, many: bool | None = None, **kwargs):
    """Same as :meth:`dump`, except return a JSON-encoded string.

    :param obj: The object to serialize.
    :param many: Whether to serialize `obj` as a collection. If `None`, the value
        for `self.many` is used.
    :return: A ``json`` string

    .. versionchanged:: 3.0.0b7
        This method returns the serialized data rather than a ``(data, errors)`` tuple.
        A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
        if ``obj`` is invalid.
    """
    data = self.dump(obj, many=many)
    # Extra positional/keyword args are forwarded to the render module.
    render = self.opts.render_module
    return render.dumps(data, *args, **kwargs)
def _deserialize(
    self,
    data: Mapping[str, typing.Any] | Sequence[Mapping[str, typing.Any]],
    *,
    error_store: ErrorStore,
    many: bool = False,
    partial=None,
    unknown: types.UnknownOption = RAISE,
    index=None,
) -> typing.Any | list[typing.Any]:
    """Deserialize ``data``.

    :param data: The data to deserialize.
    :param error_store: Structure to store errors.
    :param many: `True` if ``data`` should be deserialized as a collection.
    :param partial: Whether to ignore missing fields and not require
        any fields declared. Propagates down to ``Nested`` fields as well. If
        its value is an iterable, only missing fields listed in that iterable
        will be ignored. Use dot delimiters to specify nested fields.
    :param unknown: Whether to exclude, include, or raise an error for unknown
        fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
    :param index: Index of the item being serialized (for storing errors) if
        serializing a collection, otherwise `None`.
    :return: The deserialized data as `dict_class` instance or list of `dict_class`
        instances if `many` is `True`.
    """
    index_errors = self.opts.index_errors
    # Drop per-item indices entirely when Meta.index_errors is off.
    index = index if index_errors else None
    if many:
        if not is_sequence_but_not_string(data):
            error_store.store_error([self.error_messages["type"]], index=index)
            ret_l = []
        else:
            # Recurse item-by-item, threading each item's index through
            # for error reporting.
            ret_l = [
                self._deserialize(
                    d,
                    error_store=error_store,
                    many=False,
                    partial=partial,
                    unknown=unknown,
                    index=idx,
                )
                for idx, d in enumerate(data)
            ]
        return ret_l
    ret_d = self.dict_class()
    # Check data is a dict
    if not isinstance(data, Mapping):
        error_store.store_error([self.error_messages["type"]], index=index)
    else:
        partial_is_collection = is_collection(partial)
        for attr_name, field_obj in self.load_fields.items():
            # Values are looked up in the input under data_key, falling
            # back to the attribute name.
            field_name = (
                field_obj.data_key if field_obj.data_key is not None else attr_name
            )
            raw_value = data.get(field_name, missing)
            if raw_value is missing:
                # Ignore missing field if we're allowed to.
                if partial is True or (
                    partial_is_collection and attr_name in partial
                ):
                    continue
            d_kwargs = {}
            # Allow partial loading of nested schemas.
            if partial_is_collection:
                # Strip the parent prefix so nested schemas receive their
                # own relative field names.
                prefix = field_name + "."
                len_prefix = len(prefix)
                sub_partial = [
                    f[len_prefix:] for f in partial if f.startswith(prefix)
                ]
                d_kwargs["partial"] = sub_partial
            elif partial is not None:
                d_kwargs["partial"] = partial

            def getter(
                val, field_obj=field_obj, field_name=field_name, d_kwargs=d_kwargs
            ):
                # Default arguments bind the loop variables eagerly,
                # avoiding the late-binding closure pitfall.
                return field_obj.deserialize(
                    val,
                    field_name,
                    data,
                    **d_kwargs,
                )

            value = self._call_and_store(
                getter_func=getter,
                data=raw_value,
                field_name=field_name,
                error_store=error_store,
                index=index,
            )
            if value is not missing:
                key = field_obj.attribute or attr_name
                set_value(ret_d, key, value)
        if unknown != EXCLUDE:
            # Keys present in the input but not claimed by any load field.
            fields = {
                field_obj.data_key if field_obj.data_key is not None else field_name
                for field_name, field_obj in self.load_fields.items()
            }
            for key in set(data) - fields:
                value = data[key]
                if unknown == INCLUDE:
                    ret_d[key] = value
                elif unknown == RAISE:
                    error_store.store_error(
                        [self.error_messages["unknown"]],
                        key,
                        (index if index_errors else None),
                    )
    return ret_d
def load(
    self,
    data: Mapping[str, typing.Any] | Sequence[Mapping[str, typing.Any]],
    *,
    many: bool | None = None,
    partial: bool | types.StrSequenceOrSet | None = None,
    unknown: types.UnknownOption | None = None,
):
    """Deserialize a data structure to an object defined by this Schema's fields.

    :param data: The data to deserialize.
    :param many: Whether to deserialize `data` as a collection. If `None`, the
        value for `self.many` is used.
    :param partial: Whether to ignore missing fields and not require
        any fields declared. Propagates down to ``Nested`` fields as well. If
        its value is an iterable, only missing fields listed in that iterable
        will be ignored. Use dot delimiters to specify nested fields.
    :param unknown: Whether to exclude, include, or raise an error for unknown
        fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
        If `None`, the value for `self.unknown` is used.
    :return: Deserialized data

    .. versionchanged:: 3.0.0b7
        This method returns the deserialized data rather than a ``(data, errors)`` tuple.
        A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
        if invalid data are passed.
    """
    # All real work — hooks, deserialization, validation — lives in _do_load.
    return self._do_load(
        data,
        many=many,
        partial=partial,
        unknown=unknown,
        postprocess=True,
    )
def loads(
    self,
    s: str | bytes | bytearray,
    /,
    *,
    many: bool | None = None,
    partial: bool | types.StrSequenceOrSet | None = None,
    unknown: types.UnknownOption | None = None,
    **kwargs,
):
    """Same as :meth:`load`, except it uses `marshmallow.Schema.Meta.render_module` to deserialize
    the passed string before passing data to :meth:`load`.

    :param s: A string of the data to deserialize.
    :param many: Whether to deserialize `obj` as a collection. If `None`, the
        value for `self.many` is used.
    :param partial: Whether to ignore missing fields and not require
        any fields declared. Propagates down to ``Nested`` fields as well. If
        its value is an iterable, only missing fields listed in that iterable
        will be ignored. Use dot delimiters to specify nested fields.
    :param unknown: Whether to exclude, include, or raise an error for unknown
        fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
        If `None`, the value for `self.unknown` is used.
    :return: Deserialized data

    .. versionchanged:: 3.0.0b7
        This method returns the deserialized data rather than a ``(data, errors)`` tuple.
        A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
        if invalid data are passed.
    .. versionchanged:: 4.0.0
        Rename ``json_module`` parameter to ``s``.
    """
    # Extra keyword args are forwarded to the render module's ``loads``.
    decoded = self.opts.render_module.loads(s, **kwargs)
    return self.load(decoded, many=many, partial=partial, unknown=unknown)
def _run_validator(
    self,
    validator_func: types.SchemaValidator,
    output,
    *,
    original_data,
    error_store: ErrorStore,
    many: bool,
    partial: bool | types.StrSequenceOrSet | None,
    unknown: types.UnknownOption | None,
    pass_original: bool,
    index: int | None = None,
):
    """Invoke one schema-level validator and map any `ValidationError` it
    raises onto the appropriate error-store key (the field's ``data_key``
    when one can be resolved).
    """
    try:
        if pass_original:  # Pass original, raw data (before unmarshalling)
            validator_func(
                output, original_data, partial=partial, many=many, unknown=unknown
            )
        else:
            validator_func(output, partial=partial, many=many, unknown=unknown)
    except ValidationError as err:
        field_name = err.field_name
        data_key: str
        if field_name == SCHEMA:
            # Schema-wide errors are stored under the SCHEMA sentinel key.
            data_key = SCHEMA
        else:
            # Resolve the named field so the error is reported under its
            # data_key; fall back to declared (possibly unbound) fields,
            # then to the raw name.
            field_obj: Field | None = None
            try:
                field_obj = self.fields[field_name]
            except KeyError:
                if field_name in self.declared_fields:
                    field_obj = self.declared_fields[field_name]
            if field_obj:
                data_key = (
                    field_obj.data_key
                    if field_obj.data_key is not None
                    else field_name
                )
            else:
                data_key = field_name
        error_store.store_error(err.messages, data_key, index=index)
def validate(
    self,
    data: Mapping[str, typing.Any] | Sequence[Mapping[str, typing.Any]],
    *,
    many: bool | None = None,
    partial: bool | types.StrSequenceOrSet | None = None,
) -> dict[str, list[str]]:
    """Validate `data` against the schema, returning a dictionary of
    validation errors.

    :param data: The data to validate.
    :param many: Whether to validate `data` as a collection. If `None`, the
        value for `self.many` is used.
    :param partial: Whether to ignore missing fields and not require
        any fields declared. Propagates down to ``Nested`` fields as well. If
        its value is an iterable, only missing fields listed in that iterable
        will be ignored. Use dot delimiters to specify nested fields.
    :return: A dictionary of validation errors.
    """
    # Run the load machinery without post-processing; only the errors matter.
    try:
        self._do_load(data, many=many, partial=partial, postprocess=False)
    except ValidationError as exc:
        errors = typing.cast("dict[str, list[str]]", exc.messages)
    else:
        errors = {}
    return errors
##### Private Helpers #####
def _do_load(
    self,
    data: (Mapping[str, typing.Any] | Sequence[Mapping[str, typing.Any]]),
    *,
    many: bool | None = None,
    partial: bool | types.StrSequenceOrSet | None = None,
    unknown: types.UnknownOption | None = None,
    postprocess: bool = True,
):
    """Deserialize `data`, returning the deserialized result.
    This method is private API.

    :param data: The data to deserialize.
    :param many: Whether to deserialize `data` as a collection. If `None`, the
        value for `self.many` is used.
    :param partial: Whether to validate required fields. If its
        value is an iterable, only fields listed in that iterable will be
        allowed missing. If `True`, all fields will be allowed missing.
        If `None`, the value for `self.partial` is used.
    :param unknown: Whether to exclude, include, or raise an error for unknown
        fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
        If `None`, the value for `self.unknown` is used.
    :param postprocess: Whether to run post_load methods.
    :return: Deserialized data
    """
    error_store = ErrorStore()
    errors: dict[str, list[str]] = {}
    # Resolve per-call overrides against instance-level defaults.
    many = self.many if many is None else bool(many)
    unknown = self.unknown if unknown is None else unknown
    if partial is None:
        partial = self.partial
    # Run preprocessors
    if self._hooks[PRE_LOAD]:
        try:
            processed_data = self._invoke_load_processors(
                PRE_LOAD,
                data,
                many=many,
                original_data=data,
                partial=partial,
                unknown=unknown,
            )
        except ValidationError as err:
            # A failing pre_load hook skips deserialization entirely.
            errors = err.normalized_messages()
            result: list | dict | None = None
    else:
        processed_data = data
    if not errors:
        # Deserialize data
        result = self._deserialize(
            processed_data,
            error_store=error_store,
            many=many,
            partial=partial,
            unknown=unknown,
        )
        # Run field-level validation
        self._invoke_field_validators(
            error_store=error_store, data=result, many=many
        )
        # Run schema-level validation
        if self._hooks[VALIDATES_SCHEMA]:
            # Record whether field-level errors already exist so validators
            # declared with skip_on_field_errors can bail out.
            field_errors = bool(error_store.errors)
            self._invoke_schema_validators(
                error_store=error_store,
                pass_collection=True,
                data=result,
                original_data=data,
                many=many,
                partial=partial,
                unknown=unknown,
                field_errors=field_errors,
            )
            self._invoke_schema_validators(
                error_store=error_store,
                pass_collection=False,
                data=result,
                original_data=data,
                many=many,
                partial=partial,
                unknown=unknown,
                field_errors=field_errors,
            )
        errors = error_store.errors
    # Run post processors
    if not errors and postprocess and self._hooks[POST_LOAD]:
        try:
            result = self._invoke_load_processors(
                POST_LOAD,
                result,
                many=many,
                original_data=data,
                partial=partial,
                unknown=unknown,
            )
        except ValidationError as err:
            errors = err.normalized_messages()
    if errors:
        # Attach whatever partially-valid data we produced to the exception.
        exc = ValidationError(errors, data=data, valid_data=result)
        self.handle_error(exc, data, many=many, partial=partial)
        raise exc
    return result
def _normalize_nested_options(self) -> None:
    """Apply then flatten nested schema options.
    This method is private API.

    Dotted names in ``only``/``exclude`` (e.g. ``"parent.child"``) are
    pushed down onto the corresponding nested field, then reduced to the
    top-level portion for this schema's own option set.
    """
    if self.only is not None:
        # Apply the only option to nested fields.
        self.__apply_nested_option("only", self.only, "intersection")
        # Remove the child field names from the only option.
        self.only = self.set_class([field.split(".", 1)[0] for field in self.only])
    if self.exclude:
        # Apply the exclude option to nested fields.
        self.__apply_nested_option("exclude", self.exclude, "union")
        # Remove the parent field names from the exclude option.
        self.exclude = self.set_class(
            [field for field in self.exclude if "." not in field]
        )
def __apply_nested_option(self, option_name, field_names, set_operation) -> None:
    """Apply nested options to nested fields.

    :param option_name: Name of the option attribute to set on the nested
        field (``"only"`` or ``"exclude"``).
    :param field_names: Option values, possibly containing dotted names.
    :param set_operation: ``"union"`` or ``"intersection"`` — how new values
        are combined with options already present on the field.
    """
    # Split nested field names on the first dot.
    nested_fields = [name.split(".", 1) for name in field_names if "." in name]
    # Partition the nested field names by parent field.
    nested_options = defaultdict(list)  # type: defaultdict
    for parent, nested_names in nested_fields:
        nested_options[parent].append(nested_names)
    # Apply the nested field options.
    for key, options in iter(nested_options.items()):
        new_options = self.set_class(options)
        original_options = getattr(self.declared_fields[key], option_name, ())
        if original_options:
            if set_operation == "union":
                new_options |= self.set_class(original_options)
            if set_operation == "intersection":
                new_options &= self.set_class(original_options)
        setattr(self.declared_fields[key], option_name, new_options)
def _init_fields(self) -> None:
    """Update self.fields, self.load_fields, and self.dump_fields based on schema options.
    This method is private API.
    """
    # Meta.fields, when given, restricts the universe of usable field names.
    if self.opts.fields:
        available_field_names = self.set_class(self.opts.fields)
    else:
        available_field_names = self.set_class(self.declared_fields.keys())
    invalid_fields = self.set_class()
    if self.only is not None:
        # Return only fields specified in only option
        field_names: typing.AbstractSet[typing.Any] = self.set_class(self.only)
        invalid_fields |= field_names - available_field_names
    else:
        field_names = available_field_names
    # If "exclude" option or param is specified, remove those fields.
    if self.exclude:
        # Note that this isn't available_field_names, since we want to
        # apply "only" for the actual calculation.
        field_names = field_names - self.exclude
        invalid_fields |= self.exclude - available_field_names
    if invalid_fields:
        message = f"Invalid fields for {self}: {invalid_fields}."
        raise ValueError(message)
    fields_dict = self.dict_class()
    for field_name in field_names:
        field_obj = self.declared_fields[field_name]
        self._bind_field(field_name, field_obj)
        fields_dict[field_name] = field_obj
    # Partition bound fields by direction: loading skips dump_only fields,
    # dumping skips load_only fields.
    load_fields, dump_fields = self.dict_class(), self.dict_class()
    for field_name, field_obj in fields_dict.items():
        if not field_obj.dump_only:
            load_fields[field_name] = field_obj
        if not field_obj.load_only:
            dump_fields[field_name] = field_obj
    # Reject colliding output keys on dump ...
    dump_data_keys = [
        field_obj.data_key if field_obj.data_key is not None else name
        for name, field_obj in dump_fields.items()
    ]
    if len(dump_data_keys) != len(set(dump_data_keys)):
        data_keys_duplicates = {
            x for x in dump_data_keys if dump_data_keys.count(x) > 1
        }
        raise ValueError(
            "The data_key argument for one or more fields collides "
            "with another field's name or data_key argument. "
            "Check the following field names and "
            f"data_key arguments: {list(data_keys_duplicates)}"
        )
    # ... and colliding target attributes on load.
    load_attributes = [obj.attribute or name for name, obj in load_fields.items()]
    if len(load_attributes) != len(set(load_attributes)):
        attributes_duplicates = {
            x for x in load_attributes if load_attributes.count(x) > 1
        }
        raise ValueError(
            "The attribute argument for one or more fields collides "
            "with another field's name or attribute argument. "
            "Check the following field names and "
            f"attribute arguments: {list(attributes_duplicates)}"
        )
    self.fields = fields_dict
    self.dump_fields = dump_fields
    self.load_fields = load_fields
def on_bind_field(self, field_name: str, field_obj: Field) -> None:
    """Hook invoked whenever a field is bound to this `Schema <marshmallow.Schema>`.

    Does nothing by default; override to tweak fields at bind time.
    """
    return None
def _bind_field(self, field_name: str, field_obj: Field) -> None:
    """Bind field to the schema, setting any necessary attributes on the
    field (e.g. parent and name).

    Also set field load_only and dump_only values if field_name was
    specified in `class Meta <marshmallow.Schema.Meta>`.
    """
    # Schema-level load_only/dump_only override the field's own flags.
    if field_name in self.load_only:
        field_obj.load_only = True
    if field_name in self.dump_only:
        field_obj.dump_only = True
    field_obj._bind_to_schema(field_name, self)
    # Give subclasses a chance to customize the freshly bound field.
    self.on_bind_field(field_name, field_obj)
def _invoke_dump_processors(
    self, tag: str, data, *, many: bool, original_data=None
):
    """Run dump processors for ``tag``.

    Per-item (non-pass_collection) processors run first — they expect a
    list of items — and pass_collection processors run last, since they
    may do things like wrap the result in an envelope.
    """
    for pass_collection in (False, True):
        data = self._invoke_processors(
            tag,
            pass_collection=pass_collection,
            data=data,
            many=many,
            original_data=original_data,
        )
    return data
def _invoke_load_processors(
    self,
    tag: str,
    data: Mapping[str, typing.Any] | Sequence[Mapping[str, typing.Any]],
    *,
    many: bool,
    original_data,
    partial: bool | types.StrSequenceOrSet | None,
    unknown: types.UnknownOption | None,
):
    """Run load processors for ``tag``.

    The ordering is the inverse of dump processing: pass_collection
    processors run first (e.g. to strip an envelope), then per-item ones.
    """
    for pass_collection in (True, False):
        data = self._invoke_processors(
            tag,
            pass_collection=pass_collection,
            data=data,
            many=many,
            original_data=original_data,
            partial=partial,
            unknown=unknown,
        )
    return data
def _invoke_field_validators(self, *, error_store: ErrorStore, data, many: bool):
    """Run all ``@validates``-registered methods over the deserialized data,
    recording errors and removing values that fail validation."""
    for attr_name, _, validator_kwargs in self._hooks[VALIDATES]:
        validator = getattr(self, attr_name)
        field_names = validator_kwargs["field_names"]
        for field_name in field_names:
            try:
                field_obj = self.fields[field_name]
            except KeyError as error:
                # Declared-but-unbound (e.g. excluded) fields are skipped
                # silently; a completely unknown name is a programming error.
                if field_name in self.declared_fields:
                    continue
                raise ValueError(f'"{field_name}" field does not exist.') from error
            data_key = (
                field_obj.data_key if field_obj.data_key is not None else field_name
            )
            do_validate = functools.partial(validator, data_key=data_key)
            if many:
                for idx, item in enumerate(data):
                    try:
                        value = item[field_obj.attribute or field_name]
                    except KeyError:
                        # Value absent from this item; nothing to validate.
                        pass
                    else:
                        validated_value = self._call_and_store(
                            getter_func=do_validate,
                            data=value,
                            field_name=data_key,
                            error_store=error_store,
                            index=(idx if self.opts.index_errors else None),
                        )
                        if validated_value is missing:
                            # Drop values rejected by the validator.
                            item.pop(field_name, None)
            else:
                try:
                    value = data[field_obj.attribute or field_name]
                except KeyError:
                    pass
                else:
                    validated_value = self._call_and_store(
                        getter_func=do_validate,
                        data=value,
                        field_name=data_key,
                        error_store=error_store,
                    )
                    if validated_value is missing:
                        data.pop(field_name, None)
def _invoke_schema_validators(
    self,
    *,
    error_store: ErrorStore,
    pass_collection: bool,
    data,
    original_data,
    many: bool,
    partial: bool | types.StrSequenceOrSet | None,
    field_errors: bool = False,
    unknown: types.UnknownOption | None,
):
    """Run ``@validates_schema`` hooks whose ``pass_collection`` setting
    matches, honoring each hook's ``skip_on_field_errors`` option."""
    for attr_name, hook_many, validator_kwargs in self._hooks[VALIDATES_SCHEMA]:
        # Only run hooks matching the requested pass_collection phase.
        if hook_many != pass_collection:
            continue
        validator = getattr(self, attr_name)
        if field_errors and validator_kwargs["skip_on_field_errors"]:
            continue
        pass_original = validator_kwargs.get("pass_original", False)
        if many and not pass_collection:
            # Per-item hooks on a collection: validate each item with its
            # corresponding original datum.
            for idx, (item, orig) in enumerate(
                zip(data, original_data, strict=True)
            ):
                self._run_validator(
                    validator,
                    item,
                    original_data=orig,
                    error_store=error_store,
                    many=many,
                    partial=partial,
                    unknown=unknown,
                    index=idx,
                    pass_original=pass_original,
                )
        else:
            self._run_validator(
                validator,
                data,
                original_data=original_data,
                error_store=error_store,
                many=many,
                pass_original=pass_original,
                partial=partial,
                unknown=unknown,
            )
def _invoke_processors(
    self,
    tag: str,
    *,
    pass_collection: bool,
    data: Mapping[str, typing.Any] | Sequence[Mapping[str, typing.Any]],
    many: bool,
    original_data=None,
    **kwargs,
):
    """Run all hooks registered for ``tag`` whose ``pass_collection``
    setting matches, threading ``data`` through each in turn."""
    for attr_name, hook_many, processor_kwargs in self._hooks[tag]:
        if hook_many != pass_collection:
            continue
        # This will be a bound method.
        processor = getattr(self, attr_name)
        pass_original = processor_kwargs.get("pass_original", False)
        if many and not pass_collection:
            # Per-item hooks on a collection run once per item.
            if pass_original:
                data = [
                    processor(item, original, many=many, **kwargs)
                    for item, original in zip_longest(data, original_data)
                ]
            else:
                data = [processor(item, many=many, **kwargs) for item in data]
        elif pass_original:
            data = processor(data, original_data, many=many, **kwargs)
        else:
            data = processor(data, many=many, **kwargs)
    return data
# Deprecated alias kept so older code importing ``BaseSchema`` keeps working.
BaseSchema = Schema  # for backwards compatibility
| Schema |
python | google__jax | jax/experimental/mosaic/gpu/dialect_lowering.py | {
"start": 3601,
"end": 13360
class Recursed:
    """Sentinel type indicating a lowering rule handled the op itself.

    Line L5316 instantiates ``Recursed()``, but the class was declared under
    a placeholder name (``____``), which would raise ``NameError`` at import
    time — the class must be named ``Recursed``.
    """

    pass


# Shared sentinel instance returned by lowering rules.
RECURSED = Recursed()
# A rule's result: the op's replacement SSA values, or RECURSED when the rule
# already handled the op (including its regions) itself.
MlirLoweringRuleResult = Sequence[ir.Value] | Recursed
# Signature of a lowering rule: takes the lowering context and the op to lower.
MlirLoweringRule = Callable[
    [LoweringContext, ir.Operation | ir.OpView], MlirLoweringRuleResult
]
# Registry mapping operation names to lowering rules; populated by the
# `_register_lowering` decorator below.
_lowerings: dict[str, MlirLoweringRule] = {}
def _undo_conversion_cast(
    ir_value: ir.Value,
    expected_types: Sequence[ir.Type],
) -> tuple[builtin.UnrealizedConversionCastOp, Sequence[ir.Value]]:
  """Undoes the provided unrealized conversion cast.

  The `ir_value` must be an unrealized conversion cast. This function will
  create a new conversion cast that undoes the original one. The returned tuple
  contains:
  - The original unrealized conversion cast (useful for extracting attributes).
  - The list of operands of the original conversion cast (which are the result
    values of the undone conversion cast).

  The function will verify that the returned values have types that match
  `expected_types`.
  """
  conversion_cast = cast(
      builtin.UnrealizedConversionCastOp, ir_value.owner.opview  # pytype: disable=attribute-error
  )
  if not isinstance(conversion_cast, builtin.UnrealizedConversionCastOp):
    raise ValueError(f"{conversion_cast} is not a conversion_cast")
  # Cast back: results of the original cast become operands, with the
  # original operand types as the new result types.
  converted_outputs = builtin.unrealized_conversion_cast(
      [operand.type for operand in conversion_cast.operands],
      conversion_cast.results,
  )
  # Normalize the bindings-dependent return (single value vs. result list).
  if isinstance(converted_outputs, ir.OpResultList):
    converted_outputs = list(converted_outputs)
  elif not isinstance(converted_outputs, list):
    converted_outputs = [converted_outputs]
  for v, t in zip(converted_outputs, expected_types, strict=True):
    if v.type != t:
      raise ValueError(f"Expected type {t} for value {v}")
  return conversion_cast, converted_outputs
def fragmented_array_to_ir(
    fragmented_array: fa.FragmentedArray, ty: ir.Type
) -> ir.Value:
  """Converts a FragmentedArray to an IR value.

  The fragmented array's signedness is omitted from the IR representation.

  The array's registers become the operands of an unrealized conversion cast
  whose single result has type ``ty``; the register shape and layout are
  stashed as attributes so `_fragmented_array_from_ir` can reconstruct them.
  """
  conversion_cast = builtin.UnrealizedConversionCastOp(
      [ty], fragmented_array.registers.flatten().tolist()
  )
  conversion_cast.attributes["registers_shape"] = ir.ArrayAttr.get([
      ir.IntegerAttr.get(ir.IntegerType.get_signless(64), s)
      for s in fragmented_array.registers.shape
  ])
  conversion_cast.attributes["layout"] = layouts.to_layout_attr(
      fragmented_array.layout
  )
  return conversion_cast.result
def _default_is_signed(dtype: ir.Type) -> bool | None:
  """Returns `False` for Integer types, `None` otherwise.

  When converting from Pallas dtype to IR type, we lose the `is_signed`
  information. We can default to `False` for most use cases.
  """
  if ir.IntegerType.isinstance(dtype):
    return False
  return None
def _fragmented_array_from_ir(
    fragmented_array_as_ir: ir.Value,
    layout: ir.Attribute,
    is_signed: bool | None = None,
) -> fa.FragmentedArray:
  """Reconstructs a FragmentedArray from the IR value produced by
  `fragmented_array_to_ir`, converting it to the requested ``layout``."""
  # The producer's layout is stored as an attribute on the conversion cast.
  producer_layout_attr = fragmented_array_as_ir.owner.attributes["layout"]
  producer_layout = layouts.from_layout_attr(producer_layout_attr)
  vector_ty = ir.VectorType(fragmented_array_as_ir.type)
  reg_shape = producer_layout.registers_shape(tuple(vector_ty.shape))
  reg_ty = producer_layout.registers_element_type(vector_ty.element_type)
  # Peel off the cast to get back the individual register values.
  conversion_cast, converted_outputs = _undo_conversion_cast(
      fragmented_array_as_ir, [reg_ty] * math.prod(reg_shape)
  )
  # Copy the metadata attributes onto the reversing cast as well.
  reverse_conversion_cast = converted_outputs[0].owner.opview
  for attribute in conversion_cast.attributes:
    reverse_conversion_cast.attributes[attribute] = conversion_cast.attributes[attribute]
  registers = np.array(list(converted_outputs)).reshape(
      [attr.value for attr in conversion_cast.attributes["registers_shape"]]
  )
  # Integer element types default to unsigned when signedness is unspecified.
  if ir.IntegerType.isinstance(conversion_cast.outputs[0].type.element_type):
    is_signed = False if is_signed is None else is_signed
  return fa.FragmentedArray(
      _registers=registers, _layout=producer_layout, _is_signed=is_signed
  ).to_layout(layouts.from_layout_attr(layout))
def wrap_transformed_memref(
    transformed_memref: ir.Value,
    logical_type: ir.Type,
    transforms: ir.ArrayAttr,
) -> ir.Value:
  """Wraps a transformed memref to an unrealized cast with transforms.

  The return type of the cast is the untransformed logical type. The applied
  transforms are recorded on the cast's "transforms" attribute so
  `unwrap_transformed_memref` can recover and verify them later.
  """
  conversion_cast = builtin.UnrealizedConversionCastOp(
      [logical_type], [transformed_memref]
  )
  conversion_cast.attributes["transforms"] = transforms
  return conversion_cast.result
def unwrap_transformed_memref(
    ref: ir.Value, expected_transforms: ir.ArrayAttr
) -> ir.Value:
  """Unwraps a memref from an unrealized cast and verifies its transforms.

  Raises ValueError when the transforms recorded on the wrapping cast do not
  match ``expected_transforms``.
  """
  _, transforms = swizzle_and_transforms_from_transforms_attr(expected_transforms)
  transformed_type = transformed_smem_ref_type(ref.type, transforms)
  conversion_cast, [result] = _undo_conversion_cast(ref, [transformed_type])
  # Check that the actual transforms match the expected ones.
  if expected_transforms != conversion_cast.attributes["transforms"]:
    raise ValueError(
        f"Expected transforms {expected_transforms} do not match actual"
        f" transforms {conversion_cast.attributes['transforms']}"
    )
  return result
def _register_lowering(
    op: str | type[ir.OpView] | None
) -> Callable[[MlirLoweringRule], MlirLoweringRule]:
  """Decorator factory registering a lowering rule for ``op``.

  ``op`` may be an operation name, an OpView class (its OPERATION_NAME is
  used), or None to skip registration entirely.
  """
  def wrapper(f):
    if op is None:
      return f
    key = op if isinstance(op, str) else op.OPERATION_NAME  # pytype: disable=attribute-error
    _lowerings[key] = f
    return f
  return wrapper
def _lowered_barrier_type() -> ir.Type:
  # Barriers are represented as a single signless i64 after lowering.
  return ir.IntegerType.get_signless(64)
@_register_lowering(mgpu.InitializeBarrierOp)
def _initialize_barrier_op_lowering_rule(
    ctx: LoweringContext,
    op: mgpu.InitializeBarrierOp,
) -> Sequence[ir.Value]:
  """Lowers InitializeBarrierOp to one nvvm.mbarrier_init per barrier,
  followed by a block-wide gpu.barrier."""
  i32 = ir.IntegerType.get_signless(32)
  lowered_barrier_type = _lowered_barrier_type()
  for i in range(op.num_barriers.value):
    nvvm.mbarrier_init(
        # Each barrier occupies one lowered-barrier-sized slot past the base.
        utils.getelementptr(op.base_pointer, [i], lowered_barrier_type),
        # Arrival counts are scaled by the warpgroup size.
        utils.c(
            op.arrival_count.value * utils.WARPGROUP_SIZE,
            i32,
        ),
        # Predicated so initialization runs on a single thread per block.
        predicate=ctx.single_thread_per_block_predicate,
    )
  # Make initialization visible to all threads before any use.
  gpu.barrier()
  return []
@_register_lowering(mgpu.OptimizationBarrierOp)
def _optimization_barrier_op_lowering_rule(
    _: LoweringContext,
    op: mgpu.OptimizationBarrierOp,
) -> Sequence[ir.Value]:
  """Lowers OptimizationBarrierOp by applying fa.optimization_barrier to the
  op's operands, reconstructed as FragmentedArrays.

  Only vector operands are supported.
  """
  if not all(ir.VectorType.isinstance(operand.type) for operand in op.operands):
    raise NotImplementedError(
        f"Optimization barrier op {op} has non-vector operands."
    )
  # Pair each operand with its inferred input layout.
  fragmented_arrays = []
  for operand, layout in safe_zip(op.operands, inference_utils.in_layouts(op)):
    fragmented_arrays.append(_fragmented_array_from_ir(operand, layout))
  lowered_fragmented_arrays = fa.optimization_barrier(*fragmented_arrays)
  # A single array comes back unwrapped; normalize to a list.
  if isinstance(lowered_fragmented_arrays, fa.FragmentedArray):
    lowered_fragmented_arrays = [lowered_fragmented_arrays]
  return [
      fragmented_array_to_ir(arr, result.type)
      for arr, result in safe_zip(lowered_fragmented_arrays, op.results)
  ]
@_register_lowering(arith.ConstantOp)
def _arith_constant_op_lowering_rule(
_: LoweringContext, op: arith.ConstantOp
) -> Sequence[ir.Value]:
if not ir.DenseElementsAttr.isinstance(op.value):
raise NotImplementedError(f"Unsupported constant op: {op}")
value = ir.DenseElementsAttr(op.value)
if not value.is_splat:
raise NotImplementedError(f"Unsupported constant op: {op}")
ty = ir.VectorType(op.result.type)
is_signed = _default_is_signed(ty.element_type)
return [
fragmented_array_to_ir(
fa.FragmentedArray.splat(
arith.constant(ty.element_type, value.get_splat_value()),
tuple(ty.shape),
layouts.from_layout_attr(op.attributes["out_layouts"][0]),
is_signed=is_signed,
),
op.result.type,
)
]
def _check_transforms_and_swizzle_are_supported(
ref_ty: ir.MemRefType,
transforms: Sequence[launch_context.MemRefTransform],
swizzle: mgpu.SwizzlingMode,
minimum_swizzle: mgpu.SwizzlingMode = mgpu.SwizzlingMode.kNoSwizzle,
):
"""Checks that the list of provided transforms and swizzle are supported.
Currently, we allow the following:
- any swizzle that is larger than or equal to `minimum_swizzle`;
- optionally, a single tile transform (with rank equal to the rank of the
memref being annotated);
- optionally, a single transpose transform.
"""
if swizzle < minimum_swizzle:
raise NotImplementedError(
f"Unsupported swizzle {swizzle} smaller than {minimum_swizzle}."
)
partitioned_transforms = {
k: list(v)
for k, v in itertools.groupby(
transforms, lambda t: isinstance(t, launch_context.TileTransform)
)
}
tile_transforms = cast(
list[launch_context.TileTransform],
partitioned_transforms.get(True, []),
)
other_transforms = partitioned_transforms.get(False, [])
if len(tile_transforms) > 1:
raise NotImplementedError(
f"{tile_transforms} contains more than one tile transform."
)
if len(tile_transforms) == 1:
if len(tile_transforms[0].tiling) != len(ref_ty.shape):
raise NotImplementedError(
f"Only tile transforms with rank equal to the rank of the memref "
f"being annotated are supported but got {tile_transforms[0]} for "
f"{ref_ty}."
)
if len(other_transforms) > 1:
raise NotImplementedError(
f"{other_transforms} contains more than one transform."
)
if len(other_transforms) == 1:
if not isinstance(other_transforms[0], launch_context.TransposeTransform):
raise NotImplementedError(
f"{other_transforms[0]} is not a transpose transform."
)
| Recursed |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/tags.py | {
"start": 235,
"end": 505
} | class ____(graphene.ObjectType):
key = graphene.NonNull(graphene.String)
value = graphene.NonNull(graphene.String)
class Meta:
name = "PipelineTag"
def __init__(self, key, value):
super().__init__(key=key, value=value)
| GraphenePipelineTag |
python | lazyprogrammer__machine_learning_examples | rl2/cartpole/pg_theano.py | {
"start": 3058,
"end": 7320
} | class ____:
def __init__(self, D, hidden_layer_sizes):
# constant learning rate is fine
lr = 1e-4
# create the graph
self.layers = []
M1 = D
for M2 in hidden_layer_sizes:
layer = HiddenLayer(M1, M2)
self.layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, 1, lambda x: x)
self.layers.append(layer)
# get all params for gradient later
params = []
for layer in self.layers:
params += layer.params
# inputs and targets
X = T.matrix('X')
Y = T.vector('Y')
# calculate output and cost
Z = X
for layer in self.layers:
Z = layer.forward(Z)
Y_hat = T.flatten(Z)
cost = T.sum((Y - Y_hat)**2)
# specify update rule
grads = T.grad(cost, params)
updates = [(p, p - lr*g) for p, g in zip(params, grads)]
# compile functions
self.train_op = theano.function(
inputs=[X, Y],
updates=updates,
allow_input_downcast=True
)
self.predict_op = theano.function(
inputs=[X],
outputs=Y_hat,
allow_input_downcast=True
)
def partial_fit(self, X, Y):
X = np.atleast_2d(X)
Y = np.atleast_1d(Y)
self.train_op(X, Y)
def predict(self, X):
X = np.atleast_2d(X)
return self.predict_op(X)
def play_one_td(env, pmodel, vmodel, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = pmodel.sample_action(observation)
prev_observation = observation
observation, reward, done, info = env.step(action)
if done:
reward = -200
# update the models
V_next = vmodel.predict(observation)
G = reward + gamma*np.max(V_next)
advantage = G - vmodel.predict(prev_observation)
pmodel.partial_fit(prev_observation, action, advantage)
vmodel.partial_fit(prev_observation, G)
if reward == 1: # if we changed the reward to -200
totalreward += reward
iters += 1
return totalreward
def play_one_mc(env, pmodel, vmodel, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
states = []
actions = []
rewards = []
reward = 0
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = pmodel.sample_action(observation)
states.append(observation)
actions.append(action)
rewards.append(reward)
prev_observation = observation
observation, reward, done, info = env.step(action)
if done:
reward = -200
if reward == 1: # if we changed the reward to -200
totalreward += reward
iters += 1
# save the final (s,a,r) tuple
action = pmodel.sample_action(observation)
states.append(observation)
actions.append(action)
rewards.append(reward)
returns = []
advantages = []
G = 0
for s, r in zip(reversed(states), reversed(rewards)):
returns.append(G)
advantages.append(G - vmodel.predict(s)[0])
G = r + gamma*G
returns.reverse()
advantages.reverse()
# update the models
pmodel.partial_fit(states[1:], actions[1:], advantages[1:])
vmodel.partial_fit(states, returns)
return totalreward
def main():
env = gym.make('CartPole-v0')
D = env.observation_space.shape[0]
K = env.action_space.n
pmodel = PolicyModel(D, K, [])
vmodel = ValueModel(D, [10])
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 1000
totalrewards = np.empty(N)
costs = np.empty(N)
for n in range(N):
totalreward = play_one_mc(env, pmodel, vmodel, gamma)
totalrewards[n] = totalreward
if n % 100 == 0:
print("episode:", n, "total reward:", totalreward, "avg reward (last 100):", totalrewards[max(0, n-100):(n+1)].mean())
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
if __name__ == '__main__':
main()
| ValueModel |
python | getsentry__sentry | src/sentry/sentry_apps/api/bases/sentryapps.py | {
"start": 16947,
"end": 17111
} | class ____(SentryAppInstallationBaseEndpoint):
permission_classes = (SentryAppInstallationExternalIssuePermission,)
| SentryAppInstallationExternalIssueBaseEndpoint |
python | kamyu104__LeetCode-Solutions | Python/maximum-possible-number-by-binary-concatenation.py | {
"start": 391,
"end": 629
} | class ____(object):
def maxGoodNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return max(int("".join(x), 2) for x in itertools.permutations(map(lambda x: bin(x)[2:], nums)))
| Solution2 |
python | django-haystack__django-haystack | test_haystack/test_forms.py | {
"start": 1485,
"end": 3511
} | class ____(TestCase):
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.sqs = SearchQuerySet()
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_models_regression_1(self):
# Regression for issue #1.
msf = ModelSearchForm(
{"query": "test", "models": ["core.mockmodel", "core.anothermockmodel"]},
searchqueryset=self.sqs,
)
self.assertEqual(
msf.fields["models"].choices,
[
("core.anothermockmodel", "Another mock models"),
("core.mockmodel", "Mock models"),
],
)
self.assertEqual(msf.errors, {})
self.assertEqual(msf.is_valid(), True)
sqs_with_models = msf.search()
self.assertEqual(len(sqs_with_models.query.models), 2)
def test_model_choices(self):
self.assertEqual(len(model_choices()), 2)
self.assertEqual(
[option[1] for option in model_choices()],
["Another mock models", "Mock models"],
)
def test_model_choices_unicode(self):
stowed_verbose_name_plural = MockModel._meta.verbose_name_plural
MockModel._meta.verbose_name_plural = "☃"
self.assertEqual(len(model_choices()), 2)
self.assertEqual(
[option[1] for option in model_choices()], ["Another mock models", "☃"]
)
MockModel._meta.verbose_name_plural = stowed_verbose_name_plural
| ModelSearchFormTestCase |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 21703,
"end": 23104
} | class ____:
params = [True, False]
param_names = ["observed"]
def setup(self, observed):
N = 10**5
arr = np.random.random(N)
data = {"a": Categorical(np.random.randint(10000, size=N)), "b": arr}
self.df = DataFrame(data)
data = {
"a": Categorical(np.random.randint(10000, size=N), ordered=True),
"b": arr,
}
self.df_ordered = DataFrame(data)
data = {
"a": Categorical(
np.random.randint(100, size=N), categories=np.arange(10000)
),
"b": arr,
}
self.df_extra_cat = DataFrame(data)
def time_groupby_sort(self, observed):
self.df.groupby("a", observed=observed)["b"].count()
def time_groupby_nosort(self, observed):
self.df.groupby("a", observed=observed, sort=False)["b"].count()
def time_groupby_ordered_sort(self, observed):
self.df_ordered.groupby("a", observed=observed)["b"].count()
def time_groupby_ordered_nosort(self, observed):
self.df_ordered.groupby("a", observed=observed, sort=False)["b"].count()
def time_groupby_extra_cat_sort(self, observed):
self.df_extra_cat.groupby("a", observed=observed)["b"].count()
def time_groupby_extra_cat_nosort(self, observed):
self.df_extra_cat.groupby("a", observed=observed, sort=False)["b"].count()
| Categories |
python | GoogleCloudPlatform__python-docs-samples | speech/microphone/transcribe_streaming_infinite_test.py | {
"start": 735,
"end": 2594
} | class ____:
def __init__(self: object, audio_filename: str) -> None:
self.audio_filename = audio_filename
def __call__(self: object, *args: object) -> object:
return self
def open(
self: object,
stream_callback: object,
rate: int,
*args: object,
**kwargs: object
) -> object:
self.rate = rate
self.closed = threading.Event()
self.stream_thread = threading.Thread(
target=self.stream_audio,
args=(self.audio_filename, stream_callback, self.closed),
)
self.stream_thread.start()
return self
def close(self: object) -> None:
self.closed.set()
def stop_stream(self: object) -> None:
pass
def terminate(self: object) -> None:
pass
def stream_audio(
self: object,
audio_filename: str,
callback: object,
closed: object,
num_frames: int = 512,
) -> None:
with open(audio_filename, "rb") as audio_file:
while not closed.is_set():
# Approximate realtime by sleeping for the appropriate time for
# the requested number of frames
time.sleep(num_frames / float(self.rate))
# audio is 16-bit samples, whereas python byte is 8-bit
num_bytes = 2 * num_frames
chunk = audio_file.read(num_bytes) or b"\0" * num_bytes
callback(chunk, None, None, None)
@mock.patch.dict(
"sys.modules",
pyaudio=mock.MagicMock(PyAudio=MockPyAudio(os.path.join(RESOURCES, "quit.raw"))),
)
def test_main(capsys: pytest.CaptureFixture) -> None:
import transcribe_streaming_infinite
transcribe_streaming_infinite.main()
out, err = capsys.readouterr()
assert re.search(r"quit", out, re.DOTALL | re.I)
| MockPyAudio |
python | kamyu104__LeetCode-Solutions | Python/maximize-palindrome-length-from-subsequences.py | {
"start": 751,
"end": 1390
} | class ____(object):
def longestPalindrome(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
s = word1+word2
dp = [[0]*len(s) for _ in xrange(len(s))]
for j in xrange(len(s)):
dp[j][j] = 1
for i in reversed(xrange(j)):
if s[i] == s[j]:
dp[i][j] = 2 if i+1 == j else dp[i+1][j-1] + 2
else:
dp[i][j] = max(dp[i+1][j], dp[i][j-1])
return max([dp[i][j] for i in xrange(len(word1)) for j in xrange(len(word1), len(s)) if s[i] == s[j]] or [0])
| Solution2 |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 43055,
"end": 45596
} | class ____(ConvBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.convbert = ConvBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.convbert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| ConvBertForTokenClassification |
python | plotly__plotly.py | plotly/graph_objs/carpet/_stream.py | {
"start": 233,
"end": 3494
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "carpet"
_path_str = "carpet.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.carpet.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.carpet.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | django-import-export__django-import-export | tests/core/forms.py | {
"start": 163,
"end": 282
} | class ____(forms.Form):
author = forms.ModelChoiceField(queryset=Author.objects.all(), required=True)
| AuthorFormMixin |
python | kamyu104__LeetCode-Solutions | Python/cousins-in-binary-tree-ii.py | {
"start": 165,
"end": 812
} | class ____(object):
def replaceValueInTree(self, root):
"""
:type root: Optional[TreeNode]
:rtype: Optional[TreeNode]
"""
q = [(root, root.val)]
while q:
new_q = []
total = sum(node.val for node, _ in q)
for node, x in q:
node.val = total-x
x = (node.left.val if node.left else 0) + (node.right.val if node.right else 0)
if node.left:
new_q.append((node.left, x))
if node.right:
new_q.append((node.right, x))
q = new_q
return root
| Solution |
python | pytest-dev__pytest | src/_pytest/fixtures.py | {
"start": 13177,
"end": 25720
} | class ____(abc.ABC):
"""The type of the ``request`` fixture.
A request object gives access to the requesting test context and has a
``param`` attribute in case the fixture is parametrized.
"""
def __init__(
self,
pyfuncitem: Function,
fixturename: str | None,
arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]],
fixture_defs: dict[str, FixtureDef[Any]],
*,
_ispytest: bool = False,
) -> None:
check_ispytest(_ispytest)
#: Fixture for which this request is being performed.
self.fixturename: Final = fixturename
self._pyfuncitem: Final = pyfuncitem
# The FixtureDefs for each fixture name requested by this item.
# Starts from the statically-known fixturedefs resolved during
# collection. Dynamically requested fixtures (using
# `request.getfixturevalue("foo")`) are added dynamically.
self._arg2fixturedefs: Final = arg2fixturedefs
# The evaluated argnames so far, mapping to the FixtureDef they resolved
# to.
self._fixture_defs: Final = fixture_defs
# Notes on the type of `param`:
# -`request.param` is only defined in parametrized fixtures, and will raise
# AttributeError otherwise. Python typing has no notion of "undefined", so
# this cannot be reflected in the type.
# - Technically `param` is only (possibly) defined on SubRequest, not
# FixtureRequest, but the typing of that is still in flux so this cheats.
# - In the future we might consider using a generic for the param type, but
# for now just using Any.
self.param: Any
@property
def _fixturemanager(self) -> FixtureManager:
return self._pyfuncitem.session._fixturemanager
@property
@abc.abstractmethod
def _scope(self) -> Scope:
raise NotImplementedError()
@property
def scope(self) -> _ScopeName:
"""Scope string, one of "function", "class", "module", "package", "session"."""
return self._scope.value
@abc.abstractmethod
def _check_scope(
self,
requested_fixturedef: FixtureDef[object],
requested_scope: Scope,
) -> None:
raise NotImplementedError()
@property
def fixturenames(self) -> list[str]:
"""Names of all active fixtures in this request."""
result = list(self._pyfuncitem.fixturenames)
result.extend(set(self._fixture_defs).difference(result))
return result
@property
@abc.abstractmethod
def node(self):
"""Underlying collection node (depends on current request scope)."""
raise NotImplementedError()
@property
def config(self) -> Config:
"""The pytest config object associated with this request."""
return self._pyfuncitem.config
@property
def function(self):
"""Test function object if the request has a per-function scope."""
if self.scope != "function":
raise AttributeError(
f"function not available in {self.scope}-scoped context"
)
return self._pyfuncitem.obj
@property
def cls(self):
"""Class (can be None) where the test function was collected."""
if self.scope not in ("class", "function"):
raise AttributeError(f"cls not available in {self.scope}-scoped context")
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
"""Instance (can be None) on which test function was collected."""
if self.scope != "function":
return None
return getattr(self._pyfuncitem, "instance", None)
@property
def module(self):
"""Python module object where the test function was collected."""
if self.scope not in ("function", "class", "module"):
raise AttributeError(f"module not available in {self.scope}-scoped context")
mod = self._pyfuncitem.getparent(_pytest.python.Module)
assert mod is not None
return mod.obj
@property
def path(self) -> Path:
"""Path where the test function was collected."""
if self.scope not in ("function", "class", "module", "package"):
raise AttributeError(f"path not available in {self.scope}-scoped context")
return self._pyfuncitem.path
@property
def keywords(self) -> MutableMapping[str, Any]:
"""Keywords/markers dictionary for the underlying node."""
node: nodes.Node = self.node
return node.keywords
@property
def session(self) -> Session:
"""Pytest session object."""
return self._pyfuncitem.session
@abc.abstractmethod
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
"""Add finalizer/teardown function to be called without arguments after
the last test within the requesting test context finished execution."""
raise NotImplementedError()
def applymarker(self, marker: str | MarkDecorator) -> None:
"""Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:param marker:
An object created by a call to ``pytest.mark.NAME(...)``.
"""
self.node.add_marker(marker)
def raiseerror(self, msg: str | None) -> NoReturn:
"""Raise a FixtureLookupError exception.
:param msg:
An optional custom error message.
"""
raise FixtureLookupError(None, self, msg)
def getfixturevalue(self, argname: str) -> Any:
"""Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
This method can be used during the test setup phase or the test run
phase, but during the test teardown phase a fixture's value may not
be available.
:param argname:
The fixture name.
:raises pytest.FixtureLookupError:
If the given fixture could not be found.
"""
# Note that in addition to the use case described in the docstring,
# getfixturevalue() is also called by pytest itself during item and fixture
# setup to evaluate the fixtures that are requested statically
# (using function parameters, autouse, etc).
fixturedef = self._get_active_fixturedef(argname)
assert fixturedef.cached_result is not None, (
f'The fixture value for "{argname}" is not available. '
"This can happen when the fixture has already been torn down."
)
return fixturedef.cached_result[0]
def _iter_chain(self) -> Iterator[SubRequest]:
"""Yield all SubRequests in the chain, from self up.
Note: does *not* yield the TopRequest.
"""
current = self
while isinstance(current, SubRequest):
yield current
current = current._parent_request
def _get_active_fixturedef(self, argname: str) -> FixtureDef[object]:
if argname == "request":
return RequestFixtureDef(self)
# If we already finished computing a fixture by this name in this item,
# return it.
fixturedef = self._fixture_defs.get(argname)
if fixturedef is not None:
self._check_scope(fixturedef, fixturedef._scope)
return fixturedef
# Find the appropriate fixturedef.
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# We arrive here because of a dynamic call to
# getfixturevalue(argname) which was naturally
# not known at parsing/collection time.
fixturedefs = self._fixturemanager.getfixturedefs(argname, self._pyfuncitem)
if fixturedefs is not None:
self._arg2fixturedefs[argname] = fixturedefs
# No fixtures defined with this name.
if fixturedefs is None:
raise FixtureLookupError(argname, self)
# The are no fixtures with this name applicable for the function.
if not fixturedefs:
raise FixtureLookupError(argname, self)
# A fixture may override another fixture with the same name, e.g. a
# fixture in a module can override a fixture in a conftest, a fixture in
# a class can override a fixture in the module, and so on.
# An overriding fixture can request its own name (possibly indirectly);
# in this case it gets the value of the fixture it overrides, one level
# up.
# Check how many `argname`s deep we are, and take the next one.
# `fixturedefs` is sorted from furthest to closest, so use negative
# indexing to go in reverse.
index = -1
for request in self._iter_chain():
if request.fixturename == argname:
index -= 1
# If already consumed all of the available levels, fail.
if -index > len(fixturedefs):
raise FixtureLookupError(argname, self)
fixturedef = fixturedefs[index]
# Prepare a SubRequest object for calling the fixture.
try:
callspec = self._pyfuncitem.callspec
except AttributeError:
callspec = None
if callspec is not None and argname in callspec.params:
param = callspec.params[argname]
param_index = callspec.indices[argname]
# The parametrize invocation scope overrides the fixture's scope.
scope = callspec._arg2scope[argname]
else:
param = NOTSET
param_index = 0
scope = fixturedef._scope
self._check_fixturedef_without_param(fixturedef)
# The parametrize invocation scope only controls caching behavior while
# allowing wider-scoped fixtures to keep depending on the parametrized
# fixture. Scope control is enforced for parametrized fixtures
# by recreating the whole fixture tree on parameter change.
# Hence `fixturedef._scope`, not `scope`.
self._check_scope(fixturedef, fixturedef._scope)
subrequest = SubRequest(
self, scope, param, param_index, fixturedef, _ispytest=True
)
# Make sure the fixture value is cached, running it if it isn't
fixturedef.execute(request=subrequest)
self._fixture_defs[argname] = fixturedef
return fixturedef
def _check_fixturedef_without_param(self, fixturedef: FixtureDef[object]) -> None:
"""Check that this request is allowed to execute this fixturedef without
a param."""
funcitem = self._pyfuncitem
has_params = fixturedef.params is not None
fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
if has_params and fixtures_not_supported:
msg = (
f"{funcitem.name} does not support fixtures, maybe unittest.TestCase subclass?\n"
f"Node id: {funcitem.nodeid}\n"
f"Function type: {type(funcitem).__name__}"
)
fail(msg, pytrace=False)
if has_params:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = absolutepath(frameinfo.filename)
source_lineno = frameinfo.lineno
try:
source_path_str = str(source_path.relative_to(funcitem.config.rootpath))
except ValueError:
source_path_str = str(source_path)
location = getlocation(fixturedef.func, funcitem.config.rootpath)
msg = (
"The requested fixture has no parameter defined for test:\n"
f" {funcitem.nodeid}\n\n"
f"Requested fixture '{fixturedef.argname}' defined in:\n"
f"{location}\n\n"
f"Requested here:\n"
f"{source_path_str}:{source_lineno}"
)
fail(msg, pytrace=False)
def _get_fixturestack(self) -> list[FixtureDef[Any]]:
values = [request._fixturedef for request in self._iter_chain()]
values.reverse()
return values
@final
| FixtureRequest |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc.py | {
"start": 3854,
"end": 4293
} | class ____(Benchmark):
""" Benchmark for the methods which do not take any arguments
"""
params = [['__abs__', '__neg__', '__pos__'], TYPES1]
param_names = ['methods', 'npdtypes']
timeout = 10
def setup(self, methname, npdtypes):
values = get_squares_()
self.xarg = values.get(npdtypes)[0]
def time_ndarray_meth(self, methname, npdtypes):
getattr(operator, methname)(self.xarg)
| MethodsV0 |
python | ray-project__ray | python/ray/llm/tests/common/utils/test_callback_base.py | {
"start": 1338,
"end": 3938
} | class ____:
@pytest.fixture
def llm_config(self):
config = LLMConfig(
model_loading_config=ModelLoadingConfig(model_id="test-model"),
llm_engine="vLLM",
callback_config={
"callback_class": TestingCallback,
"callback_kwargs": {"kwargs_test_key": "kwargs_test_value"},
},
)
return config
def test_callback_methods_called(self, llm_config):
"""Test that callback methods are called during initialization."""
# Run initialization
async def run_initialization():
callback = llm_config.get_or_create_callback()
await callback.run_callback("on_before_node_init")
if callback.ctx.run_init_node:
raise Exception("run_init_node is True")
await callback.run_callback("on_after_node_init")
asyncio.run(run_initialization())
# Verify callback was created and methods were called
callback = llm_config.get_or_create_callback()
assert callback is not None
assert isinstance(callback, TestingCallback)
assert callback.before_init_called is True
assert callback.after_init_called is True
def test_callback_singleton_behavior(self, llm_config):
"""Test that callback instance is cached (singleton pattern)."""
# Get callback multiple times
callback1 = llm_config.get_or_create_callback()
callback2 = llm_config.get_or_create_callback()
# Should be the same instance
assert callback1 is callback2
def test_callback_must_inherit_from_callback_class(self):
"""Test that callback_class must be a subclass of Callback, not just implement the same methods."""
class FakeCallback:
"""A class that implements the same methods as Callback but doesn't inherit from it."""
def __init__(self, **kwargs):
pass
async def on_before_node_init(self):
pass
async def on_after_node_init(self):
pass
# Should raise an error when trying to create callback
with pytest.raises(Exception, match="is-subclass"):
LLMConfig(
model_loading_config=ModelLoadingConfig(model_id="test-model"),
llm_engine="vLLM",
callback_config={
"callback_class": FakeCallback,
"callback_kwargs": {},
},
)
if __name__ == "__main__":
pytest.main(["-v", __file__])
| TestCallbackBase |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_py37.py | {
"start": 932,
"end": 1187
} | class ____(namedtuple("NamedTupleSubclass", [])):
"""Taken from https://github.com/pylint-dev/pylint/issues/5982"""
def method(self) -> NamedTupleSubclass:
"""Variables checker crashed when astroid did not supply a lineno"""
| NamedTupleSubclass |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/types.py | {
"start": 1822,
"end": 2070
} | class ____(Enum):
"""When to snapshot the base deployment during branch deployment create and update flows.
Enum values hyphenated for use in CLI.
"""
ON_CREATE = "on-create"
ON_UPDATE = "on-update"
| SnapshotBaseDeploymentCondition |
python | ansible__ansible | test/integration/targets/ansible-doc/library/double_doc.py | {
"start": 160,
"end": 310
} | class ____:
# 2nd ref to documentation string, used to trip up tokinzer doc reader
DOCUMENTATION = None
def __init__(self):
pass
| Foo |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 11380,
"end": 12079
} | class ____(Value):
__slots__ = ('loc', 'value',)
_fields = ('value',)
def __init__(self, value, loc=None):
self.loc = loc
self.value = value
def __eq__(self, other):
return (
self is other or (
isinstance(other, IntValue) and
# self.loc == other.loc and
self.value == other.value
)
)
def __repr__(self):
return ('IntValue('
'value={self.value!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.value,
self.loc
)
def __hash__(self):
return id(self)
| IntValue |
python | apache__airflow | providers/keycloak/src/airflow/providers/keycloak/auth_manager/datamodels/token.py | {
"start": 926,
"end": 1025
} | class ____(BaseModel):
"""Token serializer for responses."""
access_token: str
| TokenResponse |
python | huggingface__transformers | src/transformers/models/layoutlm/modeling_layoutlm.py | {
"start": 15012,
"end": 15813
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->LayoutLM
| LayoutLMPredictionHeadTransform |
python | django__django | tests/backends/models.py | {
"start": 4376,
"end": 4600
} | class ____(models.Model):
id = models.AutoField(primary_key=True, db_column="select")
reporter = models.ForeignKey(Reporter, models.CASCADE, db_column="where")
class Meta:
db_table = "order"
| SQLKeywordsModel |
python | great-expectations__great_expectations | tests/test_plugins/fake_configs.py | {
"start": 81,
"end": 289
} | class ____:
def __init__(self, a, x, b=None, c=None, y=None, z=None):
self.a = a
self.b = b
self.c = c
self.x = x
self.y = y
self.z = z
| FakeConfigurableClass |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/flickr/tests.py | {
"start": 294,
"end": 2052
} | class ____(OAuthTestsMixin, TestCase):
provider_id = FlickrProvider.id
def get_mocked_response(self):
#
return [
MockedResponse(
HTTPStatus.OK,
r"""
{"stat": "ok",
"user": {
"username": {
"_content": "pennersr"},
"id": "12345678@N00"}}
""",
), # noqa
MockedResponse(
HTTPStatus.OK,
r"""
{"person": {"username": {"_content": "pennersr"}, "photosurl": {"_content":
"http://www.flickr.com/photos/12345678@N00/"},
"nsid": "12345678@N00",
"path_alias": null, "photos": {"count": {"_content": 0},
"firstdatetaken": {"_content": null}, "views": {"_content": "28"},
"firstdate": {"_content": null}}, "iconserver": "0",
"description": {"_content": ""}, "mobileurl": {"_content":
"http://m.flickr.com/photostream.gne?id=6294613"},
"profileurl": {
"_content": "http://www.flickr.com/people/12345678@N00/"},
"mbox_sha1sum": {"_content":
"5e5b359c123e54f95236209c8808d607a5cdd21e"},
"ispro": 0, "location": {"_content": ""},
"id": "12345678@N00",
"realname": {"_content": "raymond penners"},
"iconfarm": 0}, "stat": "ok"}
""",
),
] # noqa
def get_expected_to_str(self):
return "pennersr"
def test_login(self):
super().test_login()
account = SocialAccount.objects.get(uid="12345678@N00")
f_account = account.get_provider_account()
self.assertEqual(account.user.first_name, "raymond")
self.assertEqual(account.user.last_name, "penners")
self.assertEqual(
f_account.get_profile_url(),
"http://www.flickr.com/people/12345678@N00/",
)
self.assertEqual(f_account.to_str(), "pennersr")
| FlickrTests |
python | mlflow__mlflow | mlflow/types/schema.py | {
"start": 20875,
"end": 21590
} | class ____(Array):
"""
Specification used to represent a vector type in Spark ML.
"""
def __init__(self):
super().__init__(dtype=DataType.double)
def to_dict(self):
return {"type": SPARKML_VECTOR_TYPE}
@classmethod
def from_json_dict(cls, **kwargs):
return SparkMLVector()
def __repr__(self) -> str:
return "SparkML vector"
def __eq__(self, other) -> bool:
return isinstance(other, SparkMLVector)
def _merge(self, arr: BaseType) -> SparkMLVector:
if isinstance(arr, SparkMLVector):
return deepcopy(self)
raise MlflowException("SparkML vector type can't be merged with another Array type.")
| SparkMLVector |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/console_widget.py | {
"start": 1384,
"end": 108894
} | class ____(MetaQObjectHasTraits('NewBase', (LoggingConfigurable, superQ(QtWidgets.QWidget)), {})):
""" An abstract base class for console-type widgets. This class has
functionality for:
* Maintaining a prompt and editing region
* Providing the traditional Unix-style console keyboard shortcuts
* Performing tab completion
* Paging text
* Handling ANSI escape codes
ConsoleWidget also provides a number of utility methods that will be
convenient to implementors of a console-style widget.
"""
#------ Configuration ------------------------------------------------------
ansi_codes = Bool(True, config=True,
help="Whether to process ANSI escape codes."
)
buffer_size = Integer(500, config=True,
help="""
The maximum number of lines of text before truncation. Specifying a
non-positive number disables text truncation (not recommended).
"""
)
execute_on_complete_input = Bool(True, config=True,
help="""Whether to automatically execute on syntactically complete input.
If False, Shift-Enter is required to submit each execution.
Disabling this is mainly useful for non-Python kernels,
where the completion check would be wrong.
"""
)
gui_completion = Enum(['plain', 'droplist', 'ncurses'], config=True,
default_value = 'ncurses',
help="""
The type of completer to use. Valid values are:
'plain' : Show the available completion as a text list
Below the editing area.
'droplist': Show the completion in a drop down list navigable
by the arrow keys, and from which you can select
completion by pressing Return.
'ncurses' : Show the completion as a text list which is navigable by
`tab` and arrow keys.
"""
)
gui_completion_height = Integer(0, config=True,
help="""
Set Height for completion.
'droplist'
Height in pixels.
'ncurses'
Maximum number of rows.
"""
)
# NOTE: this value can only be specified during initialization.
kind = Enum(['plain', 'rich'], default_value='plain', config=True,
help="""
The type of underlying text widget to use. Valid values are 'plain',
which specifies a QPlainTextEdit, and 'rich', which specifies a
QTextEdit.
"""
)
# NOTE: this value can only be specified during initialization.
paging = Enum(['inside', 'hsplit', 'vsplit', 'custom', 'none'],
default_value='inside', config=True,
help="""
The type of paging to use. Valid values are:
'inside'
The widget pages like a traditional terminal.
'hsplit'
When paging is requested, the widget is split horizontally. The top
pane contains the console, and the bottom pane contains the paged text.
'vsplit'
Similar to 'hsplit', except that a vertical splitter is used.
'custom'
No action is taken by the widget beyond emitting a
'custom_page_requested(str)' signal.
'none'
The text is written directly to the console.
""")
scrollbar_visibility = Bool(True, config=True,
help="""The visibility of the scrollar. If False then the scrollbar will be
invisible."""
)
font_family = Unicode(config=True,
help="""The font family to use for the console.
On OSX this defaults to Monaco, on Windows the default is
Consolas with fallback of Courier, and on other platforms
the default is Monospace.
""")
def _font_family_default(self):
if sys.platform == 'win32':
# Consolas ships with Vista/Win7, fallback to Courier if needed
return 'Consolas'
elif sys.platform == 'darwin':
# OSX always has Monaco, no need for a fallback
return 'Monaco'
else:
# Monospace should always exist, no need for a fallback
return 'Monospace'
font_size = Integer(config=True,
help="""The font size. If unconfigured, Qt will be entrusted
with the size of the font.
""")
console_width = Integer(81, config=True,
help="""The width of the console at start time in number
of characters (will double with `hsplit` paging)
""")
console_height = Integer(25, config=True,
help="""The height of the console at start time in number
of characters (will double with `vsplit` paging)
""")
# Whether to override ShortcutEvents for the keybindings defined by this
# widget (Ctrl+n, Ctrl+a, etc). Enable this if you want this widget to take
# priority (when it has focus) over, e.g., window-level menu shortcuts.
override_shortcuts = Bool(False)
# ------ Custom Qt Widgets -------------------------------------------------
# For other projects to easily override the Qt widgets used by the console
# (e.g. Spyder)
custom_control = None
custom_page_control = None
#------ Signals ------------------------------------------------------------
# Signals that indicate ConsoleWidget state.
copy_available = QtCore.Signal(bool)
redo_available = QtCore.Signal(bool)
undo_available = QtCore.Signal(bool)
# Signal emitted when paging is needed and the paging style has been
# specified as 'custom'.
custom_page_requested = QtCore.Signal(object)
# Signal emitted when the font is changed.
font_changed = QtCore.Signal(QtGui.QFont)
#------ Protected class variables ------------------------------------------
# control handles
_control = None
_page_control = None
_splitter = None
# When the control key is down, these keys are mapped.
_ctrl_down_remap = { QtCore.Qt.Key_B : QtCore.Qt.Key_Left,
QtCore.Qt.Key_F : QtCore.Qt.Key_Right,
QtCore.Qt.Key_A : QtCore.Qt.Key_Home,
QtCore.Qt.Key_P : QtCore.Qt.Key_Up,
QtCore.Qt.Key_N : QtCore.Qt.Key_Down,
QtCore.Qt.Key_H : QtCore.Qt.Key_Backspace, }
if not sys.platform == 'darwin':
# On OS X, Ctrl-E already does the right thing, whereas End moves the
# cursor to the bottom of the buffer.
_ctrl_down_remap[QtCore.Qt.Key_E] = QtCore.Qt.Key_End
# The shortcuts defined by this widget. We need to keep track of these to
# support 'override_shortcuts' above.
_shortcuts = set(_ctrl_down_remap.keys()) | \
{ QtCore.Qt.Key_C, QtCore.Qt.Key_G, QtCore.Qt.Key_O,
QtCore.Qt.Key_V }
_temp_buffer_filled = False
#---------------------------------------------------------------------------
# 'QObject' interface
#---------------------------------------------------------------------------
def __init__(self, parent=None, **kw):
""" Create a ConsoleWidget.
Parameters
----------
parent : QWidget, optional [default None]
The parent for this widget.
"""
super().__init__(**kw)
if parent:
self.setParent(parent)
self._is_complete_msg_id = None
self._is_complete_timeout = 0.1
self._is_complete_max_time = None
# While scrolling the pager on Mac OS X, it tears badly. The
# NativeGesture is platform and perhaps build-specific hence
# we take adequate precautions here.
self._pager_scroll_events = [QtCore.QEvent.Wheel]
if hasattr(QtCore.QEvent, 'NativeGesture'):
self._pager_scroll_events.append(QtCore.QEvent.NativeGesture)
# Create the layout and underlying text widget.
layout = QtWidgets.QStackedLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
self._control = self._create_control()
if self.paging in ('hsplit', 'vsplit'):
self._splitter = QtWidgets.QSplitter()
if self.paging == 'hsplit':
self._splitter.setOrientation(QtCore.Qt.Horizontal)
else:
self._splitter.setOrientation(QtCore.Qt.Vertical)
self._splitter.addWidget(self._control)
layout.addWidget(self._splitter)
else:
layout.addWidget(self._control)
# Create the paging widget, if necessary.
if self.paging in ('inside', 'hsplit', 'vsplit'):
self._page_control = self._create_page_control()
if self._splitter:
self._page_control.hide()
self._splitter.addWidget(self._page_control)
else:
layout.addWidget(self._page_control)
# Initialize protected variables. Some variables contain useful state
# information for subclasses; they should be considered read-only.
self._append_before_prompt_cursor = self._control.textCursor()
self._ansi_processor = QtAnsiCodeProcessor()
if self.gui_completion == 'ncurses':
self._completion_widget = CompletionHtml(self, self.gui_completion_height)
elif self.gui_completion == 'droplist':
self._completion_widget = CompletionWidget(self, self.gui_completion_height)
elif self.gui_completion == 'plain':
self._completion_widget = CompletionPlain(self)
self._continuation_prompt = '> '
self._continuation_prompt_html = None
self._executing = False
self._filter_resize = False
self._html_exporter = HtmlExporter(self._control)
self._input_buffer_executing = ''
self._input_buffer_pending = ''
self._kill_ring = QtKillRing(self._control)
self._prompt = ''
self._prompt_html = None
self._prompt_cursor = self._control.textCursor()
self._prompt_sep = ''
self._reading = False
self._reading_callback = None
self._tab_width = 4
# Cursor position of where to insert text.
# Control characters allow this to move around on the current line.
self._insert_text_cursor = self._control.textCursor()
# List of strings pending to be appended as plain text in the widget.
# The text is not immediately inserted when available to not
# choke the Qt event loop with paint events for the widget in
# case of lots of output from kernel.
self._pending_insert_text = []
# Timer to flush the pending stream messages. The interval is adjusted
# later based on actual time taken for flushing a screen (buffer_size)
# of output text.
self._pending_text_flush_interval = QtCore.QTimer(self._control)
self._pending_text_flush_interval.setInterval(100)
self._pending_text_flush_interval.setSingleShot(True)
self._pending_text_flush_interval.timeout.connect(
self._on_flush_pending_stream_timer)
# Set a monospaced font.
self.reset_font()
# Configure actions.
action = QtWidgets.QAction('Print', None)
action.setEnabled(True)
printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print)
if printkey.matches("Ctrl+P") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
printkey = "Ctrl+Shift+P"
action.setShortcut(printkey)
action.setShortcutContext(QtCore.Qt.WidgetWithChildrenShortcut)
action.triggered.connect(self.print_)
self.addAction(action)
self.print_action = action
action = QtWidgets.QAction('Save as HTML/XML', None)
action.setShortcut(QtGui.QKeySequence.Save)
action.setShortcutContext(QtCore.Qt.WidgetWithChildrenShortcut)
action.triggered.connect(self.export_html)
self.addAction(action)
self.export_action = action
action = QtWidgets.QAction('Select All', None)
action.setEnabled(True)
selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll)
if selectall.matches("Ctrl+A") and sys.platform != 'darwin':
# Only override the default if there is a collision.
# Qt ctrl = cmd on OSX, so the match gets a false positive on OSX.
selectall = "Ctrl+Shift+A"
action.setShortcut(selectall)
action.setShortcutContext(QtCore.Qt.WidgetWithChildrenShortcut)
action.triggered.connect(self.select_all_smart)
self.addAction(action)
self.select_all_action = action
self.increase_font_size = QtWidgets.QAction("Bigger Font",
self,
shortcut=QtGui.QKeySequence.ZoomIn,
shortcutContext=QtCore.Qt.WidgetWithChildrenShortcut,
statusTip="Increase the font size by one point",
triggered=self._increase_font_size)
self.addAction(self.increase_font_size)
self.decrease_font_size = QtWidgets.QAction("Smaller Font",
self,
shortcut=QtGui.QKeySequence.ZoomOut,
shortcutContext=QtCore.Qt.WidgetWithChildrenShortcut,
statusTip="Decrease the font size by one point",
triggered=self._decrease_font_size)
self.addAction(self.decrease_font_size)
self.reset_font_size = QtWidgets.QAction("Normal Font",
self,
shortcut="Ctrl+0",
shortcutContext=QtCore.Qt.WidgetWithChildrenShortcut,
statusTip="Restore the Normal font size",
triggered=self.reset_font)
self.addAction(self.reset_font_size)
# Accept drag and drop events here. Drops were already turned off
# in self._control when that widget was created.
self.setAcceptDrops(True)
#---------------------------------------------------------------------------
# Drag and drop support
#---------------------------------------------------------------------------
def dragEnterEvent(self, e):
if e.mimeData().hasUrls():
# The link action should indicate to that the drop will insert
# the file anme.
e.setDropAction(QtCore.Qt.LinkAction)
e.accept()
elif e.mimeData().hasText():
# By changing the action to copy we don't need to worry about
# the user accidentally moving text around in the widget.
e.setDropAction(QtCore.Qt.CopyAction)
e.accept()
def dragMoveEvent(self, e):
if e.mimeData().hasUrls():
pass
elif e.mimeData().hasText():
cursor = self._control.cursorForPosition(e.pos())
if self._in_buffer(cursor.position()):
e.setDropAction(QtCore.Qt.CopyAction)
self._control.setTextCursor(cursor)
else:
e.setDropAction(QtCore.Qt.IgnoreAction)
e.accept()
def dropEvent(self, e):
if e.mimeData().hasUrls():
self._keep_cursor_in_buffer()
cursor = self._control.textCursor()
filenames = [url.toLocalFile() for url in e.mimeData().urls()]
text = ', '.join("'" + f.replace("'", "'\"'\"'") + "'"
for f in filenames)
self._insert_plain_text_into_buffer(cursor, text)
elif e.mimeData().hasText():
cursor = self._control.cursorForPosition(e.pos())
if self._in_buffer(cursor.position()):
text = e.mimeData().text()
self._insert_plain_text_into_buffer(cursor, text)
def eventFilter(self, obj, event):
""" Reimplemented to ensure a console-like behavior in the underlying
text widgets.
"""
etype = event.type()
if etype == QtCore.QEvent.KeyPress:
# Re-map keys for all filtered widgets.
key = event.key()
if self._control_key_down(event.modifiers()) and \
key in self._ctrl_down_remap:
new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
self._ctrl_down_remap[key],
QtCore.Qt.NoModifier)
QtWidgets.QApplication.instance().sendEvent(obj, new_event)
return True
elif obj == self._control:
return self._event_filter_console_keypress(event)
elif obj == self._page_control:
return self._event_filter_page_keypress(event)
# Make middle-click paste safe.
elif getattr(event, 'button', False) and \
etype == QtCore.QEvent.MouseButtonRelease and \
event.button() == QtCore.Qt.MiddleButton and \
obj == self._control.viewport():
cursor = self._control.cursorForPosition(event.pos())
self._control.setTextCursor(cursor)
self.paste(QtGui.QClipboard.Selection)
return True
# Manually adjust the scrollbars *after* a resize event is dispatched.
elif etype == QtCore.QEvent.Resize and not self._filter_resize:
self._filter_resize = True
QtWidgets.QApplication.instance().sendEvent(obj, event)
self._adjust_scrollbars()
self._filter_resize = False
return True
# Override shortcuts for all filtered widgets.
elif etype == QtCore.QEvent.ShortcutOverride and \
self.override_shortcuts and \
self._control_key_down(event.modifiers()) and \
event.key() in self._shortcuts:
event.accept()
# Handle scrolling of the vsplit pager. This hack attempts to solve
# problems with tearing of the help text inside the pager window. This
# happens only on Mac OS X with both PySide and PyQt. This fix isn't
# perfect but makes the pager more usable.
elif etype in self._pager_scroll_events and \
obj == self._page_control:
self._page_control.repaint()
return True
elif etype == QtCore.QEvent.MouseMove:
anchor = self._control.anchorAt(event.pos())
if QT6:
pos = event.globalPosition().toPoint()
else:
pos = event.globalPos()
QtWidgets.QToolTip.showText(pos, anchor)
elif (
etype == QtCore.QEvent.Wheel
and self._control_key_down(event.modifiers())
):
if sys.platform != 'darwin':
if hasattr(event, 'angleDelta'):
if event.angleDelta().y() < 0:
self._decrease_font_size()
elif event.angleDelta().y() > 0:
self._increase_font_size()
elif hasattr(event, 'delta'):
if event.delta() < 0:
self._decrease_font_size()
elif event.delta() > 0:
self._increase_font_size()
# This is necessary to prevent that the mouse wheel event also
# scrolls up and down in this case.
return True
return super().eventFilter(obj, event)
#---------------------------------------------------------------------------
# 'QWidget' interface
#---------------------------------------------------------------------------
def sizeHint(self):
""" Reimplemented to suggest a size that is 80 characters wide and
25 lines high.
"""
font_metrics = QtGui.QFontMetrics(self.font)
margin = (self._control.frameWidth() +
self._control.document().documentMargin()) * 2
style = self.style()
splitwidth = style.pixelMetric(QtWidgets.QStyle.PM_SplitterWidth)
# Note 1: Despite my best efforts to take the various margins into
# account, the width is still coming out a bit too small, so we include
# a fudge factor of one character here.
# Note 2: QFontMetrics.maxWidth is not used here or anywhere else due
# to a Qt bug on certain Mac OS systems where it returns 0.
width = self._get_font_width() * self.console_width + margin
width += style.pixelMetric(QtWidgets.QStyle.PM_ScrollBarExtent)
if self.paging == 'hsplit':
width = width * 2 + splitwidth
height = font_metrics.height() * self.console_height + margin
if self.paging == 'vsplit':
height = height * 2 + splitwidth
return QtCore.QSize(int(width), int(height))
#---------------------------------------------------------------------------
# 'ConsoleWidget' public interface
#---------------------------------------------------------------------------
include_other_output = Bool(False, config=True,
help="""Whether to include output from clients
other than this one sharing the same kernel.
Outputs are not displayed until enter is pressed.
"""
)
other_output_prefix = Unicode('[remote] ', config=True,
help="""Prefix to add to outputs coming from clients other than this one.
Only relevant if include_other_output is True.
"""
)
def can_copy(self):
""" Returns whether text can be copied to the clipboard.
"""
return self._control.textCursor().hasSelection()
def can_cut(self):
""" Returns whether text can be cut to the clipboard.
"""
cursor = self._control.textCursor()
return (cursor.hasSelection() and
self._in_buffer(cursor.anchor()) and
self._in_buffer(cursor.position()))
def can_paste(self):
""" Returns whether text can be pasted from the clipboard.
"""
if self._control.textInteractionFlags() & QtCore.Qt.TextEditable:
return bool(QtWidgets.QApplication.clipboard().text())
return False
def clear(self, keep_input=True):
""" Clear the console.
Parameters
----------
keep_input : bool, optional (default True)
If set, restores the old input buffer if a new prompt is written.
"""
if self._executing:
self._control.clear()
else:
if keep_input:
input_buffer = self.input_buffer
self._control.clear()
self._show_prompt()
if keep_input:
self.input_buffer = input_buffer
def copy(self):
""" Copy the currently selected text to the clipboard.
"""
self.layout().currentWidget().copy()
def copy_anchor(self, anchor):
""" Copy anchor text to the clipboard
"""
QtWidgets.QApplication.clipboard().setText(anchor)
def cut(self):
""" Copy the currently selected text to the clipboard and delete it
if it's inside the input buffer.
"""
self.copy()
if self.can_cut():
self._control.textCursor().removeSelectedText()
def _handle_is_complete_reply(self, msg):
if msg['parent_header'].get('msg_id', 0) != self._is_complete_msg_id:
return
status = msg['content'].get('status', 'complete')
indent = msg['content'].get('indent', '')
self._trigger_is_complete_callback(status != 'incomplete', indent)
def _trigger_is_complete_callback(self, complete=False, indent=''):
if self._is_complete_msg_id is not None:
self._is_complete_msg_id = None
self._is_complete_callback(complete, indent)
def _register_is_complete_callback(self, source, callback):
if self._is_complete_msg_id is not None:
if self._is_complete_max_time < time.time():
# Second return while waiting for is_complete
return
else:
# request timed out
self._trigger_is_complete_callback()
self._is_complete_max_time = time.time() + self._is_complete_timeout
self._is_complete_callback = callback
self._is_complete_msg_id = self.kernel_client.is_complete(source)
def execute(self, source=None, hidden=False, interactive=False):
""" Executes source or the input buffer, possibly prompting for more
input.
Parameters
----------
source : str, optional
The source to execute. If not specified, the input buffer will be
used. If specified and 'hidden' is False, the input buffer will be
replaced with the source before execution.
hidden : bool, optional (default False)
If set, no output will be shown and the prompt will not be modified.
In other words, it will be completely invisible to the user that
an execution has occurred.
interactive : bool, optional (default False)
Whether the console is to treat the source as having been manually
entered by the user. The effect of this parameter depends on the
subclass implementation.
Raises
------
RuntimeError
If incomplete input is given and 'hidden' is True. In this case,
it is not possible to prompt for more input.
Returns
-------
A boolean indicating whether the source was executed.
"""
# WARNING: The order in which things happen here is very particular, in
# large part because our syntax highlighting is fragile. If you change
# something, test carefully!
# Decide what to execute.
if source is None:
source = self.input_buffer
elif not hidden:
self.input_buffer = source
if hidden:
self._execute(source, hidden)
# Execute the source or show a continuation prompt if it is incomplete.
elif interactive and self.execute_on_complete_input:
self._register_is_complete_callback(
source, partial(self.do_execute, source))
else:
self.do_execute(source, True, '')
def do_execute(self, source, complete, indent):
if complete:
self._append_plain_text('\n')
self._input_buffer_executing = self.input_buffer
self._executing = True
self._finalize_input_request()
# Perform actual execution.
self._execute(source, False)
else:
# Do this inside an edit block so continuation prompts are
# removed seamlessly via undo/redo.
cursor = self._get_end_cursor()
cursor.beginEditBlock()
try:
cursor.insertText('\n')
self._insert_continuation_prompt(cursor, indent)
finally:
cursor.endEditBlock()
# Do not do this inside the edit block. It works as expected
# when using a QPlainTextEdit control, but does not have an
# effect when using a QTextEdit. I believe this is a Qt bug.
self._control.moveCursor(QtGui.QTextCursor.End)
# Advance where text is inserted
self._insert_text_cursor.movePosition(QtGui.QTextCursor.End)
def export_html(self):
""" Shows a dialog to export HTML/XML in various formats.
"""
self._html_exporter.export()
def _finalize_input_request(self):
"""
Set the widget to a non-reading state.
"""
# Must set _reading to False before calling _prompt_finished
self._reading = False
self._prompt_finished()
# There is no prompt now, so before_prompt_position is eof
self._append_before_prompt_cursor.setPosition(
self._get_end_cursor().position())
self._insert_text_cursor.setPosition(
self._get_end_cursor().position())
# The maximum block count is only in effect during execution.
# This ensures that _prompt_pos does not become invalid due to
# text truncation.
self._control.document().setMaximumBlockCount(self.buffer_size)
# Setting a positive maximum block count will automatically
# disable the undo/redo history, but just to be safe:
self._control.setUndoRedoEnabled(False)
def _get_input_buffer(self, force=False):
""" The text that the user has entered entered at the current prompt.
If the console is currently executing, the text that is executing will
always be returned.
"""
# If we're executing, the input buffer may not even exist anymore due to
# the limit imposed by 'buffer_size'. Therefore, we store it.
if self._executing and not force:
return self._input_buffer_executing
cursor = self._get_end_cursor()
cursor.setPosition(self._prompt_pos, QtGui.QTextCursor.KeepAnchor)
input_buffer = cursor.selection().toPlainText()
# Strip out continuation prompts.
return input_buffer.replace('\n' + self._continuation_prompt, '\n')
def _set_input_buffer(self, string):
""" Sets the text in the input buffer.
If the console is currently executing, this call has no *immediate*
effect. When the execution is finished, the input buffer will be updated
appropriately.
"""
# If we're executing, store the text for later.
if self._executing:
self._input_buffer_pending = string
return
# Remove old text.
cursor = self._get_end_cursor()
cursor.beginEditBlock()
cursor.setPosition(self._prompt_pos, QtGui.QTextCursor.KeepAnchor)
cursor.removeSelectedText()
# Insert new text with continuation prompts.
self._insert_plain_text_into_buffer(self._get_prompt_cursor(), string)
cursor.endEditBlock()
self._control.moveCursor(QtGui.QTextCursor.End)
input_buffer = property(_get_input_buffer, _set_input_buffer)
def _get_font(self):
""" The base font being used by the ConsoleWidget.
"""
return self._control.document().defaultFont()
def _get_font_width(self, font=None):
if font is None:
font = self.font
font_metrics = QtGui.QFontMetrics(font)
if hasattr(font_metrics, 'horizontalAdvance'):
return font_metrics.horizontalAdvance(' ')
else:
return font_metrics.width(' ')
def _set_font(self, font):
""" Sets the base font for the ConsoleWidget to the specified QFont.
"""
self._control.setTabStopWidth(
self.tab_width * self._get_font_width(font)
)
self._completion_widget.setFont(font)
self._control.document().setDefaultFont(font)
if self._page_control:
self._page_control.document().setDefaultFont(font)
self.font_changed.emit(font)
font = property(_get_font, _set_font)
def _set_completion_widget(self, gui_completion):
""" Set gui completion widget.
"""
if gui_completion == 'ncurses':
self._completion_widget = CompletionHtml(self)
elif gui_completion == 'droplist':
self._completion_widget = CompletionWidget(self)
elif gui_completion == 'plain':
self._completion_widget = CompletionPlain(self)
self.gui_completion = gui_completion
def open_anchor(self, anchor):
""" Open selected anchor in the default webbrowser
"""
webbrowser.open( anchor )
def paste(self, mode=QtGui.QClipboard.Clipboard):
""" Paste the contents of the clipboard into the input region.
Parameters
----------
mode : QClipboard::Mode, optional [default QClipboard::Clipboard]
Controls which part of the system clipboard is used. This can be
used to access the selection clipboard in X11 and the Find buffer
in Mac OS. By default, the regular clipboard is used.
"""
if self._control.textInteractionFlags() & QtCore.Qt.TextEditable:
# Make sure the paste is safe.
self._keep_cursor_in_buffer()
cursor = self._control.textCursor()
# Remove any trailing newline, which confuses the GUI and forces the
# user to backspace.
text = QtWidgets.QApplication.clipboard().text(mode).rstrip()
# dedent removes "common leading whitespace" but to preserve relative
# indent of multiline code, we have to compensate for any
# leading space on the first line, if we're pasting into
# an indented position.
cursor_offset = cursor.position() - self._get_line_start_pos()
if text.startswith(' ' * cursor_offset):
text = text[cursor_offset:]
self._insert_plain_text_into_buffer(cursor, dedent(text))
def print_(self, printer=None):
""" Print the contents of the ConsoleWidget to the specified QPrinter.
"""
if not printer:
printer = QtPrintSupport.QPrinter()
if QtPrintSupport.QPrintDialog(printer).exec_() != QtPrintSupport.QPrintDialog.Accepted:
return
self._control.print_(printer)
def prompt_to_top(self):
""" Moves the prompt to the top of the viewport.
"""
if not self._executing:
prompt_cursor = self._get_prompt_cursor()
if self._get_cursor().blockNumber() < prompt_cursor.blockNumber():
self._set_cursor(prompt_cursor)
self._set_top_cursor(prompt_cursor)
def redo(self):
""" Redo the last operation. If there is no operation to redo, nothing
happens.
"""
self._control.redo()
def reset_font(self):
    """ Sets the font to the default fixed-width font for this platform.
    """
    # Pick the per-platform fallback family used when the configured
    # font_family is not available.
    if sys.platform == 'win32':
        # Consolas ships with Vista/Win7, fallback to Courier if needed
        fallback = 'Courier'
    elif sys.platform == 'darwin':
        # OSX always has Monaco
        fallback = 'Monaco'
    else:
        # Monospace should always exist
        fallback = 'Monospace'
    font = get_font(self.font_family, fallback)

    # An explicit (truthy) font_size wins; otherwise inherit the
    # application-wide point size.
    point_size = self.font_size or \
        QtWidgets.QApplication.instance().font().pointSize()
    font.setPointSize(point_size)

    font.setStyleHint(QtGui.QFont.TypeWriter)
    self._set_font(font)
def change_font_size(self, delta):
    """Change the font size by the specified amount (in points).
    """
    new_font = self.font
    # Never let the size drop below one point.
    new_size = max(new_font.pointSize() + delta, 1)
    new_font.setPointSize(new_size)
    self._set_font(new_font)
def _increase_font_size(self):
    # Convenience slot: grow the console font by one point.
    self.change_font_size(1)
def _decrease_font_size(self):
    # Convenience slot: shrink the console font by one point.
    self.change_font_size(-1)
def select_all_smart(self):
    """ Select current cell, or, if already selected, the whole document.
    """
    c = self._get_cursor()
    sel_range = c.selectionStart(), c.selectionEnd()
    # Build the "whole current cell" selection: prompt start to buffer end.
    c.clearSelection()
    c.setPosition(self._get_prompt_cursor().position())
    c.setPosition(self._get_end_pos(),
                  mode=QtGui.QTextCursor.KeepAnchor)
    new_sel_range = c.selectionStart(), c.selectionEnd()
    if sel_range == new_sel_range:
        # cell already selected, expand selection to whole document
        self.select_document()
    else:
        # set cell selection as active selection
        self._control.setTextCursor(c)
def select_document(self):
    """ Selects all the text in the buffer.
    """
    self._control.selectAll()
def _get_tab_width(self):
    """ The width (in terms of space characters) for tab characters.
    """
    return self._tab_width
def _set_tab_width(self, tab_width):
    """ Sets the width (in terms of space characters) for tab characters.
    """
    # Qt measures tab stops in pixels; convert from a character count
    # using the current font's character width.
    self._control.setTabStopWidth(tab_width * self._get_font_width())

    self._tab_width = tab_width

# Public accessor pair for the tab width, in space-character units.
tab_width = property(_get_tab_width, _set_tab_width)
def undo(self):
    """ Undo the last operation. If there is no operation to undo, nothing
    happens.
    """
    # Delegates directly to the underlying Q(Plain)TextEdit.
    self._control.undo()
#---------------------------------------------------------------------------
# 'ConsoleWidget' abstract interface
#---------------------------------------------------------------------------
def _is_complete(self, source, interactive):
    """ Returns whether 'source' can be executed. When triggered by an
        Enter/Return key press, 'interactive' is True; otherwise, it is
        False.

    Abstract: subclasses must implement this.
    """
    raise NotImplementedError
def _execute(self, source, hidden):
    """ Execute 'source'. If 'hidden', do not show any output.

    Abstract: subclasses must implement this.
    """
    raise NotImplementedError
def _prompt_started_hook(self):
    """ Called immediately after a new prompt is displayed.

    Optional hook; the default implementation does nothing.
    """
    pass
def _prompt_finished_hook(self):
    """ Called immediately after a prompt is finished, i.e. when some input
        will be processed and a new prompt displayed.

    Optional hook; the default implementation does nothing.
    """
    pass
def _up_pressed(self, shift_modifier):
    """ Called when the up key is pressed. Returns whether to continue
        processing the event.

    Optional hook; the default always continues processing.
    """
    return True
def _down_pressed(self, shift_modifier):
    """ Called when the down key is pressed. Returns whether to continue
        processing the event.

    Optional hook; the default always continues processing.
    """
    return True
def _tab_pressed(self):
    """ Called when the tab key is pressed. Returns whether to continue
        processing the event.

    Optional hook; the default always continues processing.
    """
    return True
#--------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#--------------------------------------------------------------------------
def _append_custom(self, insert, input, before_prompt=False, *args, **kwargs):
    """ A low-level method for appending content to the end of the buffer.

    If 'before_prompt' is enabled, the content will be inserted before the
    current prompt, if there is one.

    Parameters
    ----------
    insert : callable
        One of the _insert_* methods; called as insert(cursor, input, ...)
        and its return value is returned.
    input :
        The content to insert (type depends on 'insert').
    """
    # Determine where to insert the content.
    cursor = self._insert_text_cursor
    if before_prompt and (self._reading or not self._executing):
        self._flush_pending_stream()

        # Jump to before prompt, if there is one
        if cursor.position() >= self._append_before_prompt_pos \
                and self._append_before_prompt_pos != self._get_end_pos():
            cursor.setPosition(self._append_before_prompt_pos)

            # If we're appending on the same line as the prompt, use insert mode.
            # If so, the character at self._append_before_prompt_pos will not be a newline
            cursor.movePosition(QtGui.QTextCursor.Right,
                                QtGui.QTextCursor.KeepAnchor)
            if cursor.selection().toPlainText() != '\n':
                # _insert_mode is an ad-hoc attribute read by the insert
                # helpers; removed again below.
                cursor._insert_mode = True
            cursor.movePosition(QtGui.QTextCursor.Left)
    else:
        # Insert at current printing point.
        # If cursor is before prompt jump to end, but only if there
        # is a prompt (before_prompt_pos != end)
        if cursor.position() <= self._append_before_prompt_pos \
                and self._append_before_prompt_pos != self._get_end_pos():
            cursor.movePosition(QtGui.QTextCursor.End)

        if insert != self._insert_plain_text:
            self._flush_pending_stream()

    # Perform the insertion.
    result = insert(cursor, input, *args, **kwargs)

    # Remove insert mode tag
    if hasattr(cursor, '_insert_mode'):
        del cursor._insert_mode

    return result
def _append_block(self, block_format=None, before_prompt=False):
    """ Appends an new QTextBlock to the end of the console buffer.
    """
    self._append_custom(self._insert_block, block_format, before_prompt)
def _append_html(self, html, before_prompt=False):
    """ Appends HTML at the end of the console buffer.
    """
    self._append_custom(self._insert_html, html, before_prompt)
def _append_html_fetching_plain_text(self, html, before_prompt=False):
    """ Appends HTML, then returns the plain text version of it.
    """
    return self._append_custom(self._insert_html_fetching_plain_text,
                               html, before_prompt)
def _append_plain_text(self, text, before_prompt=False):
    """ Appends plain text, processing ANSI codes if enabled.
    """
    self._append_custom(self._insert_plain_text, text, before_prompt)
def _cancel_completion(self):
    """ If text completion is progress, cancel it.
    """
    self._completion_widget.cancel_completion()
def _clear_temporary_buffer(self):
    """ Clears the "temporary text" buffer, i.e. all the text following
        the prompt region.
    """
    # Select and remove all text below the input buffer.
    cursor = self._get_prompt_cursor()
    prompt = self._continuation_prompt.lstrip()
    if self._temp_buffer_filled:
        self._temp_buffer_filled = False
        # Walk past continuation-prompt lines; the first block that does
        # not start with the continuation prompt begins the temporary text.
        while cursor.movePosition(QtGui.QTextCursor.NextBlock):
            temp_cursor = QtGui.QTextCursor(cursor)
            temp_cursor.select(QtGui.QTextCursor.BlockUnderCursor)
            text = temp_cursor.selection().toPlainText().lstrip()
            if not text.startswith(prompt):
                break
    else:
        # We've reached the end of the input buffer and no text follows.
        return
    cursor.movePosition(QtGui.QTextCursor.Left)  # Grab the newline.
    cursor.movePosition(QtGui.QTextCursor.End,
                        QtGui.QTextCursor.KeepAnchor)
    cursor.removeSelectedText()

    # After doing this, we have no choice but to clear the undo/redo
    # history. Otherwise, the text is not "temporary" at all, because it
    # can be recalled with undo/redo. Unfortunately, Qt does not expose
    # fine-grained control to the undo/redo system.
    if self._control.isUndoRedoEnabled():
        self._control.setUndoRedoEnabled(False)
        self._control.setUndoRedoEnabled(True)
def _complete_with_items(self, cursor, items):
    """ Performs completion with 'items' at the specified cursor location.

    A single item is inserted outright; multiple items insert their common
    prefix and then pop up the completion widget.
    """
    self._cancel_completion()

    if len(items) == 1:
        cursor.setPosition(self._control.textCursor().position(),
                           QtGui.QTextCursor.KeepAnchor)
        cursor.insertText(items[0])

    elif len(items) > 1:
        current_pos = self._control.textCursor().position()
        prefix = os.path.commonprefix(items)
        if prefix:
            cursor.setPosition(current_pos, QtGui.QTextCursor.KeepAnchor)
            cursor.insertText(prefix)
            current_pos = cursor.position()

        self._completion_widget.show_items(cursor, items,
                                           prefix_length=len(prefix))
def _fill_temporary_buffer(self, cursor, text, html=False):
    """fill the area below the active editting zone with text"""
    # Remember where the user's cursor was so it can be restored after
    # the paged text is appended below the input region.
    current_pos = self._control.textCursor().position()

    cursor.beginEditBlock()
    self._append_plain_text('\n')
    self._page(text, html=html)
    cursor.endEditBlock()

    cursor.setPosition(current_pos)
    self._control.moveCursor(QtGui.QTextCursor.End)
    self._control.setTextCursor(cursor)

    # Mark the buffer so _clear_temporary_buffer knows there is text to remove.
    self._temp_buffer_filled = True
def _context_menu_make(self, pos):
    """ Creates a context menu for the given QPoint (in widget coordinates).

    Returns the populated QMenu; link actions are added only when 'pos'
    is over an anchor.
    """
    menu = QtWidgets.QMenu(self)

    self.cut_action = menu.addAction('Cut', self.cut)
    self.cut_action.setEnabled(self.can_cut())
    self.cut_action.setShortcut(QtGui.QKeySequence.Cut)

    self.copy_action = menu.addAction('Copy', self.copy)
    self.copy_action.setEnabled(self.can_copy())
    self.copy_action.setShortcut(QtGui.QKeySequence.Copy)

    self.paste_action = menu.addAction('Paste', self.paste)
    self.paste_action.setEnabled(self.can_paste())
    self.paste_action.setShortcut(QtGui.QKeySequence.Paste)

    anchor = self._control.anchorAt(pos)
    if anchor:
        menu.addSeparator()
        self.copy_link_action = menu.addAction(
            'Copy Link Address', lambda: self.copy_anchor(anchor=anchor))
        self.open_link_action = menu.addAction(
            'Open Link', lambda: self.open_anchor(anchor=anchor))

    menu.addSeparator()
    menu.addAction(self.select_all_action)

    menu.addSeparator()
    menu.addAction(self.export_action)
    menu.addAction(self.print_action)

    return menu
def _control_key_down(self, modifiers, include_command=False):
    """ Given a KeyboardModifiers flags object, return whether the Control
    key is down.

    Parameters
    ----------
    include_command : bool, optional (default True)
        Whether to treat the Command key as a (mutually exclusive) synonym
        for Control when in Mac OS.
    """
    if sys.platform != 'darwin':
        return bool(modifiers & QtCore.Qt.ControlModifier)

    # Note that on Mac OS, ControlModifier corresponds to the Command key
    # while MetaModifier corresponds to the Control key.
    command = include_command and (modifiers & QtCore.Qt.ControlModifier)
    return bool(command) ^ bool(modifiers & QtCore.Qt.MetaModifier)
def _create_control(self):
    """ Creates and connects the underlying text widget.

    Returns the configured QPlainTextEdit/QTextEdit (or custom control).
    """
    # Create the underlying control.
    if self.custom_control:
        control = self.custom_control()
    elif self.kind == 'plain':
        control = QtWidgets.QPlainTextEdit()
    elif self.kind == 'rich':
        control = QtWidgets.QTextEdit()
        control.setAcceptRichText(False)
    control.setMouseTracking(True)

    # Prevent the widget from handling drops, as we already provide
    # the logic in this class.
    control.setAcceptDrops(False)

    # Install event filters. The filter on the viewport is needed for
    # mouse events.
    control.installEventFilter(self)
    control.viewport().installEventFilter(self)

    # Connect signals.
    control.customContextMenuRequested.connect(
        self._custom_context_menu_requested)
    control.copyAvailable.connect(self.copy_available)
    control.redoAvailable.connect(self.redo_available)
    control.undoAvailable.connect(self.undo_available)

    # Hijack the document size change signal to prevent Qt from adjusting
    # the viewport's scrollbar. We are relying on an implementation detail
    # of Q(Plain)TextEdit here, which is potentially dangerous, but without
    # this functionality we cannot create a nice terminal interface.
    layout = control.document().documentLayout()
    layout.documentSizeChanged.disconnect()
    layout.documentSizeChanged.connect(self._adjust_scrollbars)

    # Configure the scrollbar policy
    if self.scrollbar_visibility:
        scrollbar_policy = QtCore.Qt.ScrollBarAlwaysOn
    else :
        scrollbar_policy = QtCore.Qt.ScrollBarAlwaysOff

    # Configure the control.
    control.setAttribute(QtCore.Qt.WA_InputMethodEnabled, True)
    control.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    control.setReadOnly(True)
    control.setUndoRedoEnabled(False)
    control.setVerticalScrollBarPolicy(scrollbar_policy)
    return control
def _create_page_control(self):
    """ Creates and connects the underlying paging widget.

    Returns the configured read-only widget used for paged output.
    """
    if self.custom_page_control:
        control = self.custom_page_control()
    elif self.kind == 'plain':
        control = QtWidgets.QPlainTextEdit()
    elif self.kind == 'rich':
        control = QtWidgets.QTextEdit()
    control.installEventFilter(self)
    viewport = control.viewport()
    viewport.installEventFilter(self)
    control.setReadOnly(True)
    control.setUndoRedoEnabled(False)

    # Configure the scrollbar policy
    if self.scrollbar_visibility:
        scrollbar_policy = QtCore.Qt.ScrollBarAlwaysOn
    else :
        scrollbar_policy = QtCore.Qt.ScrollBarAlwaysOff

    control.setVerticalScrollBarPolicy(scrollbar_policy)
    return control
def _event_filter_console_keypress(self, event):
    """ Filter key events for the underlying text widget to create a
        console-like interface.

    Returns True when the event was fully handled here ("intercepted")
    and should not be passed on to the widget.
    """
    intercepted = False
    cursor = self._control.textCursor()
    position = cursor.position()
    key = event.key()
    ctrl_down = self._control_key_down(event.modifiers())
    alt_down = event.modifiers() & QtCore.Qt.AltModifier
    shift_down = event.modifiers() & QtCore.Qt.ShiftModifier

    cmd_down = (
        sys.platform == "darwin" and
        self._control_key_down(event.modifiers(), include_command=True)
    )
    # On macOS, translate Command+arrow combinations into the Home/End
    # (and Ctrl+Home/End) keys handled below.
    if cmd_down:
        if key == QtCore.Qt.Key_Left:
            key = QtCore.Qt.Key_Home
        elif key == QtCore.Qt.Key_Right:
            key = QtCore.Qt.Key_End
        elif key == QtCore.Qt.Key_Up:
            ctrl_down = True
            key = QtCore.Qt.Key_Home
        elif key == QtCore.Qt.Key_Down:
            ctrl_down = True
            key = QtCore.Qt.Key_End

    #------ Special modifier logic -----------------------------------------

    if key in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):
        intercepted = True

        # Special handling when tab completing in text mode.
        self._cancel_completion()

        if self._in_buffer(position):
            # Special handling when a reading a line of raw input.
            if self._reading:
                self._append_plain_text('\n')
                self._reading = False
                if self._reading_callback:
                    self._reading_callback()

            # If the input buffer is a single line or there is only
            # whitespace after the cursor, execute. Otherwise, split the
            # line with a continuation prompt.
            elif not self._executing:
                cursor.movePosition(QtGui.QTextCursor.End,
                                    QtGui.QTextCursor.KeepAnchor)
                at_end = len(cursor.selectedText().strip()) == 0
                single_line = (self._get_end_cursor().blockNumber() ==
                               self._get_prompt_cursor().blockNumber())
                if (at_end or shift_down or single_line) and not ctrl_down:
                    self.execute(interactive = not shift_down)
                else:
                    # Do this inside an edit block for clean undo/redo.
                    pos = self._get_input_buffer_cursor_pos()
                    def callback(complete, indent):
                        try:
                            cursor.beginEditBlock()
                            cursor.setPosition(position)
                            cursor.insertText('\n')
                            self._insert_continuation_prompt(cursor)
                            if indent:
                                cursor.insertText(indent)
                        finally:
                            cursor.endEditBlock()

                        # Ensure that the whole input buffer is visible.
                        # FIXME: This will not be usable if the input buffer is
                        # taller than the console widget.
                        self._control.moveCursor(QtGui.QTextCursor.End)
                        self._control.setTextCursor(cursor)
                    self._register_is_complete_callback(
                        self._get_input_buffer()[:pos], callback)

    #------ Control/Cmd modifier -------------------------------------------

    elif ctrl_down:
        if key == QtCore.Qt.Key_G:
            self._keyboard_quit()
            intercepted = True

        elif key == QtCore.Qt.Key_K:
            # Emacs-style kill-to-end-of-line.
            if self._in_buffer(position):
                cursor.clearSelection()
                cursor.movePosition(QtGui.QTextCursor.EndOfLine,
                                    QtGui.QTextCursor.KeepAnchor)
                if not cursor.hasSelection():
                    # Line deletion (remove continuation prompt)
                    cursor.movePosition(QtGui.QTextCursor.NextBlock,
                                        QtGui.QTextCursor.KeepAnchor)
                    cursor.movePosition(QtGui.QTextCursor.Right,
                                        QtGui.QTextCursor.KeepAnchor,
                                        len(self._continuation_prompt))
                self._kill_ring.kill_cursor(cursor)
                self._set_cursor(cursor)
            intercepted = True

        elif key == QtCore.Qt.Key_L:
            self.prompt_to_top()
            intercepted = True

        elif key == QtCore.Qt.Key_O:
            # Switch focus to the pager when it is visible.
            if self._page_control and self._page_control.isVisible():
                self._page_control.setFocus()
            intercepted = True

        elif key == QtCore.Qt.Key_U:
            # Emacs-style kill-to-start-of-line (past the prompt).
            if self._in_buffer(position):
                cursor.clearSelection()
                start_line = cursor.blockNumber()
                if start_line == self._get_prompt_cursor().blockNumber():
                    offset = len(self._prompt)
                else:
                    offset = len(self._continuation_prompt)
                cursor.movePosition(QtGui.QTextCursor.StartOfBlock,
                                    QtGui.QTextCursor.KeepAnchor)
                cursor.movePosition(QtGui.QTextCursor.Right,
                                    QtGui.QTextCursor.KeepAnchor, offset)
                self._kill_ring.kill_cursor(cursor)
                self._set_cursor(cursor)
            intercepted = True

        elif key == QtCore.Qt.Key_Y:
            self._keep_cursor_in_buffer()
            self._kill_ring.yank()
            intercepted = True

        elif key in (QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Delete):
            # Kill the word before/after the cursor into the kill ring.
            if key == QtCore.Qt.Key_Backspace:
                cursor = self._get_word_start_cursor(position)
            else: # key == QtCore.Qt.Key_Delete
                cursor = self._get_word_end_cursor(position)
            cursor.setPosition(position, QtGui.QTextCursor.KeepAnchor)
            self._kill_ring.kill_cursor(cursor)
            intercepted = True

        elif key == QtCore.Qt.Key_D:
            # Ctrl-D: EOF on an empty prompt, EOT while executing with an
            # empty buffer, otherwise forward-delete.
            if len(self.input_buffer) == 0 and not self._executing:
                self.exit_requested.emit(self)
            # if executing and input buffer empty
            elif len(self._get_input_buffer(force=True)) == 0:
                # input a EOT ansi control character
                self._control.textCursor().insertText(chr(4))
                new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
                                            QtCore.Qt.Key_Return,
                                            QtCore.Qt.NoModifier)
                QtWidgets.QApplication.instance().sendEvent(self._control, new_event)
                intercepted = True
            else:
                new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
                                            QtCore.Qt.Key_Delete,
                                            QtCore.Qt.NoModifier)
                QtWidgets.QApplication.instance().sendEvent(self._control, new_event)
                intercepted = True

        elif key == QtCore.Qt.Key_Down:
            self._scroll_to_end()

        elif key == QtCore.Qt.Key_Up:
            self._control.verticalScrollBar().setValue(0)

    #------ Alt modifier ---------------------------------------------------

    elif alt_down:
        if key == QtCore.Qt.Key_B:
            self._set_cursor(self._get_word_start_cursor(position))
            intercepted = True

        elif key == QtCore.Qt.Key_F:
            self._set_cursor(self._get_word_end_cursor(position))
            intercepted = True

        elif key == QtCore.Qt.Key_Y:
            self._kill_ring.rotate()
            intercepted = True

        elif key == QtCore.Qt.Key_Backspace:
            cursor = self._get_word_start_cursor(position)
            cursor.setPosition(position, QtGui.QTextCursor.KeepAnchor)
            self._kill_ring.kill_cursor(cursor)
            intercepted = True

        elif key == QtCore.Qt.Key_D:
            cursor = self._get_word_end_cursor(position)
            cursor.setPosition(position, QtGui.QTextCursor.KeepAnchor)
            self._kill_ring.kill_cursor(cursor)
            intercepted = True

        elif key == QtCore.Qt.Key_Delete:
            intercepted = True

        elif key == QtCore.Qt.Key_Greater:
            self._control.moveCursor(QtGui.QTextCursor.End)
            intercepted = True

        elif key == QtCore.Qt.Key_Less:
            self._control.setTextCursor(self._get_prompt_cursor())
            intercepted = True

    #------ No modifiers ---------------------------------------------------

    else:
        self._trigger_is_complete_callback()
        if shift_down:
            anchormode = QtGui.QTextCursor.KeepAnchor
        else:
            anchormode = QtGui.QTextCursor.MoveAnchor

        if key == QtCore.Qt.Key_Escape:
            self._keyboard_quit()
            intercepted = True

        elif key == QtCore.Qt.Key_Up and not shift_down:
            if self._reading or not self._up_pressed(shift_down):
                intercepted = True
            else:
                prompt_line = self._get_prompt_cursor().blockNumber()
                intercepted = cursor.blockNumber() <= prompt_line

        elif key == QtCore.Qt.Key_Down and not shift_down:
            if self._reading or not self._down_pressed(shift_down):
                intercepted = True
            else:
                end_line = self._get_end_cursor().blockNumber()
                intercepted = cursor.blockNumber() == end_line

        elif key == QtCore.Qt.Key_Tab:
            if not self._reading:
                if self._tab_pressed():
                    self._indent(dedent=False)
                intercepted = True

        elif key == QtCore.Qt.Key_Backtab:
            self._indent(dedent=True)
            intercepted = True

        elif key == QtCore.Qt.Key_Left and not shift_down:
            # Move to the previous line
            line, col = cursor.blockNumber(), cursor.columnNumber()
            if line > self._get_prompt_cursor().blockNumber() and \
                    col == len(self._continuation_prompt):
                self._control.moveCursor(QtGui.QTextCursor.PreviousBlock,
                                         mode=anchormode)
                self._control.moveCursor(QtGui.QTextCursor.EndOfBlock,
                                         mode=anchormode)
                intercepted = True

            # Regular left movement
            else:
                intercepted = not self._in_buffer(position - 1)

        elif key == QtCore.Qt.Key_Right and not shift_down:
            #original_block_number = cursor.blockNumber()
            if position == self._get_line_end_pos():
                # At end of line: skip over the next continuation prompt.
                cursor.movePosition(QtGui.QTextCursor.NextBlock, mode=anchormode)
                cursor.movePosition(QtGui.QTextCursor.Right,
                                    mode=anchormode,
                                    n=len(self._continuation_prompt))
                self._control.setTextCursor(cursor)
            else:
                self._control.moveCursor(QtGui.QTextCursor.Right,
                                         mode=anchormode)
            intercepted = True

        elif key == QtCore.Qt.Key_Home:
            start_pos = self._get_line_start_pos()

            c = self._get_cursor()
            spaces = self._get_leading_spaces()
            # Home toggles between beginning-of-text and beginning-of-line.
            if (c.position() > start_pos + spaces or
                    c.columnNumber() == len(self._continuation_prompt)):
                start_pos += spaces    # Beginning of text
            if shift_down and self._in_buffer(position):
                if c.selectedText():
                    sel_max = max(c.selectionStart(), c.selectionEnd())
                    cursor.setPosition(sel_max,
                                       QtGui.QTextCursor.MoveAnchor)
                cursor.setPosition(start_pos, QtGui.QTextCursor.KeepAnchor)
            else:
                cursor.setPosition(start_pos)
            self._set_cursor(cursor)
            intercepted = True

        elif key == QtCore.Qt.Key_Backspace:

            # Line deletion (remove continuation prompt)
            line, col = cursor.blockNumber(), cursor.columnNumber()
            if not self._reading and \
                    col == len(self._continuation_prompt) and \
                    line > self._get_prompt_cursor().blockNumber():
                cursor.beginEditBlock()
                cursor.movePosition(QtGui.QTextCursor.StartOfBlock,
                                    QtGui.QTextCursor.KeepAnchor)
                cursor.removeSelectedText()
                cursor.deletePreviousChar()
                cursor.endEditBlock()
                intercepted = True

            # Regular backwards deletion
            else:
                anchor = cursor.anchor()
                if anchor == position:
                    intercepted = not self._in_buffer(position - 1)
                else:
                    intercepted = not self._in_buffer(min(anchor, position))

        elif key == QtCore.Qt.Key_Delete:

            # Line deletion (remove continuation prompt)
            if not self._reading and self._in_buffer(position) and \
                    cursor.atBlockEnd() and not cursor.hasSelection():
                cursor.movePosition(QtGui.QTextCursor.NextBlock,
                                    QtGui.QTextCursor.KeepAnchor)
                cursor.movePosition(QtGui.QTextCursor.Right,
                                    QtGui.QTextCursor.KeepAnchor,
                                    len(self._continuation_prompt))
                cursor.removeSelectedText()
                intercepted = True

            # Regular forwards deletion:
            else:
                anchor = cursor.anchor()
                intercepted = (not self._in_buffer(anchor) or
                               not self._in_buffer(position))

    #------ Special sequences ----------------------------------------------

    if not intercepted:
        if event.matches(QtGui.QKeySequence.Copy):
            self.copy()
            intercepted = True

        elif event.matches(QtGui.QKeySequence.Cut):
            self.cut()
            intercepted = True

        elif event.matches(QtGui.QKeySequence.Paste):
            self.paste()
            intercepted = True

    # Don't move the cursor if Control/Cmd is pressed to allow copy-paste
    # using the keyboard in any part of the buffer. Also, permit scrolling
    # with Page Up/Down keys. Finally, if we're executing, don't move the
    # cursor (if even this made sense, we can't guarantee that the prompt
    # position is still valid due to text truncation).
    if not (self._control_key_down(event.modifiers(), include_command=True)
            or key in (QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown)
            or (self._executing and not self._reading)
            or (event.text() == "" and not
                (not shift_down and key in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down)))):
        self._keep_cursor_in_buffer()

    return intercepted
def _event_filter_page_keypress(self, event):
    """ Filter key events for the paging widget to create console-like
        interface.

    Returns True when the event was handled here.
    """
    key = event.key()
    ctrl_down = self._control_key_down(event.modifiers())
    alt_down = event.modifiers() & QtCore.Qt.AltModifier

    if ctrl_down:
        if key == QtCore.Qt.Key_O:
            # Ctrl-O: hand focus back to the main console control.
            self._control.setFocus()
            return True

    elif alt_down:
        if key == QtCore.Qt.Key_Greater:
            self._page_control.moveCursor(QtGui.QTextCursor.End)
            return True

        elif key == QtCore.Qt.Key_Less:
            self._page_control.moveCursor(QtGui.QTextCursor.Start)
            return True

    elif key in (QtCore.Qt.Key_Q, QtCore.Qt.Key_Escape):
        # Dismiss the pager.
        if self._splitter:
            self._page_control.hide()
            self._control.setFocus()
        else:
            self.layout().setCurrentWidget(self._control)
            # re-enable buffer truncation after paging
            self._control.document().setMaximumBlockCount(self.buffer_size)
        return True

    elif key in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return,
                 QtCore.Qt.Key_Tab):
        new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
                                    QtCore.Qt.Key_PageDown,
                                    QtCore.Qt.NoModifier)
        QtWidgets.QApplication.instance().sendEvent(self._page_control, new_event)
        return True

    elif key == QtCore.Qt.Key_Backspace:
        new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
                                    QtCore.Qt.Key_PageUp,
                                    QtCore.Qt.NoModifier)
        QtWidgets.QApplication.instance().sendEvent(self._page_control, new_event)
        return True

    # vi/less -like key bindings
    elif key == QtCore.Qt.Key_J:
        new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
                                    QtCore.Qt.Key_Down,
                                    QtCore.Qt.NoModifier)
        QtWidgets.QApplication.instance().sendEvent(self._page_control, new_event)
        return True

    # vi/less -like key bindings
    elif key == QtCore.Qt.Key_K:
        new_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
                                    QtCore.Qt.Key_Up,
                                    QtCore.Qt.NoModifier)
        QtWidgets.QApplication.instance().sendEvent(self._page_control, new_event)
        return True

    return False
def _on_flush_pending_stream_timer(self):
    """ Flush pending text into the widget on console timer trigger.
    """
    self._flush_pending_stream()
def _flush_pending_stream(self):
    """ Flush pending text into the widget.

    It only applies to text that is pending when the console is in the
    running state. Text printed when console is not running is shown
    immediately, and does not wait to be flushed.
    """
    text = self._pending_insert_text
    self._pending_insert_text = []
    buffer_size = self._control.document().maximumBlockCount()
    if buffer_size > 0:
        # Anything beyond the block limit would be truncated by Qt anyway,
        # so clip the pending chunks to the last buffer_size lines first.
        text = self._get_last_lines_from_list(text, buffer_size)
    text = ''.join(text)
    t = time.time()
    self._insert_plain_text(self._insert_text_cursor, text, flush=True)
    # Set the flush interval to equal the maximum time to update text.
    self._pending_text_flush_interval.setInterval(
        int(max(100, (time.time() - t) * 1000))
    )
def _get_cursor(self):
    """ Get a cursor at the current insert position.
    """
    return self._control.textCursor()
def _get_end_cursor(self):
    """ Get a cursor at the last character of the current cell.
    """
    cursor = self._control.textCursor()
    cursor.movePosition(QtGui.QTextCursor.End)
    return cursor
def _get_end_pos(self):
    """ Get the position of the last character of the current cell.
    """
    return self._get_end_cursor().position()
def _get_line_start_cursor(self):
    """ Get a cursor at the first character of the current line.

    "First character" excludes the prompt: on the prompt line it is the
    position right after the prompt, on continuation lines right after
    the continuation prompt.
    """
    cursor = self._control.textCursor()
    start_line = cursor.blockNumber()
    if start_line == self._get_prompt_cursor().blockNumber():
        cursor.setPosition(self._prompt_pos)
    else:
        cursor.movePosition(QtGui.QTextCursor.StartOfLine)
        cursor.setPosition(cursor.position() +
                           len(self._continuation_prompt))
    return cursor
def _get_line_start_pos(self):
    """ Get the position of the first character of the current line.
    """
    return self._get_line_start_cursor().position()
def _get_line_end_cursor(self):
    """ Get a cursor at the last character of the current line.
    """
    cursor = self._control.textCursor()
    cursor.movePosition(QtGui.QTextCursor.EndOfLine)
    return cursor
def _get_line_end_pos(self):
    """ Get the position of the last character of the current line.
    """
    return self._get_line_end_cursor().position()
def _get_input_buffer_cursor_column(self):
    """ Get the column of the cursor in the input buffer, excluding the
        contribution by the prompt, or -1 if there is no such column.
    """
    prompt = self._get_input_buffer_cursor_prompt()
    if prompt is None:
        return -1
    # Subtract the prompt width from the raw Qt column.
    return self._control.textCursor().columnNumber() - len(prompt)
def _get_input_buffer_cursor_line(self):
    """ Get the text of the line of the input buffer that contains the
        cursor, or None if there is no such line.
    """
    prompt = self._get_input_buffer_cursor_prompt()
    if prompt is None:
        return None
    # Strip the prompt prefix off the cursor's block text.
    block_text = self._control.textCursor().block().text()
    return block_text[len(prompt):]
def _get_input_buffer_cursor_pos(self):
    """Get the cursor position within the input buffer."""
    cursor = self._control.textCursor()
    cursor.setPosition(self._prompt_pos, QtGui.QTextCursor.KeepAnchor)
    input_buffer = cursor.selection().toPlainText()

    # Don't count continuation prompts
    return len(input_buffer.replace('\n' + self._continuation_prompt, '\n'))
def _get_input_buffer_cursor_prompt(self):
    """ Returns the (plain text) prompt for line of the input buffer that
        contains the cursor, or None if there is no such line.
    """
    # No input buffer exists while code is executing.
    if self._executing:
        return None
    cursor = self._control.textCursor()
    if cursor.position() < self._prompt_pos:
        return None
    # First line carries the main prompt; later lines the continuation one.
    on_prompt_line = (cursor.blockNumber() ==
                      self._get_prompt_cursor().blockNumber())
    return self._prompt if on_prompt_line else self._continuation_prompt
def _get_last_lines(self, text, num_lines, return_count=False):
""" Get the last specified number of lines of text (like `tail -n`).
If return_count is True, returns a tuple of clipped text and the
number of lines in the clipped text.
"""
pos = len(text)
if pos < num_lines:
if return_count:
return text, text.count('\n') if return_count else text
else:
return text
i = 0
while i < num_lines:
pos = text.rfind('\n', None, pos)
if pos == -1:
pos = None
break
i += 1
if return_count:
return text[pos:], i
else:
return text[pos:]
def _get_last_lines_from_list(self, text_list, num_lines):
    """ Get the list of text clipped to last specified lines.
    """
    clipped = []
    remaining = num_lines
    # Walk the chunks from newest to oldest, clipping each one to however
    # many lines are still needed.
    for chunk in reversed(text_list):
        chunk, found = self._get_last_lines(chunk, remaining,
                                            return_count=True)
        clipped.append(chunk)
        remaining -= found
        if remaining <= 0:
            break
    # Restore original (oldest-first) order.
    clipped.reverse()
    return clipped
def _get_leading_spaces(self):
    """ Get the number of leading spaces of the current line.

    The prompt (or continuation prompt) is excluded from the count.
    """
    cursor = self._get_cursor()
    start_line = cursor.blockNumber()
    if start_line == self._get_prompt_cursor().blockNumber():
        # first line
        offset = len(self._prompt)
    else:
        # continuation
        offset = len(self._continuation_prompt)
    cursor.select(QtGui.QTextCursor.LineUnderCursor)
    text = cursor.selectedText()[offset:]
    return len(text) - len(text.lstrip())
@property
def _prompt_pos(self):
    """ Find the position in the text right after the prompt.
    """
    # Clamp to the buffer end in case the prompt cursor is stale.
    return min(self._prompt_cursor.position() + 1, self._get_end_pos())
@property
def _append_before_prompt_pos(self):
    """ Find the position in the text right before the prompt.
    """
    # Clamp to the buffer end in case the stored cursor is stale.
    return min(self._append_before_prompt_cursor.position(),
               self._get_end_pos())
def _get_prompt_cursor(self):
    """ Get a cursor at the prompt position of the current cell.
    """
    cursor = self._control.textCursor()
    cursor.setPosition(self._prompt_pos)
    return cursor
def _get_selection_cursor(self, start, end):
    """ Get a cursor with text selected between the positions 'start' and
        'end'.
    """
    cursor = self._control.textCursor()
    cursor.setPosition(start)
    cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
    return cursor
def _get_word_start_cursor(self, position):
    """ Find the start of the word to the left the given position. If a
        sequence of non-word characters precedes the first word, skip over
        them. (This emulates the behavior of bash, emacs, etc.)
    """
    document = self._control.document()
    cursor = self._control.textCursor()
    line_start_pos = self._get_line_start_pos()
    if position == self._prompt_pos:
        # Already at the very start of the input; nothing to do.
        return cursor
    elif position == line_start_pos:
        # Cursor is at the beginning of a line, move to the last
        # non-whitespace character of the previous line
        cursor = self._control.textCursor()
        cursor.setPosition(position)
        cursor.movePosition(QtGui.QTextCursor.PreviousBlock)
        cursor.movePosition(QtGui.QTextCursor.EndOfBlock)
        position = cursor.position()
        while (
            position >= self._prompt_pos and
            is_whitespace(document.characterAt(position))
        ):
            position -= 1
        cursor.setPosition(position + 1)
    else:
        position -= 1

        # Find the last alphanumeric char, but don't move across lines
        while (
            position >= self._prompt_pos and
            position >= line_start_pos and
            not is_letter_or_number(document.characterAt(position))
        ):
            position -= 1

        # Find the first alphanumeric char, but don't move across lines
        while (
            position >= self._prompt_pos and
            position >= line_start_pos and
            is_letter_or_number(document.characterAt(position))
        ):
            position -= 1

        cursor.setPosition(position + 1)
    return cursor
def _get_word_end_cursor(self, position):
    """ Find the end of the word to the right the given position. If a
        sequence of non-word characters precedes the first word, skip over
        them. (This emulates the behavior of bash, emacs, etc.)
    """
    document = self._control.document()
    cursor = self._control.textCursor()
    end_pos = self._get_end_pos()
    line_end_pos = self._get_line_end_pos()
    if position == end_pos:
        # Cursor is at the very end of the buffer
        return cursor
    elif position == line_end_pos:
        # Cursor is at the end of a line, move to the first
        # non-whitespace character of the next line
        cursor = self._control.textCursor()
        cursor.setPosition(position)
        cursor.movePosition(QtGui.QTextCursor.NextBlock)
        # Skip over the continuation prompt that starts the next line.
        position = cursor.position() + len(self._continuation_prompt)
        while (
            position < end_pos and
            is_whitespace(document.characterAt(position))
        ):
            position += 1
        cursor.setPosition(position)
    else:
        if is_whitespace(document.characterAt(position)):
            # The next character is whitespace. If this is part of
            # indentation whitespace, skip to the first non-whitespace
            # character.
            is_indentation_whitespace = True
            back_pos = position - 1
            line_start_pos = self._get_line_start_pos()
            # Indentation whitespace means everything back to the line
            # start is also whitespace.
            while back_pos >= line_start_pos:
                if not is_whitespace(document.characterAt(back_pos)):
                    is_indentation_whitespace = False
                    break
                back_pos -= 1
            if is_indentation_whitespace:
                # Skip to the first non-whitespace character
                while (
                    position < end_pos and
                    position < line_end_pos and
                    is_whitespace(document.characterAt(position))
                ):
                    position += 1
                cursor.setPosition(position)
                return cursor
        # Skip any leading non-word characters, then the word itself,
        # without crossing the end of the line or buffer.
        while (
            position < end_pos and
            position < line_end_pos and
            not is_letter_or_number(document.characterAt(position))
        ):
            position += 1
        while (
            position < end_pos and
            position < line_end_pos and
            is_letter_or_number(document.characterAt(position))
        ):
            position += 1
        cursor.setPosition(position)
    return cursor
def _indent(self, dedent=True):
    """ Indent/Dedent current line or current text selection.

    Parameters
    ----------
    dedent : bool, optional (default True)
        If set, remove up to one tab-stop of leading spaces per line;
        otherwise insert spaces up to the next tab-stop.
    """
    # QTextCursor.selectedText() uses U+2029 (paragraph separator) as the
    # line break, so counting it gives the number of selected lines - 1.
    num_newlines = self._get_cursor().selectedText().count("\u2029")
    save_cur = self._get_cursor()
    cur = self._get_cursor()

    # move to first line of selection, if present
    cur.setPosition(cur.selectionStart())
    self._control.setTextCursor(cur)
    spaces = self._get_leading_spaces()
    # calculate number of spaces needed to align/indent to 4-space multiple
    step = self._tab_width - (spaces % self._tab_width)

    # insertText shouldn't replace if selection is active
    cur.clearSelection()

    # indent all lines in selection (or just current) by `step`
    for _ in range(num_newlines + 1):
        # update underlying cursor for _get_line_start_pos
        self._control.setTextCursor(cur)
        # move to first non-ws char on line
        cur.setPosition(self._get_line_start_pos())
        if dedent:
            spaces = min(step, self._get_leading_spaces())
            safe_step = spaces % self._tab_width
            # Select at most one tab-stop worth of leading spaces...
            cur.movePosition(QtGui.QTextCursor.Right,
                             QtGui.QTextCursor.KeepAnchor,
                             min(spaces, safe_step if safe_step != 0
                                 else self._tab_width))
            # ...and delete them.
            cur.removeSelectedText()
        else:
            cur.insertText(' ' * step)
        cur.movePosition(QtGui.QTextCursor.Down)

    # restore cursor
    self._control.setTextCursor(save_cur)
def _insert_continuation_prompt(self, cursor, indent=''):
    """ Inserts new continuation prompt using the specified cursor.

    `indent` is extra text appended after the prompt (e.g. auto-indent).
    """
    if self._continuation_prompt_html is None:
        self._insert_plain_text(cursor, self._continuation_prompt)
    else:
        # Remember the plain-text rendering so later width/position logic
        # based on the prompt string stays correct.
        self._continuation_prompt = self._insert_html_fetching_plain_text(
            cursor, self._continuation_prompt_html)
    if indent:
        cursor.insertText(indent)
def _insert_block(self, cursor, block_format=None):
""" Inserts an empty QTextBlock using the specified cursor.
"""
if block_format is None:
block_format = QtGui.QTextBlockFormat()
cursor.insertBlock(block_format)
def _insert_html(self, cursor, html):
    """ Inserts HTML using the specified cursor in such a way that future
        formatting is unaffected.
    """
    cursor.beginEditBlock()
    cursor.insertHtml(html)

    # After inserting HTML, the text document "remembers" it's in "html
    # mode", which means that subsequent calls adding plain text will result
    # in unwanted formatting, lost tab characters, etc. The following code
    # hacks around this behavior, which I consider to be a bug in Qt, by
    # (crudely) resetting the document's style state.
    cursor.movePosition(QtGui.QTextCursor.Left,
                        QtGui.QTextCursor.KeepAnchor)
    if cursor.selection().toPlainText() == ' ':
        # The HTML already ended with a space: replace it with a
        # default-formatted one.
        cursor.removeSelectedText()
    else:
        cursor.movePosition(QtGui.QTextCursor.Right)
    # Inserting with an empty QTextCharFormat resets the char format.
    cursor.insertText(' ', QtGui.QTextCharFormat())
    cursor.endEditBlock()
def _insert_html_fetching_plain_text(self, cursor, html):
    """ Inserts HTML using the specified cursor, then returns its plain text
        version.

    Any active selection on `cursor` is replaced by the inserted HTML.
    """
    cursor.beginEditBlock()
    cursor.removeSelectedText()

    start = cursor.position()
    self._insert_html(cursor, html)
    end = cursor.position()
    # Re-select the freshly inserted span to recover its plain-text form.
    cursor.setPosition(start, QtGui.QTextCursor.KeepAnchor)
    text = cursor.selection().toPlainText()

    cursor.setPosition(end)
    cursor.endEditBlock()
    return text
def _viewport_at_end(self):
    """Check if the viewport is at the end of the document."""
    viewport = self._control.viewport()
    # Document position of the character rendered at the viewport's
    # bottom-right corner.
    end_scroll_pos = self._control.cursorForPosition(
        QtCore.QPoint(viewport.width() - 1, viewport.height() - 1)
    ).position()
    end_doc_pos = self._get_end_pos()
    # Within one character of the true end counts as "at the end".
    return end_doc_pos - end_scroll_pos <= 1
def _scroll_to_end(self):
"""Scroll to the end of the document."""
end_scroll = (self._control.verticalScrollBar().maximum()
- self._control.verticalScrollBar().pageStep())
# Only scroll down
if end_scroll > self._control.verticalScrollBar().value():
self._control.verticalScrollBar().setValue(end_scroll)
def _insert_plain_text(self, cursor, text, flush=False):
    """ Inserts plain text using the specified cursor, processing ANSI codes
        if enabled.

    Parameters
    ----------
    cursor : QTextCursor
        Where to insert. May carry a `_insert_mode` attribute that
        disables the replacement-mode emulation below.
    text : str
        The text to insert, possibly containing ANSI escape sequences.
    flush : bool, optional (default False)
        If set, bypass the pending-text queue used while executing.
    """
    should_autoscroll = self._viewport_at_end()
    # maximumBlockCount() can be different from self.buffer_size in
    # case input prompt is active.
    buffer_size = self._control.document().maximumBlockCount()

    if (self._executing and not flush and
            self._pending_text_flush_interval.isActive() and
            cursor.position() == self._insert_text_cursor.position()):
        # Queue the text to insert in case it is being inserted at end
        self._pending_insert_text.append(text)
        if buffer_size > 0:
            # Keep the queue bounded to what the buffer can display anyway.
            self._pending_insert_text = self._get_last_lines_from_list(
                self._pending_insert_text, buffer_size)
        return

    if self._executing and not self._pending_text_flush_interval.isActive():
        self._pending_text_flush_interval.start()

    # Clip the text to last `buffer_size` lines.
    if buffer_size > 0:
        text = self._get_last_lines(text, buffer_size)

    cursor.beginEditBlock()
    if self.ansi_codes:
        # The processor yields alternating text substrings; the actions it
        # accumulated for the preceding escape sequences are applied first.
        for substring in self._ansi_processor.split_string(text):
            for act in self._ansi_processor.actions:

                # Unlike real terminal emulators, we don't distinguish
                # between the screen and the scrollback buffer. A screen
                # erase request clears everything.
                if act.action == 'erase':
                    remove = False
                    fill = False
                    if act.area == 'screen':
                        cursor.select(QtGui.QTextCursor.Document)
                        remove = True
                    if act.area == 'line':
                        if act.erase_to == 'all':
                            cursor.select(QtGui.QTextCursor.LineUnderCursor)
                            remove = True
                        elif act.erase_to == 'start':
                            cursor.movePosition(
                                QtGui.QTextCursor.StartOfLine,
                                QtGui.QTextCursor.KeepAnchor)
                            remove = True
                            fill = True
                        elif act.erase_to == 'end':
                            cursor.movePosition(
                                QtGui.QTextCursor.EndOfLine,
                                QtGui.QTextCursor.KeepAnchor)
                            remove = True
                    if remove:
                        # When erasing to the line start, refill with spaces
                        # so the cursor column is preserved as a terminal
                        # would.
                        nspace = cursor.selectionEnd() - cursor.selectionStart() if fill else 0
                        cursor.removeSelectedText()
                        if nspace > 0:
                            # replace text by space, to keep cursor position as specified
                            cursor.insertText(' ' * nspace)

                # Simulate a form feed by scrolling just past the last line.
                elif act.action == 'scroll' and act.unit == 'page':
                    cursor.insertText('\n')
                    cursor.endEditBlock()
                    self._set_top_cursor(cursor)
                    cursor.joinPreviousEditBlock()
                    cursor.deletePreviousChar()

                    if os.name == 'nt':
                        cursor.select(QtGui.QTextCursor.Document)
                        cursor.removeSelectedText()

                elif act.action == 'move' and act.unit == 'line':
                    if act.dir == 'up':
                        for i in range(act.count):
                            cursor.movePosition(
                                QtGui.QTextCursor.Up
                            )
                    elif act.dir == 'down':
                        for i in range(act.count):
                            cursor.movePosition(
                                QtGui.QTextCursor.Down
                            )
                    elif act.dir == 'leftup':
                        for i in range(act.count):
                            cursor.movePosition(
                                QtGui.QTextCursor.Up
                            )
                        cursor.movePosition(
                            QtGui.QTextCursor.StartOfLine,
                            QtGui.QTextCursor.MoveAnchor
                        )

                elif act.action == 'carriage-return':
                    cursor.movePosition(
                        QtGui.QTextCursor.StartOfLine,
                        QtGui.QTextCursor.MoveAnchor)

                elif act.action == 'beep':
                    QtWidgets.QApplication.instance().beep()

                elif act.action == 'backspace':
                    if not cursor.atBlockStart():
                        cursor.movePosition(
                            QtGui.QTextCursor.PreviousCharacter,
                            QtGui.QTextCursor.MoveAnchor)

                elif act.action == 'newline':
                    if (
                        cursor.block() != cursor.document().lastBlock()
                        and not cursor.document()
                        .toPlainText()
                        .endswith(self._prompt)
                    ):
                        cursor.movePosition(QtGui.QTextCursor.NextBlock)
                    else:
                        cursor.movePosition(
                            QtGui.QTextCursor.EndOfLine,
                            QtGui.QTextCursor.MoveAnchor,
                        )
                        cursor.insertText("\n")

            # simulate replacement mode
            if substring is not None:
                format = self._ansi_processor.get_format()

                # Note that using _insert_mode means the \r ANSI sequence will not swallow characters.
                if not (hasattr(cursor, '_insert_mode') and cursor._insert_mode):
                    pos = cursor.position()
                    # self._get_line_end_pos() is the previous line, don't use it
                    cursor2 = QtGui.QTextCursor(cursor)
                    cursor2.movePosition(QtGui.QTextCursor.EndOfLine)
                    remain = cursor2.position() - pos  # number of characters until end of line
                    n = len(substring)
                    swallow = min(n, remain)  # number of character to swallow
                    cursor.setPosition(pos + swallow, QtGui.QTextCursor.KeepAnchor)
                cursor.insertText(substring, format)
    else:
        cursor.insertText(text)
    cursor.endEditBlock()

    if should_autoscroll:
        self._scroll_to_end()
def _insert_plain_text_into_buffer(self, cursor, text):
    """ Inserts text into the input buffer using the specified cursor (which
        must be in the input buffer), ensuring that continuation prompts are
        inserted as necessary.
    """
    lines = text.splitlines(True)
    if lines:
        if lines[-1].endswith('\n'):
            # If the text ends with a newline, add a blank line so a new
            # continuation prompt is produced.
            lines.append('')
        cursor.beginEditBlock()
        cursor.insertText(lines[0])
        # Every subsequent line is preceded by a continuation prompt.
        for line in lines[1:]:
            if self._continuation_prompt_html is None:
                cursor.insertText(self._continuation_prompt)
            else:
                # Keep the plain-text rendering of the HTML prompt in sync.
                self._continuation_prompt = \
                    self._insert_html_fetching_plain_text(
                        cursor, self._continuation_prompt_html)
            cursor.insertText(line)
        cursor.endEditBlock()
def _in_buffer(self, position):
"""
Returns whether the specified position is inside the editing region.
"""
return position == self._move_position_in_buffer(position)
def _move_position_in_buffer(self, position):
    """
    Return the next position in buffer.

    If `position` is already inside the editable region it is returned
    unchanged; otherwise the nearest valid position at or after the
    prompt (or continuation prompt) is returned.
    """
    cursor = self._control.textCursor()
    cursor.setPosition(position)
    line = cursor.blockNumber()
    prompt_line = self._get_prompt_cursor().blockNumber()
    if line == prompt_line:
        # On the prompt line, positions before the prompt snap to it.
        if position >= self._prompt_pos:
            return position
        return self._prompt_pos
    if line > prompt_line:
        # On a continuation line, positions inside the continuation
        # prompt snap to just after it.
        cursor.movePosition(QtGui.QTextCursor.StartOfBlock)
        prompt_pos = cursor.position() + len(self._continuation_prompt)
        if position >= prompt_pos:
            return position
        return prompt_pos
    # Before the prompt line entirely: snap to the prompt.
    return self._prompt_pos
def _keep_cursor_in_buffer(self):
    """ Ensures that the cursor is inside the editing region. Returns
        whether the cursor was moved.
    """
    cursor = self._control.textCursor()
    endpos = cursor.selectionEnd()

    if endpos < self._prompt_pos:
        # The whole selection ends before the buffer even starts.
        cursor.setPosition(endpos)
        line = cursor.blockNumber()
        prompt_line = self._get_prompt_cursor().blockNumber()
        if line == prompt_line:
            # Cursor is on prompt line, move to start of buffer
            cursor.setPosition(self._prompt_pos)
        else:
            # Cursor is not in buffer, move to the end
            cursor.movePosition(QtGui.QTextCursor.End)
        self._control.setTextCursor(cursor)
        return True

    startpos = cursor.selectionStart()

    # Clamp both selection ends into the buffer independently.
    new_endpos = self._move_position_in_buffer(endpos)
    new_startpos = self._move_position_in_buffer(startpos)
    if new_endpos == endpos and new_startpos == startpos:
        return False

    cursor.setPosition(new_startpos)
    cursor.setPosition(new_endpos, QtGui.QTextCursor.KeepAnchor)
    self._control.setTextCursor(cursor)
    return True
def _keyboard_quit(self):
""" Cancels the current editing task ala Ctrl-G in Emacs.
"""
if self._temp_buffer_filled :
self._cancel_completion()
self._clear_temporary_buffer()
else:
self.input_buffer = ''
def _page(self, text, html=False):
    """ Displays text using the pager if it exceeds the height of the
        viewport.

    Parameters
    ----------
    html : bool, optional (default False)
        If set, the text will be interpreted as HTML instead of plain text.
    """
    line_height = QtGui.QFontMetrics(self.font).height()
    # Number of lines that fit in the viewport; %i truncates the float.
    minlines = self._control.viewport().height() / line_height
    if self.paging != 'none' and \
            re.match("(?:[^\n]*\n){%i}" % minlines, text):
        if self.paging == 'custom':
            self.custom_page_requested.emit(text)
        else:
            # disable buffer truncation during paging
            self._control.document().setMaximumBlockCount(0)
            self._page_control.clear()
            cursor = self._page_control.textCursor()
            if html:
                self._insert_html(cursor, text)
            else:
                self._insert_plain_text(cursor, text)
            self._page_control.moveCursor(QtGui.QTextCursor.Start)

            self._page_control.viewport().resize(self._control.size())
            if self._splitter:
                # Split pager: reveal the pane and give it focus.
                self._page_control.show()
                self._page_control.setFocus()
            else:
                # Inside pager: swap the stacked widget to the pager.
                self.layout().setCurrentWidget(self._page_control)
    elif html:
        self._append_html(text)
    else:
        self._append_plain_text(text)
def _set_paging(self, paging):
"""
Change the pager to `paging` style.
Parameters
----------
paging : string
Either "hsplit", "vsplit", or "inside"
"""
if self._splitter is None:
raise NotImplementedError("""can only switch if --paging=hsplit or
--paging=vsplit is used.""")
if paging == 'hsplit':
self._splitter.setOrientation(QtCore.Qt.Horizontal)
elif paging == 'vsplit':
self._splitter.setOrientation(QtCore.Qt.Vertical)
elif paging == 'inside':
raise NotImplementedError("""switching to 'inside' paging not
supported yet.""")
else:
raise ValueError("unknown paging method '%s'" % paging)
self.paging = paging
def _prompt_finished(self):
    """ Called immediately after a prompt is finished, i.e. when some input
        will be processed and a new prompt displayed.
    """
    # Lock the text area while the input is being processed, then let
    # subclasses react via the hook.
    self._control.setReadOnly(True)
    self._prompt_finished_hook()
def _prompt_started(self):
    """ Called immediately after a new prompt is displayed.
    """
    # Temporarily disable the maximum block count to permit undo/redo and
    # to ensure that the prompt position does not change due to truncation.
    self._control.document().setMaximumBlockCount(0)
    self._control.setUndoRedoEnabled(True)

    # Work around bug in QPlainTextEdit: input method is not re-enabled
    # when read-only is disabled.
    self._control.setReadOnly(False)
    self._control.setAttribute(QtCore.Qt.WA_InputMethodEnabled, True)

    # While reading stdin we are still "executing"; otherwise a new
    # prompt means execution finished.
    if not self._reading:
        self._executing = False
    self._prompt_started_hook()

    # If the input buffer has changed while executing, load it.
    if self._input_buffer_pending:
        self.input_buffer = self._input_buffer_pending
        self._input_buffer_pending = ''

    self._control.moveCursor(QtGui.QTextCursor.End)
def _readline(self, prompt='', callback=None, password=False):
    """ Reads one line of input from the user.

    Parameters
    ----------
    prompt : str, optional
        The prompt to print before reading the line.
    callback : callable, optional
        A callback to execute with the read line. If not specified, input is
        read *synchronously* and this method does not return until it has
        been read.
    password : bool, optional (default False)
        If set, warn that typed text will remain visible (password mode
        is not supported by this widget).

    Returns
    -------
    If a callback is specified, returns nothing. Otherwise, returns the
    input string with the trailing newline stripped.

    Raises
    ------
    RuntimeError
        If a read is already in progress, or if a synchronous read is
        requested while the widget is not visible.
    """
    if self._reading:
        raise RuntimeError('Cannot read a line. Widget is already reading.')

    if not callback and not self.isVisible():
        # If the user cannot see the widget, this function cannot return.
        raise RuntimeError('Cannot synchronously read a line if the widget '
                           'is not visible!')

    self._reading = True
    if password:
        self._show_prompt('Warning: QtConsole does not support password mode, '
                          'the text you type will be visible.', newline=True)

    if 'ipdb' not in prompt.lower():
        # This is a prompt that asks for input from the user.
        self._show_prompt(prompt, newline=False, separator=False)
    else:
        self._show_prompt(prompt, newline=False)

    if callback is None:
        self._reading_callback = None
        # Synchronous read: spin the event loop until input arrives.
        while self._reading:
            QtCore.QCoreApplication.processEvents()
        return self._get_input_buffer(force=True).rstrip('\n')
    else:
        self._reading_callback = lambda: \
            callback(self._get_input_buffer(force=True).rstrip('\n'))
def _set_continuation_prompt(self, prompt, html=False):
""" Sets the continuation prompt.
Parameters
----------
prompt : str
The prompt to show when more input is needed.
html : bool, optional (default False)
If set, the prompt will be inserted as formatted HTML. Otherwise,
the prompt will be treated as plain text, though ANSI color codes
will be handled.
"""
if html:
self._continuation_prompt_html = prompt
else:
self._continuation_prompt = prompt
self._continuation_prompt_html = None
def _set_cursor(self, cursor):
    """ Convenience method to set the current cursor.

    Thin wrapper over the underlying control's setTextCursor().
    """
    self._control.setTextCursor(cursor)
def _set_top_cursor(self, cursor):
    """ Scrolls the viewport so that the specified cursor is at the top.
    """
    # Jump to the bottom first so that ensureCursorVisible() below has to
    # scroll *up*, which leaves the target cursor on the top line.
    scrollbar = self._control.verticalScrollBar()
    scrollbar.setValue(scrollbar.maximum())
    # Temporarily adopt the target cursor, then restore the user's cursor.
    original_cursor = self._control.textCursor()
    self._control.setTextCursor(cursor)
    self._control.ensureCursorVisible()
    self._control.setTextCursor(original_cursor)
def _show_prompt(self, prompt=None, html=False, newline=True,
                 separator=True):
    """ Writes a new prompt at the end of the buffer.

    Parameters
    ----------
    prompt : str, optional
        The prompt to show. If not specified, the previous prompt is used.
    html : bool, optional (default False)
        Only relevant when a prompt is specified. If set, the prompt will
        be inserted as formatted HTML. Otherwise, the prompt will be treated
        as plain text, though ANSI color codes will be handled.
    newline : bool, optional (default True)
        If set, a new line will be written before showing the prompt if
        there is not already a newline at the end of the buffer.
    separator : bool, optional (default True)
        If set, a separator will be written before the prompt.
    """
    self._flush_pending_stream()

    # This is necessary to solve out-of-order insertion of mixed stdin and
    # stdout stream texts.
    # Fixes spyder-ide/spyder#17710
    if sys.platform == 'darwin':
        # Although this makes our tests hang on Mac, users confirmed that
        # it's needed on that platform too.
        # Fixes spyder-ide/spyder#19888
        if not os.environ.get('QTCONSOLE_TESTING'):
            QtCore.QCoreApplication.processEvents()
    else:
        QtCore.QCoreApplication.processEvents()

    cursor = self._get_end_cursor()

    # Save the current position to support _append*(before_prompt=True).
    # We can't leave the cursor at the end of the document though, because
    # that would cause any further additions to move the cursor. Therefore,
    # we move it back one place and move it forward again at the end of
    # this method. However, we only do this if the cursor isn't already
    # at the start of the text.
    if cursor.position() == 0:
        move_forward = False
    else:
        move_forward = True
        self._append_before_prompt_cursor.setPosition(cursor.position() - 1)

    # Insert a preliminary newline, if necessary.
    if newline and cursor.position() > 0:
        cursor.movePosition(QtGui.QTextCursor.Left,
                            QtGui.QTextCursor.KeepAnchor)
        if cursor.selection().toPlainText() != '\n':
            self._append_block()

    # Write the prompt.
    if separator:
        self._append_plain_text(self._prompt_sep)
    if prompt is None:
        # Re-show the previous prompt in whichever form it was stored.
        if self._prompt_html is None:
            self._append_plain_text(self._prompt)
        else:
            self._append_html(self._prompt_html)
    else:
        if html:
            self._prompt = self._append_html_fetching_plain_text(prompt)
            self._prompt_html = prompt
        else:
            self._append_plain_text(prompt)
            self._prompt = prompt
            self._prompt_html = None

    self._flush_pending_stream()
    self._prompt_cursor.setPosition(self._get_end_pos() - 1)

    if move_forward:
        self._append_before_prompt_cursor.setPosition(
            self._append_before_prompt_cursor.position() + 1)
    else:
        # cursor position was 0, set before prompt cursor
        self._append_before_prompt_cursor.setPosition(0)
    self._prompt_started()
#------ Signal handlers ----------------------------------------------------
def _adjust_scrollbars(self):
    """ Expands the vertical scrollbar beyond the range set by Qt.
    """
    # This code is adapted from _q_adjustScrollbars in qplaintextedit.cpp
    # and qtextedit.cpp.
    document = self._control.document()
    scrollbar = self._control.verticalScrollBar()
    viewport_height = self._control.viewport().height()
    if isinstance(self._control, QtWidgets.QPlainTextEdit):
        maximum = max(0, document.lineCount() - 1)
        step = viewport_height / self._control.fontMetrics().lineSpacing()
    else:
        # QTextEdit does not do line-based layout and blocks will not in
        # general have the same height. Therefore it does not make sense to
        # attempt to scroll in line height increments.
        maximum = document.size().height()
        step = viewport_height
    diff = maximum - scrollbar.maximum()
    scrollbar.setRange(0, round(maximum))
    scrollbar.setPageStep(round(step))

    # Compensate for undesirable scrolling that occurs automatically due to
    # maximumBlockCount() text truncation.
    if diff < 0 and document.blockCount() == document.maximumBlockCount():
        scrollbar.setValue(round(scrollbar.value() + diff))
def _custom_context_menu_requested(self, pos):
    """ Shows a context menu at the given QPoint (in widget coordinates).

    Blocks in exec_() until the menu is dismissed.
    """
    menu = self._context_menu_make(pos)
    menu.exec_(self._control.mapToGlobal(pos))
| ConsoleWidget |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/sensors/asset_sensors.py | {
"start": 116,
"end": 1368
class ____(Config):
    """Op config schema: the key (as path components) of the asset to read.

    NOTE(review): the class name is masked in this snippet; the sensor
    below instantiates it as ``ReadMaterializationConfig`` — confirm.
    """

    asset_key: list[str]
# start_asset_sensor_marker
import dagster as dg
@dg.asset_sensor(asset_key=dg.AssetKey("my_table"), job=my_job)
def my_asset_sensor(context: dg.SensorEvaluationContext, asset_event: dg.EventLogEntry):
    """Request a run of `my_job` whenever `my_table` is materialized.

    The materialized asset's key path is forwarded to the job through the
    `read_materialization` op's config.
    """
    # Asset sensors fire on materialization events, so the dagster_event
    # and its asset_key are expected to be present.
    assert asset_event.dagster_event and asset_event.dagster_event.asset_key

    # NOTE(review): `RunRequest` is used unqualified while everything else
    # goes through `dg.` — presumably imported earlier in this file; verify.
    yield RunRequest(
        run_key=context.cursor,
        run_config=dg.RunConfig(
            ops={
                "read_materialization": ReadMaterializationConfig(
                    asset_key=list(asset_event.dagster_event.asset_key.path)
                )
            }
        ),
    )
# end_asset_sensor_marker
# start_asset_sensor_test_marker
from dagster import DagsterInstance, build_sensor_context, materialize
def test_my_asset_sensor():
    """Sensor skips with no materialization, then requests a run after one."""
    @asset
    def my_table():
        return 1

    instance = DagsterInstance.ephemeral()
    ctx = build_sensor_context(instance)
    # No materialization has happened yet: the sensor should skip.
    result = list(my_asset_sensor(ctx))
    assert len(result) == 1
    assert isinstance(result[0], SkipReason)

    materialize([my_table], instance=instance)
    # After materializing, the sensor should emit exactly one RunRequest.
    result = list(my_asset_sensor(ctx))
    assert len(result) == 1
    assert isinstance(result[0], RunRequest)
| ReadMaterializationConfig |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 12363,
"end": 12626
class ____(Stmt):
    """A node that represents a block.

    NOTE(review): the class name is masked in this snippet; presumably the
    template ``{% block %}`` node — confirm.

    .. versionchanged:: 3.0.0
        the `required` field was added.
    """

    fields = ("name", "body", "scoped", "required")
    name: str
    body: list[Node]
    scoped: bool
    required: bool
| Block |
python | tensorflow__tensorflow | tensorflow/compiler/tests/xla_ops_test.py | {
"start": 29248,
"end": 46540
} | class ____(xla_test.XLATestCase, parameterized.TestCase):
def testDotShapeInference(self):
  """Inferred dot_general shape is batch dims, lhs free dims, rhs free dims."""
  a = array_ops.placeholder(np.float32, shape=(1, 2, 3, 4))
  b = array_ops.placeholder(np.float32, shape=(4, 5, 2, 6))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(1)
  dim_nums.rhs_contracting_dimensions.append(2)
  dim_nums.lhs_batch_dimensions.append(3)
  dim_nums.rhs_batch_dimensions.append(0)

  c = xla.dot_general(a, b, dim_nums)
  self.assertEqual(c.shape, tensor_shape.TensorShape([4, 1, 3, 5, 6]))
def testDotDifferentNumberOfContractingDimensions(self):
  """Mismatched contracting-dimension counts raise a ValueError."""
  a = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
  b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(2)
  dim_nums.rhs_contracting_dimensions.append(2)
  dim_nums.rhs_contracting_dimensions.append(3)

  with self.assertRaisesRegex(
      ValueError,
      'Must specify the same number of contracting '
      'dimensions for lhs and rhs. Got: 1 and 2',
  ):
    xla.dot_general(a, b, dim_nums)
def testDotDifferentContractingDimensionsSizes(self):
  """Contracting dimensions of unequal size raise a ValueError."""
  a = array_ops.placeholder(np.float32, shape=(2, 2, 2, 2))
  b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(2)
  dim_nums.rhs_contracting_dimensions.append(3)

  with self.assertRaisesRegex(
      ValueError, 'Dimensions must be equal, but are 2 and 4'
  ):
    xla.dot_general(a, b, dim_nums)
def testDotDifferentNumberOfBatchDimensions(self):
  """Mismatched batch-dimension counts raise a ValueError."""
  a = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))
  b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 4))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_batch_dimensions.append(2)
  dim_nums.rhs_batch_dimensions.append(2)
  dim_nums.rhs_batch_dimensions.append(3)

  with self.assertRaisesRegex(
      ValueError,
      'Must specify the same number of batch '
      'dimensions for lhs and rhs. Got: 1 and 2',
  ):
    xla.dot_general(a, b, dim_nums)
def testDotDifferentBatchDimensionsSizes(self):
  """Batch dimensions of unequal size raise a ValueError."""
  a = array_ops.placeholder(np.float32, shape=(2, 2, 2, 2))
  b = array_ops.placeholder(np.float32, shape=(4, 4, 4, 2))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(2)
  dim_nums.rhs_contracting_dimensions.append(3)
  dim_nums.lhs_batch_dimensions.append(0)
  dim_nums.rhs_batch_dimensions.append(0)

  with self.assertRaisesRegex(
      ValueError, 'Dimensions must be equal, but are 2 and 4'
  ):
    xla.dot_general(a, b, dim_nums)
def testDotUnknownNonContractingDimension(self):
  """An unknown non-contracting dim propagates as None in the output."""
  a = array_ops.placeholder(np.float32, shape=(None, 16))
  b = array_ops.placeholder(np.float32, shape=(16, 2))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(1)
  dim_nums.rhs_contracting_dimensions.append(0)

  c = xla.dot_general(a, b, dim_nums)
  self.assertEqual(c.shape.as_list(), [None, 2])
def testDotUnknownContractingDimension(self):
  """Unknown contracting dims on both sides still yield a full output shape."""
  a = array_ops.placeholder(np.float32, shape=(3, None))
  b = array_ops.placeholder(np.float32, shape=(None, 2))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(1)
  dim_nums.rhs_contracting_dimensions.append(0)

  c = xla.dot_general(a, b, dim_nums)
  self.assertEqual(c.shape.as_list(), [3, 2])
def testDotUnknownAndKnownContractingDimension(self):
  """A known contracting dim paired with an unknown one still infers fully."""
  a = array_ops.placeholder(np.float32, shape=(3, 4))
  b = array_ops.placeholder(np.float32, shape=(None, 2))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(1)
  dim_nums.rhs_contracting_dimensions.append(0)

  c = xla.dot_general(a, b, dim_nums)
  self.assertEqual(c.shape.as_list(), [3, 2])
def testDotUnknownBatchDimension(self):
  """Unknown batch dims on both sides propagate as None in the output."""
  a = array_ops.placeholder(np.float32, shape=(None, 3, 4))
  b = array_ops.placeholder(np.float32, shape=(None, 4))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(2)
  dim_nums.rhs_contracting_dimensions.append(1)
  dim_nums.lhs_batch_dimensions.append(0)
  dim_nums.rhs_batch_dimensions.append(0)

  c = xla.dot_general(a, b, dim_nums)
  self.assertEqual(c.shape.as_list(), [None, 3])
def testDotUnknownAndKnownBatchDimension(self):
  """A known batch dim paired with an unknown one resolves to the known size."""
  a = array_ops.placeholder(np.float32, shape=(2, 3, 4))
  b = array_ops.placeholder(np.float32, shape=(None, 4))

  dim_nums = xla_data_pb2.DotDimensionNumbers()
  dim_nums.lhs_contracting_dimensions.append(2)
  dim_nums.rhs_contracting_dimensions.append(1)
  dim_nums.lhs_batch_dimensions.append(0)
  dim_nums.rhs_batch_dimensions.append(0)

  c = xla.dot_general(a, b, dim_nums)
  self.assertEqual(c.shape.as_list(), [2, 3])
def testDynamicSlice(self):
  """dynamic_slice shape inference for constant and unknown slice_sizes."""
  start = array_ops.placeholder(np.int32, shape=(2, 3, 4))
  # If slice_sizes are known, the operand shape does not matter.
  # The shape of the output is equal to slice_sizes.
  slice_sizes = np.array([1, 2, 4], dtype=np.int32)
  for a_shape in [(2, 3, 4), (None, 3, 4), None]:
    a = array_ops.placeholder(np.float32, shape=a_shape)
    res = xla.dynamic_slice(a, start, slice_sizes)
    self.assertEqual(res.shape.as_list(), [1, 2, 4])

  # The first two dimension slice sizes are known
  slice_sizes = array_ops_stack.stack(
      [1, 2, array_ops.placeholder(np.int32, [])]
  )
  for a_shape in [(2, 3, 4), (None, 3, 4), None]:
    a = array_ops.placeholder(np.float32, shape=a_shape)
    res = xla.dynamic_slice(a, start, slice_sizes)
    self.assertEqual(res.shape.as_list(), [1, 2, None])

  # If slice_sizes has known rank and dimension, but is not a constant
  # then output has the same rank, but with unknown dimensions.
  slice_sizes = array_ops.placeholder(np.int32, [3])
  for a_shape in [(2, 3, 4), (None, 3, 4), None]:
    a = array_ops.placeholder(np.float32, shape=a_shape)
    res = xla.dynamic_slice(a, start, slice_sizes)
    self.assertEqual(res.shape.as_list(), [None, None, None])

  # slice sizes has known rank, but unknown dimensions.
  # then the output has the same rank as the operand, but with unknown
  # dimensions.
  slice_sizes = array_ops.placeholder(np.int32, [None])
  for a_shape in [(2, 3, 4), (None, 3, 4)]:
    a = array_ops.placeholder(np.float32, shape=a_shape)
    res = xla.dynamic_slice(a, start, slice_sizes)
    self.assertEqual(res.shape.as_list(), [None, None, None])

  # Fully unknown operand and slice sizes: the result rank is unknown too.
  a = array_ops.placeholder(np.float32, shape=None)
  slice_sizes = array_ops.placeholder(np.int32, [None])
  res = xla.dynamic_slice(a, start, slice_sizes)
  self.assertIsNone(res.shape.rank)
def testDynamicUpdateSlice(self):
  """dynamic_update_slice output shape always matches the operand shape."""
  a = array_ops.placeholder(np.float32, shape=(2, 3, 4))
  upd = array_ops.placeholder(np.float32, shape=(1, 2, 3))
  start_indices = array_ops.placeholder(np.int32, shape=(3,))
  res = xla.dynamic_update_slice(a, upd, start_indices)
  self.assertEqual(res.shape.as_list(), [2, 3, 4])

  # Unknown operand dims stay unknown in the result.
  a = array_ops.placeholder(np.float32, shape=(None, 3, None))
  res = xla.dynamic_update_slice(a, upd, start_indices)
  self.assertEqual(res.shape.as_list(), [None, 3, None])
def testPadShapeInference(self):
  """xla.pad shape inference: low/high/interior padding plus error cases."""
  a = array_ops.placeholder(np.float32, shape=(2, 3))

  c = xla.pad(
      a,
      padding_value=7,
      padding_low=[2, 1],
      padding_high=[1, 2],
      padding_interior=[1, 4],
  )
  self.assertEqual(c.shape, tensor_shape.TensorShape([6, 14]))

  # Negative low/high padding is allowed and trims the result.
  c = xla.pad(
      a,
      padding_value=7,
      padding_low=[2, -2],
      padding_high=[1, -2],
      padding_interior=[1, 2],
  )
  self.assertEqual(c.shape, tensor_shape.TensorShape([6, 3]))

  # An unknown input dim stays unknown; the known dim is padded normally.
  c = xla.pad(
      array_ops.placeholder(np.float32, shape=(None, 2)),
      padding_value=7,
      padding_low=[0, 1],
      padding_high=[0, 2],
      padding_interior=[0, 4],
  )
  self.assertEqual(c.shape.as_list(), [None, 9])

  # 0-sized input dimension and interior padding
  c = xla.pad(
      array_ops.placeholder(np.float32, shape=(2, 0)),
      padding_value=7,
      padding_low=[2, 1],
      padding_high=[1, 1],
      padding_interior=[1, 2],
  )
  self.assertEqual(c.shape, tensor_shape.TensorShape([6, 2]))

  with self.assertRaisesRegex(
      ValueError, 'padding_value input must be scalar, found rank 1 '
  ):
    xla.pad(
        a,
        padding_value=[0, 1],
        padding_low=[0, 0],
        padding_high=[0, 0],
        padding_interior=[0, 0],
    )

  with self.assertRaisesRegex(
      ValueError, 'padding_low must be a 1D tensor of size 2 '
  ):
    xla.pad(
        a,
        padding_value=7,
        padding_low=[0, 0, 0],
        padding_high=[0, 0],
        padding_interior=[0, 0],
    )

  with self.assertRaisesRegex(
      ValueError, 'padding_high must be a 1D tensor of size 2 '
  ):
    xla.pad(
        a,
        padding_value=7,
        padding_low=[0, 0],
        padding_high=[0, 0, 0],
        padding_interior=[0, 0],
    )

  with self.assertRaisesRegex(
      ValueError, 'padding_interior must be a 1D tensor of size 2 '
  ):
    xla.pad(
        a,
        padding_value=7,
        padding_low=[0, 0],
        padding_high=[0, 0],
        padding_interior=[0],
    )

  with self.assertRaisesRegex(
      ValueError,
      'padding_interior must contain only non-negative values, found -2 ',
  ):
    xla.pad(
        a,
        padding_value=7,
        padding_low=[0, 0],
        padding_high=[0, 0],
        padding_interior=[-2, 0],
    )

  with self.assertRaisesRegex(
      ValueError, 'resulting padded dimension has negative size -1 '
  ):
    xla.pad(
        a,
        padding_value=7,
        padding_low=[-3, 0],
        padding_high=[0, 0],
        padding_interior=[0, 0],
    )
def testVariadicReduceV2SingleArg(self):
  """Single-input variadic_reduce drops the reduced dimension."""

  @def_function.function
  def reducer_add(op_element, acc_val):
    return (op_element + acc_val,)

  dtype = np.float32
  arg_spec = array_ops.zeros([], dtype)  # pylint: disable=cell-var-from-loop
  reducer_func = reducer_add.get_concrete_function(arg_spec, arg_spec)

  res = xla.variadic_reduce(
      (array_ops.placeholder(np.float32, shape=(3, 4, 5)),),
      (array_ops.placeholder(np.float32, shape=()),),
      dimensions_to_reduce=(1,),
      reducer=reducer_func,
  )
  self.assertLen(res, 1)
  self.assertEqual(res[0].shape, tensor_shape.TensorShape([3, 5]))
def testVariadicReduceV2MultipleArgs(self):
  """Shape inference for a three-operand variadic reduce."""

  @def_function.function
  def triple_add(elem1, elem2, elem3, acc1, acc2, acc3):
    return (elem1 + acc1, elem2 + acc2, elem3 + acc3)

  f32_spec = array_ops.zeros([], np.float32)
  i32_spec = array_ops.zeros([], np.int32)
  concrete_reducer = triple_add.get_concrete_function(
      f32_spec, i32_spec, i32_spec, f32_spec, i32_spec, i32_spec
  )

  def run_reduce(shape1, shape2, shape3, dimensions_to_reduce=(1,)):
    # Build placeholder operands and scalar init values, then reduce.
    return xla.variadic_reduce(
        (
            array_ops.placeholder(np.float32, shape=shape1),
            array_ops.placeholder(np.int32, shape=shape2),
            array_ops.placeholder(np.int32, shape=shape3),
        ),
        (
            array_ops.placeholder(np.float32, shape=()),
            array_ops.placeholder(np.int32, shape=()),
            array_ops.placeholder(np.int32, shape=()),
        ),
        dimensions_to_reduce=dimensions_to_reduce,
        reducer=concrete_reducer,
    )

  def check_shapes(outputs, expected):
    self.assertLen(outputs, 3)
    for out in outputs:
      self.assertEqual(out.shape.as_list(), list(expected))

  check_shapes(run_reduce((3, 4, 5), (3, 4, 5), (3, 4, 5)), (3, 5))
  check_shapes(
      run_reduce((3, 4, 5), (3, 4, 5), (3, 4, 5), dimensions_to_reduce=()),
      (3, 4, 5),
  )
  # Unknown dims/ranks are filled in from the other operands where possible.
  check_shapes(run_reduce(None, (3, None, 5), (None, 4, 5)), (3, 5))
  check_shapes(run_reduce(None, (3, None, 5), None), (3, 5))
  check_shapes(run_reduce(None, (None, None, 5), None), (None, 5))

  # All operands of unknown rank: every output is of unknown rank too.
  outputs = run_reduce(None, None, None)
  self.assertLen(outputs, 3)
  for out in outputs:
    self.assertIsNone(out.shape.rank)

  # Mismatched static shapes are rejected.
  # NOTE(review): the last two cases are byte-identical — possibly a
  # copy-paste; confirm the intended coverage.
  for shapes in (
      ((3, 4, 5), (13, 4, 5), (3, 4, 5)),
      ((None, 4, 5), (3, None, 5), (13, 4, 5)),
      ((None, 4, 5), (3, None, 5), (13, 4, 5)),
  ):
    with self.assertRaisesRegex(
        ValueError, 'All inputs must have the same shape'
    ):
      run_reduce(*shapes)
@parameterized.product(
    algorithm=[
        random_ops_util.Algorithm.THREEFRY,
        random_ops_util.Algorithm.PHILOX,
        random_ops_util.Algorithm.AUTO_SELECT,
    ],
    dtype=[np.uint8, np.uint64],
)
def testRngBitGenerator(self, algorithm, dtype):
  """Shape inference for rng_bit_generator under varying state shapes."""
  out_shape = (2, 3)

  # Fully-known state shape is propagated to the state output.
  state = array_ops.placeholder(np.uint64, shape=(2,))
  res = xla.rng_bit_generator(algorithm, state, out_shape, dtype=dtype)
  self.assertEqual(res[0].shape, state.shape)
  self.assertEqual(res[1].shape, out_shape)

  # State with an unknown dimension size keeps that dimension unknown.
  state = array_ops.placeholder(np.uint64, shape=(None,))
  res = xla.rng_bit_generator(algorithm, state, out_shape, dtype=dtype)
  self.assertEqual(res[0].shape.as_list(), state.shape.as_list())
  self.assertEqual(res[1].shape, out_shape)

  # State of unknown rank: the state output is inferred as rank-1 unknown.
  state = array_ops.placeholder(np.uint64, shape=None)
  res = xla.rng_bit_generator(algorithm, state, out_shape, dtype=dtype)
  self.assertEqual(res[0].shape.as_list(), [None])
  self.assertEqual(res[1].shape, out_shape)

  # A requested output shape with an unknown dimension is rejected.
  state = array_ops.placeholder(np.uint64, shape=(None,))
  with self.assertRaisesRegex(
      TypeError, 'Failed to convert elements .* to Tensor'
  ):
    _ = xla.rng_bit_generator(algorithm, state, (None, 3), dtype=dtype)
def testGatherShapeInference(self):
  """Gather with fully static inputs infers a fully static shape."""
  params = np.arange(10, dtype=np.int32).reshape([2, 5])
  indices = np.array([2], np.int32)
  sizes = np.array([1, 3], np.int32)
  dnums = xla_data_pb2.GatherDimensionNumbers(
      offset_dims=[1],
      collapsed_slice_dims=[0],
      start_index_map=[0],
      index_vector_dim=1,
  )
  out = xla.gather(params, indices, dnums, sizes)
  self.assertEqual(out.shape, tensor_shape.TensorShape([1, 3]))
def testGatherShapeInferenceDynamicSlice(self):
  """Partially-known start_indices propagate their unknown dimension."""
  params = np.arange(12, dtype=np.int32).reshape([3, 2, 2])
  indices = array_ops.placeholder(np.int32, shape=(3, None, 2))
  sizes = np.array([1, 2, 2], np.int32)
  dnums = xla_data_pb2.GatherDimensionNumbers(
      offset_dims=[2, 3],
      collapsed_slice_dims=[0],
      start_index_map=[0, 1],
      index_vector_dim=2,
  )
  out = xla.gather(params, indices, dnums, sizes)
  self.assertEqual(out.shape, tensor_shape.TensorShape([3, None, 2, 2]))
def testGatherShapeInferenceDynamicInput(self):
  """An operand with an unknown dimension yields a fully unknown shape."""
  params = array_ops.placeholder(np.int32, shape=(None, 5))
  out = xla.gather(
      params,
      np.array([2], np.int32),
      xla_data_pb2.GatherDimensionNumbers(),
      np.array([1, 3], np.int32),
  )
  self.assertEqual(out.shape, tensor_shape.unknown_shape())
def testGatherShapeInferenceUnknownSliceSizes(self):
  """Non-constant slice_sizes make the result shape fully unknown."""
  params = np.arange(10, dtype=np.int32).reshape([2, 5])
  dyn_sizes = array_ops.placeholder(np.int32, shape=(2,))
  out = xla.gather(
      params,
      np.array([2], np.int32),
      xla_data_pb2.GatherDimensionNumbers(),
      dyn_sizes,
  )
  self.assertEqual(out.shape, tensor_shape.unknown_shape())
if __name__ == '__main__':
  # These tests build graphs with placeholders/sessions, which are not
  # compatible with eager mode, so force graph mode before running.
  ops.disable_eager_execution()
  googletest.main()
| XlaOpsShapeInferenceTest |
python | sphinx-doc__sphinx | sphinx/builders/latex/nodes.py | {
"start": 107,
"end": 235
} | class ____(nodes.container):
"""A node for a container of literal_block having a caption."""
pass
| captioned_literal_block |
python | jina-ai__jina | tests/integration/docarray_v2/test_issues.py | {
"start": 664,
"end": 1025
} | class ____(Executor):
@requests(on='/endpoint')
async def endpoint(self, docs: DocList[RootDoc], **kwargs) -> DocList[RootDoc]:
rets = DocList[RootDoc]()
rets.append(
RootDoc(
text='hello world', nested=Nested1Doc(nested=Nested2Doc(value='test'))
)
)
return rets
| NestedSchemaExecutor |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 72490,
"end": 72790
} | class ____(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
actions: Annotated[
list[
BulkCreateActionConnectionBody | BulkUpdateActionConnectionBody | BulkDeleteActionConnectionBody
],
Field(title="Actions"),
]
| BulkBodyConnectionBody |
python | apache__airflow | providers/ssh/tests/unit/ssh/operators/test_ssh.py | {
"start": 1837,
"end": 1921
} | class ____:
def __init__(self, hook):
self.hook = hook
| SSHClientSideEffect |
python | Netflix__metaflow | test/core/tests/basic_artifact.py | {
"start": 67,
"end": 974
} | class ____(MetaflowTest):
"""
Test that an artifact defined in the first step
is available in all steps downstream.
"""
PRIORITY = 0
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["start"])
def step_start(self):
self.data = "abc"
@steps(1, ["join"])
def step_join(self):
import metaflow_test
inputset = {inp.data for inp in inputs}
assert_equals({"abc"}, inputset)
self.data = list(inputset)[0]
@steps(2, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
for step in flow:
checker.assert_artifact(step.name, "data", "abc")
| BasicArtifactTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/values_test.py | {
"start": 12495,
"end": 13679
} | class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(dataset_fn_as_tf_function=[True, False]))
def testMapFnTracing(self, dataset_fn_as_tf_function):
# For a PerWorkerResource to correctly behave when used in dataset.map,
# it has to be that the map_fn is not traced only once such that
# PerWorkerResource.local_table can return the correct resource. This test
# can detect the potential breakage of this behavior on TAP.
self._traced_once = 0
def map_fn(x):
self._traced_once += 1
return x
def dataset_fn():
dataset = dataset_ops.DatasetV2.from_tensors([0, 1, 2]).repeat().batch(
2, drop_remainder=True)
dataset = dataset.map(map_fn)
return dataset
datasets = []
number_of_input_pipelines = 5
if dataset_fn_as_tf_function:
dataset_fn = def_function.function(dataset_fn)
expected_tracing_times = 1
else:
expected_tracing_times = number_of_input_pipelines
for _ in range(number_of_input_pipelines):
datasets.append(dataset_fn())
self.assertEqual(self._traced_once, expected_tracing_times)
| PerWorkerResourceTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.