language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | cython__cython | Demos/benchmarks/bm_richards_cclass.py | {
"start": 3118,
"end": 3331
} | class ____(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
| TaskWorkArea |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis15.py | {
"start": 315,
"end": 1409
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis15.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [45705856, 54518528]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_y_axis({"minor_unit": 0.4, "major_unit": 2})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pallets__flask | src/flask/views.py | {
"start": 5146,
"end": 6962
} | class ____(View):
"""Dispatches request methods to the corresponding instance methods.
For example, if you implement a ``get`` method, it will be used to
handle ``GET`` requests.
This can be useful for defining a REST API.
:attr:`methods` is automatically set based on the methods defined on
the class.
See :doc:`views` for a detailed guide.
.. code-block:: python
class CounterAPI(MethodView):
def get(self):
return str(session.get("counter", 0))
def post(self):
session["counter"] = session.get("counter", 0) + 1
return redirect(url_for("counter"))
app.add_url_rule(
"/counter", view_func=CounterAPI.as_view("counter")
)
"""
def __init_subclass__(cls, **kwargs: t.Any) -> None:
super().__init_subclass__(**kwargs)
if "methods" not in cls.__dict__:
methods = set()
for base in cls.__bases__:
if getattr(base, "methods", None):
methods.update(base.methods) # type: ignore[attr-defined]
for key in http_method_funcs:
if hasattr(cls, key):
methods.add(key.upper())
if methods:
cls.methods = methods
def dispatch_request(self, **kwargs: t.Any) -> ft.ResponseReturnValue:
meth = getattr(self, request.method.lower(), None)
# If the request method is HEAD and we don't have a handler for it
# retry with GET.
if meth is None and request.method == "HEAD":
meth = getattr(self, "get", None)
assert meth is not None, f"Unimplemented method {request.method!r}"
return current_app.ensure_sync(meth)(**kwargs) # type: ignore[no-any-return]
| MethodView |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 10250,
"end": 10671
} | class ____(SearchField):
field_type = "boolean"
def __init__(self, **kwargs):
if kwargs.get("facet_class") is None:
kwargs["facet_class"] = FacetBooleanField
super().__init__(**kwargs)
def prepare(self, obj):
return self.convert(super().prepare(obj))
def convert(self, value):
if value is None:
return None
return bool(value)
| BooleanField |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/check_numerics_callback_test.py | {
"start": 1701,
"end": 2657
} | class ____(test_util.TensorFlowTestCase):
def testLimitStringLengthWithExplicitLimit(self):
self.assertEqual(
check_numerics_callback.limit_string_length("", max_len=2), "")
self.assertEqual(
check_numerics_callback.limit_string_length("e", max_len=2), "e")
self.assertEqual(
check_numerics_callback.limit_string_length("de", max_len=2), "de")
self.assertEqual(
check_numerics_callback.limit_string_length("abcde", max_len=2),
"...de")
def testLimitStringLengthWithNoLimit(self):
self.assertEqual(check_numerics_callback.limit_string_length(
"A" * 100 + "B", max_len=None), "A" * 100 + "B")
self.assertEqual(
check_numerics_callback.limit_string_length("", max_len=None), "")
def testLimitStringLengthWithDefaultLimit(self):
self.assertEqual(
check_numerics_callback.limit_string_length("A" * 50 + "B"),
"..." + "A" * 49 + "B")
| LimitStringLengthTest |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/yaml_utils/source_position.py | {
"start": 238,
"end": 294
} | class ____(NamedTuple):
line: int
col: int
| LineCol |
python | PrefectHQ__prefect | tests/server/database/test_queries.py | {
"start": 211,
"end": 8131
} | class ____:
@pytest.fixture
async def work_queue_1(self, session):
return await models.work_queues.create_work_queue(
session=session, work_queue=schemas.actions.WorkQueueCreate(name="q1")
)
@pytest.fixture
async def work_queue_2(self, session):
return await models.work_queues.create_work_queue(
session=session, work_queue=schemas.actions.WorkQueueCreate(name="q2")
)
@pytest.fixture
async def deployment_1(self, session, flow, work_queue_1):
return await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="d1", flow_id=flow.id, work_queue_name=work_queue_1.name
),
)
@pytest.fixture
async def deployment_2(self, session, flow, work_queue_1):
return await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="d2", flow_id=flow.id, work_queue_name=work_queue_1.name
),
)
@pytest.fixture
async def deployment_3(self, session, flow, work_queue_2):
return await models.deployments.create_deployment(
session=session,
deployment=schemas.core.Deployment(
name="d3", flow_id=flow.id, work_queue_name=work_queue_2.name
),
)
@pytest.fixture
async def fr_1(self, session, deployment_1):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
name="fr1",
flow_id=deployment_1.flow_id,
deployment_id=deployment_1.id,
work_queue_name=deployment_1.work_queue_name,
state=schemas.states.Scheduled(now("UTC") - timedelta(minutes=2)),
),
)
return flow_run
@pytest.fixture
async def fr_2(self, session, deployment_2):
return await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
name="fr2",
flow_id=deployment_2.flow_id,
deployment_id=deployment_2.id,
work_queue_name=deployment_2.work_queue_name,
state=schemas.states.Scheduled(now("UTC") - timedelta(minutes=1)),
),
)
@pytest.fixture
async def fr_3(self, session, deployment_3):
return await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
name="fr3",
flow_id=deployment_3.flow_id,
deployment_id=deployment_3.id,
work_queue_name=deployment_3.work_queue_name,
state=schemas.states.Scheduled(now("UTC")),
),
)
@pytest.fixture(autouse=True)
async def commit_all(self, session, fr_1, fr_2, fr_3):
await session.commit()
async def test_get_runs_in_queue_query(
self, session, db, fr_1, fr_2, fr_3, work_queue_1, work_queue_2
):
query = db.queries.get_scheduled_flow_runs_from_work_queues()
result = await session.execute(query)
runs = result.all()
assert [r[0].id for r in runs] == [fr_1.id, fr_2.id, fr_3.id]
assert [r.wq_id for r in runs] == [
work_queue_1.id,
work_queue_1.id,
work_queue_2.id,
]
async def test_get_runs_in_queue_query_with_scalars(
self, session, db, fr_1, fr_2, fr_3, work_queue_1, work_queue_2
):
query = db.queries.get_scheduled_flow_runs_from_work_queues()
result = await session.execute(query)
# will only capture the flow run object
runs = result.scalars().unique().all()
assert [r.id for r in runs] == [fr_1.id, fr_2.id, fr_3.id]
async def test_get_runs_in_queue_limit(self, session, db, fr_1, fr_2, fr_3):
query = db.queries.get_scheduled_flow_runs_from_work_queues(limit_per_queue=1)
result = await session.execute(query)
runs = result.all()
assert [r[0].id for r in runs] == [fr_1.id, fr_3.id]
async def test_get_runs_in_queue_limit_sorts_correctly(
self, session, db, deployment_1
):
"""
Tests that the query sorts by scheduled time correctly; the unit tests with a small number of runs
can return the correct order even though no sort is applied.
https://github.com/PrefectHQ/prefect/pull/7457
"""
# clear all runs
await session.execute(sa.delete(db.FlowRun))
right_now = now("UTC")
# add a bunch of runs whose physical order is the opposite of the order they should be returned in
# in order to make it more likely (but not guaranteed!) that unsorted queries return the wrong value
for i in range(10, -10, -1):
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
name="fr1",
flow_id=deployment_1.flow_id,
deployment_id=deployment_1.id,
work_queue_name=deployment_1.work_queue_name,
state=schemas.states.Scheduled(right_now + timedelta(minutes=i)),
),
)
await session.commit()
query = db.queries.get_scheduled_flow_runs_from_work_queues(limit_per_queue=1)
result = await session.execute(query)
runs = result.all()
assert len(runs) == 1
assert runs[0][0].next_scheduled_start_time == right_now - timedelta(minutes=9)
async def test_get_runs_in_queue_scheduled_before(
self, session, db, fr_1, fr_2, fr_3
):
query = db.queries.get_scheduled_flow_runs_from_work_queues(
scheduled_before=now("UTC") - timedelta(seconds=90)
)
result = await session.execute(query)
runs = result.all()
assert [r[0].id for r in runs] == [fr_1.id]
async def test_get_runs_in_queue_work_queue_ids(
self, session, db, fr_1, fr_2, fr_3, work_queue_2
):
query = db.queries.get_scheduled_flow_runs_from_work_queues(
work_queue_ids=[work_queue_2.id]
)
result = await session.execute(query)
runs = result.all()
assert [r[0].id for r in runs] == [fr_3.id]
async def test_use_query_to_filter_deployments(
self, session, db, fr_1, fr_2, fr_3, work_queue_2
):
query = db.queries.get_scheduled_flow_runs_from_work_queues(
work_queue_ids=[work_queue_2.id]
)
# join query to deployments and filter for d3
query = query.cte("scheduled_runs_query")
query = (
sa.select(sa.orm.aliased(db.FlowRun, query))
.join(db.Deployment, query.c.deployment_id == db.Deployment.id)
.where(db.Deployment.name == "d3")
)
result = await session.execute(query)
runs = result.all()
assert [r[0].id for r in runs] == [fr_3.id]
async def test_query_skips_locked(self, db):
"""Concurrent queries should not both receive runs"""
if db.database_config.connection_url.startswith("sqlite"):
pytest.skip("FOR UPDATE SKIP LOCKED is not supported on SQLite")
query = db.queries.get_scheduled_flow_runs_from_work_queues()
session1 = await db.session()
session2 = await db.session()
async with session1:
async with session2:
async with session1.begin():
async with session2.begin():
result1 = (await session1.execute(query)).all()
result2 = (await session2.execute(query)).all()
assert len(result1) == 3
assert len(result2) == 0
| TestGetRunsInQueueQuery |
python | langchain-ai__langchain | libs/core/langchain_core/messages/ai.py | {
"start": 1175,
"end": 2088
} | class ____(TypedDict, total=False):
"""Breakdown of input token counts.
Does *not* need to sum to full input token count. Does *not* need to have all keys.
Example:
```python
{
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
}
```
May also hold extra provider-specific keys.
!!! version-added "Added in `langchain-core` 0.3.9"
"""
audio: int
"""Audio input tokens."""
cache_creation: int
"""Input tokens that were cached and there was a cache miss.
Since there was a cache miss, the cache was created from these tokens.
"""
cache_read: int
"""Input tokens that were cached and there was a cache hit.
Since there was a cache hit, the tokens were read from the cache. More precisely,
the model state given these tokens was read from the cache.
"""
| InputTokenDetails |
python | matplotlib__matplotlib | lib/matplotlib/backend_managers.py | {
"start": 549,
"end": 840
} | class ____:
"""
Event carrying messages from toolmanager.
Messages usually get displayed to the user by the toolbar.
"""
def __init__(self, name, sender, message):
self.name = name
self.sender = sender
self.message = message
| ToolManagerMessageEvent |
python | agronholm__apscheduler | src/apscheduler/triggers/cron/expressions.py | {
"start": 2109,
"end": 4626
} | class ____(AllExpression):
value_re: ClassVar[Pattern] = re.compile(
r"(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$"
)
first: int = attrs.field(
converter=as_int, validator=[instance_of(int), non_negative_number]
)
last: int | None = attrs.field(
converter=as_int,
validator=optional([instance_of(int), non_negative_number]),
default=None,
)
def __attrs_post_init__(self) -> None:
if self.last is None and self.step is None:
self.last = self.first
if self.last is not None and self.first > self.last:
raise ValueError(
"The minimum value in a range must not be higher than the maximum"
)
def validate_range(self, field_name: str, min_value: int, max_value: int) -> None:
super().validate_range(field_name, min_value, max_value)
if self.first < min_value:
raise ValueError(
f"the first value ({self.first}) is lower than the minimum value "
f"({min_value})"
)
if self.last is not None and self.last > max_value:
raise ValueError(
f"the last value ({self.last}) is higher than the maximum value "
f"({max_value})"
)
value_range = (self.last or max_value) - self.first
if self.step and self.step > value_range:
raise ValueError(
f"the step value ({self.step}) is higher than the total range of the "
f"expression ({value_range})"
)
def get_next_value(self, dateval: datetime, field: BaseField) -> int | None:
startval = field.get_value(dateval)
minval = field.get_min(dateval)
maxval = field.get_max(dateval)
# Apply range limits
minval = max(minval, self.first)
maxval = min(maxval, self.last) if self.last is not None else maxval
nextval = max(minval, startval)
# Apply the step if defined
if self.step:
distance_to_next = (self.step - (nextval - minval)) % self.step
nextval += distance_to_next
return nextval if nextval <= maxval else None
def __str__(self) -> str:
if self.last != self.first and self.last is not None:
rangeval = f"{self.first}-{self.last}"
else:
rangeval = str(self.first)
if self.step:
return f"{rangeval}/{self.step}"
return rangeval
| RangeExpression |
python | doocs__leetcode | solution/2000-2099/2075.Decode the Slanted Ciphertext/Solution.py | {
"start": 0,
"end": 363
} | class ____:
def decodeCiphertext(self, encodedText: str, rows: int) -> str:
ans = []
cols = len(encodedText) // rows
for j in range(cols):
x, y = 0, j
while x < rows and y < cols:
ans.append(encodedText[x * cols + y])
x, y = x + 1, y + 1
return ''.join(ans).rstrip()
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_bitcoin_tx_is_confirmed.py | {
"start": 1906,
"end": 4776
} | class ____(ColumnMapExpectation):
"""Expect column values Bitcoin transaction hash is confirmed."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"7f0881d44ad2c5972457825c0846ba330a53b159251e3f53a7d0de4cd2727590",
"4eeeaf8216f8ba3c5258dc1d4fc489a35381dda8259e5160d885b851e21e319d",
"6d12598311328a63c324ee7164b6d97e8d38b6af42cd4017643602fd982cc40c",
"ac6080a633e4bcf39e437ba160c95c8edfbb9fabe3525058a7fca2d890cc4ef5",
],
"some_other": [
"1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
"n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
"3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
"bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.bitcoin_tx_is_confirmed"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["blockcypher"],
}
if __name__ == "__main__":
ExpectColumnValuesBitcoinTxIsConfirmed().print_diagnostic_checklist()
| ExpectColumnValuesBitcoinTxIsConfirmed |
python | tornadoweb__tornado | tornado/http1connection.py | {
"start": 3438,
"end": 30323
} | class ____(httputil.HTTPConnection):
"""Implements the HTTP/1.x protocol.
This class can be on its own for clients, or via `HTTP1ServerConnection`
for servers.
"""
def __init__(
self,
stream: iostream.IOStream,
is_client: bool,
params: Optional[HTTP1ConnectionParameters] = None,
context: Optional[object] = None,
) -> None:
"""
:arg stream: an `.IOStream`
:arg bool is_client: client or server
:arg params: a `.HTTP1ConnectionParameters` instance or ``None``
:arg context: an opaque application-defined object that can be accessed
as ``connection.context``.
"""
self.is_client = is_client
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self.no_keep_alive = params.no_keep_alive
# The body limits can be altered by the delegate, so save them
# here instead of just referencing self.params later.
self._max_body_size = (
self.params.max_body_size
if self.params.max_body_size is not None
else self.stream.max_buffer_size
)
self._body_timeout = self.params.body_timeout
# _write_finished is set to True when finish() has been called,
# i.e. there will be no more data sent. Data may still be in the
# stream's write buffer.
self._write_finished = False
# True when we have read the entire incoming body.
self._read_finished = False
# _finish_future resolves when all data has been written and flushed
# to the IOStream.
self._finish_future = Future() # type: Future[None]
# If true, the connection should be closed after this request
# (after the response has been written in the server side,
# and after it has been read in the client)
self._disconnect_on_finish = False
self._clear_callbacks()
# Save the start lines after we read or write them; they
# affect later processing (e.g. 304 responses and HEAD methods
# have content-length but no bodies)
self._request_start_line = None # type: Optional[httputil.RequestStartLine]
self._response_start_line = None # type: Optional[httputil.ResponseStartLine]
self._request_headers = None # type: Optional[httputil.HTTPHeaders]
# True if we are writing output with chunked encoding.
self._chunking_output = False
# While reading a body with a content-length, this is the
# amount left to read.
self._expected_content_remaining = None # type: Optional[int]
# A Future for our outgoing writes, returned by IOStream.write.
self._pending_write = None # type: Optional[Future[None]]
def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
"""Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to a bool after the full response has
been read. The result is true if the stream is still open.
"""
if self.params.decompress:
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
return self._read_message(delegate)
async def _read_message(self, delegate: httputil.HTTPMessageDelegate) -> bool:
need_delegate_close = False
try:
header_future = self.stream.read_until_regex(
b"\r?\n\r?\n", max_bytes=self.params.max_header_size
)
if self.params.header_timeout is None:
header_data = await header_future
else:
try:
header_data = await gen.with_timeout(
self.stream.io_loop.time() + self.params.header_timeout,
header_future,
quiet_exceptions=iostream.StreamClosedError,
)
except gen.TimeoutError:
self.close()
return False
start_line_str, headers = self._parse_headers(header_data)
if self.is_client:
resp_start_line = httputil.parse_response_start_line(start_line_str)
self._response_start_line = resp_start_line
start_line = (
resp_start_line
) # type: Union[httputil.RequestStartLine, httputil.ResponseStartLine]
# TODO: this will need to change to support client-side keepalive
self._disconnect_on_finish = False
else:
req_start_line = httputil.parse_request_start_line(start_line_str)
self._request_start_line = req_start_line
self._request_headers = headers
start_line = req_start_line
self._disconnect_on_finish = not self._can_keep_alive(
req_start_line, headers
)
need_delegate_close = True
with _ExceptionLoggingContext(app_log):
header_recv_future = delegate.headers_received(start_line, headers)
if header_recv_future is not None:
await header_recv_future
if self.stream is None:
# We've been detached.
need_delegate_close = False
return False
skip_body = False
if self.is_client:
assert isinstance(start_line, httputil.ResponseStartLine)
if (
self._request_start_line is not None
and self._request_start_line.method == "HEAD"
):
skip_body = True
code = start_line.code
if code == 304:
# 304 responses may include the content-length header
# but do not actually have a body.
# http://tools.ietf.org/html/rfc7230#section-3.3
skip_body = True
if 100 <= code < 200:
# 1xx responses should never indicate the presence of
# a body.
if "Content-Length" in headers or "Transfer-Encoding" in headers:
raise httputil.HTTPInputError(
"Response code %d cannot have body" % code
)
# TODO: client delegates will get headers_received twice
# in the case of a 100-continue. Document or change?
await self._read_message(delegate)
else:
if headers.get("Expect") == "100-continue" and not self._write_finished:
self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
if not skip_body:
body_future = self._read_body(
resp_start_line.code if self.is_client else 0, headers, delegate
)
if body_future is not None:
if self._body_timeout is None:
await body_future
else:
try:
await gen.with_timeout(
self.stream.io_loop.time() + self._body_timeout,
body_future,
quiet_exceptions=iostream.StreamClosedError,
)
except gen.TimeoutError:
gen_log.info("Timeout reading body from %s", self.context)
self.stream.close()
return False
self._read_finished = True
if not self._write_finished or self.is_client:
need_delegate_close = False
with _ExceptionLoggingContext(app_log):
delegate.finish()
# If we're waiting for the application to produce an asynchronous
# response, and we're not detached, register a close callback
# on the stream (we didn't need one while we were reading)
if (
not self._finish_future.done()
and self.stream is not None
and not self.stream.closed()
):
self.stream.set_close_callback(self._on_connection_close)
await self._finish_future
if self.is_client and self._disconnect_on_finish:
self.close()
if self.stream is None:
return False
except httputil.HTTPInputError as e:
gen_log.info("Malformed HTTP message from %s: %s", self.context, e)
if not self.is_client:
await self.stream.write(b"HTTP/1.1 400 Bad Request\r\n\r\n")
self.close()
return False
finally:
if need_delegate_close:
with _ExceptionLoggingContext(app_log):
delegate.on_connection_close()
header_future = None # type: ignore
self._clear_callbacks()
return True
def _clear_callbacks(self) -> None:
"""Clears the callback attributes.
This allows the request handler to be garbage collected more
quickly in CPython by breaking up reference cycles.
"""
self._write_callback = None
self._write_future = None # type: Optional[Future[None]]
self._close_callback = None # type: Optional[Callable[[], None]]
if self.stream is not None:
self.stream.set_close_callback(None)
def set_close_callback(self, callback: Optional[Callable[[], None]]) -> None:
"""Sets a callback that will be run when the connection is closed.
Note that this callback is slightly different from
`.HTTPMessageDelegate.on_connection_close`: The
`.HTTPMessageDelegate` method is called when the connection is
closed while receiving a message. This callback is used when
there is not an active delegate (for example, on the server
side this callback is used if the client closes the connection
after sending its request but before receiving all the
response.
"""
self._close_callback = callback
def _on_connection_close(self) -> None:
# Note that this callback is only registered on the IOStream
# when we have finished reading the request and are waiting for
# the application to produce its response.
if self._close_callback is not None:
callback = self._close_callback
self._close_callback = None
callback()
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
self._clear_callbacks()
def close(self) -> None:
if self.stream is not None:
self.stream.close()
self._clear_callbacks()
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
def detach(self) -> iostream.IOStream:
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all further
HTTP processing. May only be called during
`.HTTPMessageDelegate.headers_received`. Intended for implementing
protocols like websockets that tunnel over an HTTP handshake.
"""
self._clear_callbacks()
stream = self.stream
self.stream = None # type: ignore
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
return stream
def set_body_timeout(self, timeout: float) -> None:
"""Sets the body timeout for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._body_timeout = timeout
def set_max_body_size(self, max_body_size: int) -> None:
"""Sets the body size limit for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._max_body_size = max_body_size
def write_headers(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
chunk: Optional[bytes] = None,
) -> "Future[None]":
"""Implements `.HTTPConnection.write_headers`."""
lines = []
if self.is_client:
assert isinstance(start_line, httputil.RequestStartLine)
self._request_start_line = start_line
lines.append(utf8(f"{start_line[0]} {start_line[1]} HTTP/1.1"))
# Client requests with a non-empty body must have either a
# Content-Length or a Transfer-Encoding. If Content-Length is not
# present we'll add our Transfer-Encoding below.
self._chunking_output = (
start_line.method in ("POST", "PUT", "PATCH")
and "Content-Length" not in headers
)
else:
assert isinstance(start_line, httputil.ResponseStartLine)
assert self._request_start_line is not None
assert self._request_headers is not None
self._response_start_line = start_line
lines.append(utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2])))
self._chunking_output = (
# TODO: should this use
# self._request_start_line.version or
# start_line.version?
self._request_start_line.version == "HTTP/1.1"
# Omit payload header field for HEAD request.
and self._request_start_line.method != "HEAD"
# 1xx, 204 and 304 responses have no body (not even a zero-length
# body), and so should not have either Content-Length or
# Transfer-Encoding headers.
and start_line.code not in (204, 304)
and (start_line.code < 100 or start_line.code >= 200)
# No need to chunk the output if a Content-Length is specified.
and "Content-Length" not in headers
)
# If connection to a 1.1 client will be closed, inform client
if (
self._request_start_line.version == "HTTP/1.1"
and self._disconnect_on_finish
):
headers["Connection"] = "close"
# If a 1.0 client asked for keep-alive, add the header.
if (
self._request_start_line.version == "HTTP/1.0"
and self._request_headers.get("Connection", "").lower() == "keep-alive"
):
headers["Connection"] = "Keep-Alive"
if self._chunking_output:
headers["Transfer-Encoding"] = "chunked"
if not self.is_client and (
self._request_start_line.method == "HEAD"
or cast(httputil.ResponseStartLine, start_line).code == 304
):
self._expected_content_remaining = 0
elif "Content-Length" in headers:
self._expected_content_remaining = parse_int(headers["Content-Length"])
else:
self._expected_content_remaining = None
# TODO: headers are supposed to be of type str, but we still have some
# cases that let bytes slip through. Remove these native_str calls when those
# are fixed.
header_lines = (
native_str(n) + ": " + native_str(v) for n, v in headers.get_all()
)
lines.extend(line.encode("latin1") for line in header_lines)
for line in lines:
if CR_OR_LF_RE.search(line):
raise ValueError("Illegal characters (CR or LF) in header: %r" % line)
future = None
if self.stream.closed():
future = self._write_future = Future()
future.set_exception(iostream.StreamClosedError())
future.exception()
else:
future = self._write_future = Future()
data = b"\r\n".join(lines) + b"\r\n\r\n"
if chunk:
data += self._format_chunk(chunk)
self._pending_write = self.stream.write(data)
future_add_done_callback(self._pending_write, self._on_write_complete)
return future
def _format_chunk(self, chunk: bytes) -> bytes:
if self._expected_content_remaining is not None:
self._expected_content_remaining -= len(chunk)
if self._expected_content_remaining < 0:
# Close the stream now to stop further framing errors.
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write more data than Content-Length"
)
if self._chunking_output and chunk:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
else:
return chunk
def write(self, chunk: bytes) -> "Future[None]":
"""Implements `.HTTPConnection.write`.
For backwards compatibility it is allowed but deprecated to
skip `write_headers` and instead call `write()` with a
pre-encoded header block.
"""
future = None
if self.stream.closed():
future = self._write_future = Future()
self._write_future.set_exception(iostream.StreamClosedError())
self._write_future.exception()
else:
future = self._write_future = Future()
self._pending_write = self.stream.write(self._format_chunk(chunk))
future_add_done_callback(self._pending_write, self._on_write_complete)
return future
def finish(self) -> None:
"""Implements `.HTTPConnection.finish`."""
if (
self._expected_content_remaining is not None
and self._expected_content_remaining != 0
and not self.stream.closed()
):
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write %d bytes less than Content-Length"
% self._expected_content_remaining
)
if self._chunking_output:
if not self.stream.closed():
self._pending_write = self.stream.write(b"0\r\n\r\n")
self._pending_write.add_done_callback(self._on_write_complete)
self._write_finished = True
# If the app finished the request while we're still reading,
# divert any remaining data away from the delegate and
# close the connection when we're done sending our response.
# Closing the connection is the only way to avoid reading the
# whole input body.
if not self._read_finished:
self._disconnect_on_finish = True
# No more data is coming, so instruct TCP to send any remaining
# data immediately instead of waiting for a full packet or ack.
self.stream.set_nodelay(True)
if self._pending_write is None:
self._finish_request(None)
else:
future_add_done_callback(self._pending_write, self._finish_request)
def _on_write_complete(self, future: "Future[None]") -> None:
    """Stream-write callback: surface unexpected errors and resolve
    whichever write callback/future is currently pending.
    """
    err = future.exception()
    if err is not None and not isinstance(err, iostream.StreamClosedError):
        # Re-raise anything unexpected; a closed stream is reported
        # through the normal read/close path instead.
        future.result()
    callback, self._write_callback = self._write_callback, None
    if callback is not None:
        self.stream.io_loop.add_callback(callback)
    pending, self._write_future = self._write_future, None
    if pending is not None:
        future_set_result_unless_cancelled(pending, None)
def _can_keep_alive(
    self, start_line: httputil.RequestStartLine, headers: httputil.HTTPHeaders
) -> bool:
    """Decide whether the connection may be reused for another message.

    HTTP/1.1 defaults to keep-alive unless ``Connection: close`` is
    sent; older versions require an explicit ``Connection: keep-alive``
    and a body whose end can be determined without closing.
    """
    if self.params.no_keep_alive:
        return False
    token = headers.get("Connection")
    if token is not None:
        token = token.lower()
    if start_line.version == "HTTP/1.1":
        return token != "close"
    if (
        "Content-Length" in headers
        or is_transfer_encoding_chunked(headers)
        or getattr(start_line, "method", None) in ("HEAD", "GET")
    ):
        # start_line may be a request or response start line; only
        # the former has a method attribute.
        return token == "keep-alive"
    return False
def _finish_request(self, future: "Optional[Future[None]]") -> None:
    """Tear down per-request state once the response has been written."""
    self._clear_callbacks()
    must_close = (not self.is_client) and self._disconnect_on_finish
    if must_close:
        # Server side chose to drop the connection (e.g. unread body).
        self.close()
        return
    # Turn Nagle's algorithm back on, leaving the stream in its
    # default state for the next request.
    self.stream.set_nodelay(False)
    if not self._finish_future.done():
        future_set_result_unless_cancelled(self._finish_future, None)
def _parse_headers(self, data: bytes) -> Tuple[str, httputil.HTTPHeaders]:
    """Split a raw header block into its start line and parsed headers.

    The bytes are decoded as latin1 so arbitrary octets survive the
    round trip. Per RFC 7230 section 3.5 we skip blank lines that some
    peers insert between messages on a reused connection, and we accept
    both CRLF and bare LF line endings.
    """
    text = native_str(data.decode("latin1")).lstrip("\r\n")
    first_eol = text.find("\n")
    request_line = text[:first_eol].rstrip("\r")
    parsed = httputil.HTTPHeaders.parse(text[first_eol:])
    return request_line, parsed
def _read_body(
    self,
    code: int,
    headers: httputil.HTTPHeaders,
    delegate: httputil.HTTPMessageDelegate,
) -> Optional[Awaitable[None]]:
    """Choose and start the strategy for reading the message body.

    Returns an awaitable that completes when the body has been fully
    delivered to ``delegate``, or ``None`` when the message has no body
    to read. Raises `httputil.HTTPInputError` on malformed or oversized
    framing information.
    """
    if "Content-Length" in headers:
        if "," in headers["Content-Length"]:
            # Proxies sometimes cause Content-Length headers to get
            # duplicated. If all the values are identical then we can
            # use them but if they differ it's an error.
            pieces = re.split(r",\s*", headers["Content-Length"])
            if any(i != pieces[0] for i in pieces):
                raise httputil.HTTPInputError(
                    "Multiple unequal Content-Lengths: %r"
                    % headers["Content-Length"]
                )
            headers["Content-Length"] = pieces[0]
        try:
            content_length: Optional[int] = parse_int(headers["Content-Length"])
        except ValueError:
            # Handles non-integer Content-Length value.
            raise httputil.HTTPInputError(
                "Only integer Content-Length is allowed: %s"
                % headers["Content-Length"]
            )
        if cast(int, content_length) > self._max_body_size:
            raise httputil.HTTPInputError("Content-Length too long")
    else:
        content_length = None
    is_chunked = is_transfer_encoding_chunked(headers)
    if code == 204:
        # This response code is not allowed to have a non-empty body,
        # and has an implicit length of zero instead of read-until-close.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
        if is_chunked or content_length not in (None, 0):
            raise httputil.HTTPInputError(
                "Response with code %d should not have body" % code
            )
        content_length = 0
    # Chunked transfer-encoding takes precedence over Content-Length.
    if is_chunked:
        return self._read_chunked_body(delegate)
    if content_length is not None:
        return self._read_fixed_body(content_length, delegate)
    if self.is_client:
        # For responses, absence of framing means read until the
        # server closes the connection.
        return self._read_body_until_close(delegate)
    # A request with no framing information has no body.
    return None
async def _read_fixed_body(
    self, content_length: int, delegate: httputil.HTTPMessageDelegate
) -> None:
    """Read exactly ``content_length`` bytes, feeding the delegate
    chunk by chunk as data arrives.
    """
    remaining = content_length
    while remaining > 0:
        chunk = await self.stream.read_bytes(
            min(self.params.chunk_size, remaining), partial=True
        )
        remaining -= len(chunk)
        # On the server, once our response is finished the body is
        # only being drained, not processed — skip delivery then.
        if self.is_client or not self._write_finished:
            with _ExceptionLoggingContext(app_log):
                maybe_future = delegate.data_received(chunk)
                if maybe_future is not None:
                    await maybe_future
async def _read_chunked_body(self, delegate: httputil.HTTPMessageDelegate) -> None:
    """Read a ``Transfer-Encoding: chunked`` body and feed the delegate.

    Enforces ``self._max_body_size`` against the running total of all
    chunk sizes and raises `httputil.HTTPInputError` on malformed
    framing.
    """
    # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
    total_size = 0
    while True:
        # Each chunk starts with a hex length terminated by CRLF; 64
        # bytes is far more than any legitimate size line needs.
        chunk_len_str = await self.stream.read_until(b"\r\n", max_bytes=64)
        try:
            chunk_len = parse_hex_int(native_str(chunk_len_str[:-2]))
        except ValueError:
            raise httputil.HTTPInputError("invalid chunk size")
        if chunk_len == 0:
            # The zero-length chunk terminates the body; it must be
            # followed by a final empty line (trailers not supported).
            crlf = await self.stream.read_bytes(2)
            if crlf != b"\r\n":
                raise httputil.HTTPInputError(
                    "improperly terminated chunked request"
                )
            return
        total_size += chunk_len
        if total_size > self._max_body_size:
            raise httputil.HTTPInputError("chunked body too large")
        bytes_to_read = chunk_len
        while bytes_to_read:
            chunk = await self.stream.read_bytes(
                min(bytes_to_read, self.params.chunk_size), partial=True
            )
            bytes_to_read -= len(chunk)
            # Suppress delivery when the server response is already
            # finished (the body is drained, not processed).
            if not self._write_finished or self.is_client:
                with _ExceptionLoggingContext(app_log):
                    ret = delegate.data_received(chunk)
                    if ret is not None:
                        await ret
        # chunk ends with \r\n
        crlf = await self.stream.read_bytes(2)
        assert crlf == b"\r\n"
async def _read_body_until_close(
    self, delegate: httputil.HTTPMessageDelegate
) -> None:
    """Consume the stream to EOF and hand everything to the delegate."""
    payload = await self.stream.read_until_close()
    if self._write_finished and not self.is_client:
        # Server already finished its response: drain only, don't
        # deliver the data.
        return
    with _ExceptionLoggingContext(app_log):
        ret = delegate.data_received(payload)
        if ret is not None:
            await ret
| HTTP1Connection |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 23783,
"end": 23981
} | class ____(StateMachineEvent):
"""Abstract base event for all the possible outcomes of a :class:`Compute`
instruction
"""
key: Key
__slots__ = ("key",)
@dataclass
| ExecuteDoneEvent |
python | django-haystack__django-haystack | haystack/forms.py | {
"start": 3615,
"end": 3994
} | class ____(ModelSearchForm):
selected_facets = forms.CharField(required=False, widget=forms.HiddenInput)
def search(self):
sqs = super().search()
if hasattr(self, "cleaned_data") and self.cleaned_data["selected_facets"]:
sqs = sqs.narrow(self.cleaned_data["selected_facets"])
return sqs.models(*self.get_models())
| FacetedModelSearchForm |
python | pydantic__pydantic | pydantic/v1/fields.py | {
"start": 50366,
"end": 50645
} | class ____:
"""
Used to postpone field preparation, while creating recursive generic models.
"""
def is_finalvar_with_default_val(type_: Type[Any], val: Any) -> bool:
return is_finalvar(type_) and val is not Undefined and not isinstance(val, FieldInfo)
| DeferredType |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 77894,
"end": 79397
} | class ____(UserDefinedObjectVariable):
def __init__(self, value, **kwargs):
super().__init__(value, **kwargs)
self.exc_vt = variables.ExceptionVariable(self.value_type, ())
@property
def fn(self):
return self.value_type
def call_method(self, tx, name, args, kwargs):
if (
name == "__init__"
and (method := self._maybe_get_baseclass_method(name))
and inspect.ismethoddescriptor(method)
and len(kwargs) == 0
):
self.exc_vt.args = args
self.value.args = args
return variables.ConstantVariable(None)
elif (
name == "__setattr__"
and len(args) == 2
and isinstance(args[0], variables.ConstantVariable)
and args[0].value
in ("__cause__", "__context__", "__suppress_context__", "__traceback__")
):
self.exc_vt.call_setattr(tx, args[0], args[1])
elif name == "with_traceback":
return self.exc_vt.call_method(tx, name, args, kwargs)
return super().call_method(tx, name, args, kwargs)
@property
def __context__(self):
return self.exc_vt.__context__
@property
def args(self):
return self.exc_vt.args
def set_context(self, context: "variables.ExceptionVariable"):
return self.exc_vt.set_context(context)
@property
def exc_type(self):
return self.exc_vt.exc_type
| UserDefinedExceptionObjectVariable |
python | ansible__ansible | test/lib/ansible_test/_internal/provider/layout/collection.py | {
"start": 246,
"end": 6131
} | class ____(LayoutProvider):
"""Layout provider for Ansible collections."""
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections':
return True
return False
def create(self, root: str, paths: list[str]) -> ContentLayout:
"""Create a Layout using the given root and paths."""
plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES)
collection_root = os.path.dirname(os.path.dirname(root))
collection_dir = os.path.relpath(root, collection_root)
collection_namespace: str
collection_name: str
collection_namespace, collection_name = collection_dir.split(os.path.sep)
collection_root = os.path.dirname(collection_root)
sanity_messages = LayoutMessages()
integration_messages = LayoutMessages()
unit_messages = LayoutMessages()
# these apply to all test commands
self.__check_test_path(paths, sanity_messages)
self.__check_test_path(paths, integration_messages)
self.__check_test_path(paths, unit_messages)
# these apply to specific test commands
integration_targets_path = self.__check_integration_path(paths, integration_messages)
self.__check_unit_path(paths, unit_messages)
errors: list[str] = []
if not is_valid_identifier(collection_namespace):
errors.append(f'The namespace "{collection_namespace}" is an invalid identifier or a reserved keyword.')
if not is_valid_identifier(collection_name):
errors.append(f'The name "{collection_name}" is an invalid identifier or a reserved keyword.')
return ContentLayout(
root,
paths,
plugin_paths=plugin_paths,
collection=CollectionDetail(
name=collection_name,
namespace=collection_namespace,
root=collection_root,
),
test_path='tests',
results_path='tests/output',
sanity_path='tests/sanity',
sanity_messages=sanity_messages,
integration_path='tests/integration',
integration_targets_path=integration_targets_path.rstrip(os.path.sep),
integration_vars_path='tests/integration/integration_config.yml',
integration_messages=integration_messages,
unit_path='tests/unit',
unit_module_path='tests/unit/plugins/modules',
unit_module_utils_path='tests/unit/plugins/module_utils',
unit_messages=unit_messages,
unsupported=errors,
)
@staticmethod
def __check_test_path(paths: list[str], messages: LayoutMessages) -> None:
modern_test_path = 'tests/'
modern_test_path_found = any(path.startswith(modern_test_path) for path in paths)
legacy_test_path = 'test/'
legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths)
if modern_test_path_found and legacy_test_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path))
elif legacy_test_path_found:
messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path))
@staticmethod
def __check_integration_path(paths: list[str], messages: LayoutMessages) -> str:
modern_integration_path = 'roles/test/'
modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths)
legacy_integration_path = 'tests/integration/targets/'
legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths)
if modern_integration_path_found and legacy_integration_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_integration_path, modern_integration_path))
integration_targets_path = modern_integration_path
elif legacy_integration_path_found:
messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path))
integration_targets_path = legacy_integration_path
elif modern_integration_path_found:
messages.info.append('Loading tests from "%s".' % modern_integration_path)
integration_targets_path = modern_integration_path
else:
messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path))
integration_targets_path = modern_integration_path
return integration_targets_path
@staticmethod
def __check_unit_path(paths: list[str], messages: LayoutMessages) -> None:
modern_unit_path = 'tests/unit/'
modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths)
legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/
legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths)
if modern_unit_path_found and legacy_unit_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path))
elif legacy_unit_path_found:
messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path))
elif modern_unit_path_found:
pass # unit tests only run from one directory so no message is needed
else:
messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path)
| CollectionLayout |
python | spyder-ide__spyder | spyder/api/widgets/mixins.py | {
"start": 5071,
"end": 8708
} | class ____:
"""
Provide methods to create, add and get toolbars.
"""
def add_item_to_toolbar(self, action_or_widget, toolbar, section=None,
before=None, before_section=None):
"""
If you provide a `before` action, the action will be placed before this
one, so the section option will be ignored, since the action will now
be placed in the same section as the `before` action.
"""
toolbar.add_item(action_or_widget, section=section, before=before,
before_section=before_section)
def create_stretcher(self, id_=None):
"""
Create a stretcher widget to be used in a Qt toolbar.
"""
stretcher = QWidget(self)
stretcher.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
if id_ is not None:
stretcher.ID = id_
return stretcher
def create_toolbar(
self,
name: str,
register: bool = True
) -> SpyderToolbar:
"""
Create a Spyder toolbar.
Parameters
----------
name: str
Name of the toolbar to create.
register: bool
Whether to register the toolbar in the global registry.
"""
toolbar = SpyderToolbar(self, name)
toolbar.setStyleSheet(str(PANES_TOOLBAR_STYLESHEET))
if register:
TOOLBAR_REGISTRY.register_reference(
toolbar, name, self.PLUGIN_NAME, self.CONTEXT_NAME
)
return toolbar
def get_toolbar(self, name: str, context: Optional[str] = None,
plugin: Optional[str] = None) -> QToolBar:
"""
Return toolbar by name, plugin and context.
Parameters
----------
name: str
Name of the toolbar to retrieve.
context: Optional[str]
Widget or context identifier under which the toolbar was stored.
If None, then `CONTEXT_NAME` is used instead
plugin: Optional[str]
Name of the plugin where the toolbar was defined. If None, then
`PLUGIN_NAME` is used.
Returns
-------
toolbar: QToolBar
The corresponding toolbar stored under the given `name`, `context`
and `plugin`.
Raises
------
KeyError
If either of `name`, `context` or `plugin` keys do not exist in the
toolbar registry.
"""
plugin = self.PLUGIN_NAME if plugin is None else plugin
context = self.CONTEXT_NAME if context is None else context
return TOOLBAR_REGISTRY.get_reference(name, plugin, context)
def get_toolbars(self, context: Optional[str] = None,
plugin: Optional[str] = None) -> Dict[str, QToolBar]:
"""
Return all toolbars defined by a context on a given plugin.
Parameters
----------
context: Optional[str]
Widget or context identifier under which the toolbars were stored.
If None, then `CONTEXT_NAME` is used instead
plugin: Optional[str]
Name of the plugin where the toolbars were defined. If None, then
`PLUGIN_NAME` is used.
Returns
-------
toolbars: Dict[str, QToolBar]
A dictionary that maps string keys to their corresponding toolbars.
"""
plugin = self.PLUGIN_NAME if plugin is None else plugin
context = self.CONTEXT_NAME if context is None else context
return TOOLBAR_REGISTRY.get_references(plugin, context)
| SpyderToolbarMixin |
python | getsentry__sentry | src/sentry/testutils/hybrid_cloud.py | {
"start": 812,
"end": 2708
} | class ____:
@property
def ScheduledDeletion(self) -> type[BaseScheduledDeletion]:
return get_regional_scheduled_deletion(SiloMode.get_current_mode())
@assume_test_silo_mode(SiloMode.CONTROL)
def assert_org_member_mapping(self, org_member: OrganizationMember, expected=None):
org_member.refresh_from_db()
org_member_mapping_query = OrganizationMemberMapping.objects.filter(
organization_id=org_member.organization_id,
organizationmember_id=org_member.id,
)
assert org_member_mapping_query.count() == 1
org_member_mapping = org_member_mapping_query.get()
email = org_member_mapping.email
user_id = org_member_mapping.user_id
# only either user_id or email should have a value, but not both.
assert (email is None and user_id) or (email and user_id is None)
assert org_member_mapping.role == org_member.role
if org_member.inviter_id:
assert org_member_mapping.inviter_id == org_member.inviter_id
else:
assert org_member_mapping.inviter_id is None
assert org_member_mapping.invite_status == org_member.invite_status
if expected:
for key, expected_value in expected.items():
assert getattr(org_member_mapping, key) == expected_value
@assume_test_silo_mode(SiloMode.CONTROL)
def assert_org_member_mapping_not_exists(self, org_member: OrganizationMember):
email = org_member.email
user_id = org_member.user_id
# only either user_id or email should have a value, but not both.
assert (email is None and user_id) or (email and user_id is None)
assert not OrganizationMemberMapping.objects.filter(
organization_id=org_member.organization_id,
organizationmember_id=org_member.id,
).exists()
| HybridCloudTestMixin |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/matrix_multiply.py | {
"start": 1246,
"end": 3935
} | class ____(MatrixMultiplyOperator):
"""Operator for matrix multiplication (torch.mm)."""
def __init__(self):
super().__init__("mm")
self.weight = 5.0
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.mm"
def can_produce(self, output_spec: Spec) -> bool:
"""MM requires exactly 2D tensors."""
if not isinstance(output_spec, TensorSpec):
return False
# Must have exactly 2 dimensions for torch.mm
if len(output_spec.size) != 2:
return False
# Matrix multiply doesn't work with bool or integer types for gradients
if output_spec.dtype in [
torch.bool,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
]:
return False
return True
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for matrix multiplication."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("MMOperator can only produce TensorSpec outputs")
if len(output_spec.size) != 2:
raise ValueError("torch.mm requires 2D tensors")
m, n = output_spec.size
# Choose a random inner dimension k
k = random.randint(1, 16)
dtypes = self._get_compatible_dtype(output_spec.dtype)
# First tensor: [m, k]
input1_spec = TensorSpec(
size=(m, k),
stride=(k, 1), # Contiguous stride
dtype=dtypes[0],
)
# Second tensor: [k, n]
input2_spec = TensorSpec(
size=(k, n),
stride=(n, 1), # Contiguous stride
dtype=dtypes[1] if len(dtypes) > 1 else dtypes[0],
)
return [input1_spec, input2_spec]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for matrix multiplication."""
if len(input_names) != 2:
raise ValueError("torch.mm requires exactly 2 inputs")
# Get target dtype
if isinstance(output_spec, TensorSpec):
target_dtype_str = f"torch.{output_spec.dtype}".replace(
"torch.torch.", "torch."
)
# Cast inputs to ensure compatible types
return (
f"{output_name} = torch.mm("
f"{input_names[0]}.to({target_dtype_str}), "
f"{input_names[1]}.to({target_dtype_str}))"
)
else:
return f"{output_name} = torch.mm({input_names[0]}, {input_names[1]})"
| MMOperator |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 13238,
"end": 13506
} | class ____(AtomicRule):
n: Expr
a: Expr
b: Expr
def eval(self) -> Expr:
n, a, b, x = self.n, self.a, self.b, self.variable
if n == 0:
return Heaviside(a+b*x)/b
return DiracDelta(a+b*x, n-1)/b
@dataclass
| DiracDeltaRule |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/synapse.py | {
"start": 6896,
"end": 12409
} | class ____(BaseOperator):
"""
Execute a Synapse Pipeline.
:param pipeline_name: The name of the pipeline to execute.
:param azure_synapse_conn_id: The Airflow connection ID for Azure Synapse.
:param azure_synapse_workspace_dev_endpoint: The Azure Synapse workspace development endpoint.
:param wait_for_termination: Flag to wait on a pipeline run's termination.
:param reference_pipeline_run_id: The pipeline run identifier. If this run ID is specified the parameters
of the specified run will be used to create a new run.
:param is_recovery: Recovery mode flag. If recovery mode is set to `True`, the specified referenced
pipeline run and the new run will be grouped under the same ``groupId``.
:param start_activity_name: In recovery mode, the rerun will start from this activity. If not specified,
all activities will run.
:param parameters: Parameters of the pipeline run. These parameters are referenced in a pipeline via
``@pipeline().parameters.parameterName`` and will be used only if the ``reference_pipeline_run_id`` is
not specified.
:param timeout: Time in seconds to wait for a pipeline to reach a terminal status for non-asynchronous
waits. Used only if ``wait_for_termination`` is True.
:param check_interval: Time in seconds to check on a pipeline run's status for non-asynchronous waits.
Used only if ``wait_for_termination`` is True.
"""
template_fields: Sequence[str] = ("azure_synapse_conn_id",)
operator_extra_links = (AzureSynapsePipelineRunLink(),)
def __init__(
self,
pipeline_name: str,
azure_synapse_conn_id: str,
azure_synapse_workspace_dev_endpoint: str,
wait_for_termination: bool = True,
reference_pipeline_run_id: str | None = None,
is_recovery: bool | None = None,
start_activity_name: str | None = None,
parameters: dict[str, Any] | None = None,
timeout: int = 60 * 60 * 24 * 7,
check_interval: int = 60,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.azure_synapse_conn_id = azure_synapse_conn_id
self.pipeline_name = pipeline_name
self.azure_synapse_workspace_dev_endpoint = azure_synapse_workspace_dev_endpoint
self.wait_for_termination = wait_for_termination
self.reference_pipeline_run_id = reference_pipeline_run_id
self.is_recovery = is_recovery
self.start_activity_name = start_activity_name
self.parameters = parameters
self.timeout = timeout
self.check_interval = check_interval
@cached_property
def hook(self):
"""Create and return an AzureSynapsePipelineHook (cached)."""
return AzureSynapsePipelineHook(
azure_synapse_conn_id=self.azure_synapse_conn_id,
azure_synapse_workspace_dev_endpoint=self.azure_synapse_workspace_dev_endpoint,
)
def execute(self, context) -> None:
self.log.info("Executing the %s pipeline.", self.pipeline_name)
response = self.hook.run_pipeline(
pipeline_name=self.pipeline_name,
reference_pipeline_run_id=self.reference_pipeline_run_id,
is_recovery=self.is_recovery,
start_activity_name=self.start_activity_name,
parameters=self.parameters,
)
self.run_id = vars(response)["run_id"]
# Push the ``run_id`` value to XCom regardless of what happens during execution. This allows for
# retrieval the executed pipeline's ``run_id`` for downstream tasks especially if performing an
# asynchronous wait.
context["ti"].xcom_push(key="run_id", value=self.run_id)
if self.wait_for_termination:
self.log.info("Waiting for pipeline run %s to terminate.", self.run_id)
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureSynapsePipelineRunStatus.SUCCEEDED,
check_interval=self.check_interval,
timeout=self.timeout,
):
self.log.info("Pipeline run %s has completed successfully.", self.run_id)
else:
raise AzureSynapsePipelineRunException(
f"Pipeline run {self.run_id} has failed or has been cancelled."
)
def execute_complete(self, event: dict[str, str]) -> None:
"""
Return immediately - callback for when the trigger fires.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event:
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(event["message"])
def on_kill(self) -> None:
if self.run_id:
self.hook.cancel_run_pipeline(run_id=self.run_id)
# Check to ensure the pipeline run was cancelled as expected.
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureSynapsePipelineRunStatus.CANCELLED,
check_interval=self.check_interval,
timeout=self.timeout,
):
self.log.info("Pipeline run %s has been cancelled successfully.", self.run_id)
else:
raise AzureSynapsePipelineRunException(f"Pipeline run {self.run_id} was not cancelled.")
| AzureSynapseRunPipelineOperator |
python | PyCQA__pylint | tests/functional/m/missing/missing_kwoa.py | {
"start": 1218,
"end": 1641
} | class ____(Parent):
def __init__(self, *, first, second):
super().__init__(first=first, second=second)
self._first = first + second
@contextlib.contextmanager
def run(*, a):
yield
def test_context_managers(**kw):
run(**kw)
with run(**kw):
pass
with run(**kw), run(**kw):
pass
with run(**kw), run(): # [missing-kwoa]
pass
test_context_managers(a=1)
| Child |
python | kamyu104__LeetCode-Solutions | Python/distribute-candies-among-children-i.py | {
"start": 772,
"end": 1086
} | class ____(object):
def distributeCandies(self, n, limit):
"""
:type n: int
:type limit: int
:rtype: int
"""
return sum(min(limit, n-i)-max((n-i)-limit, 0)+1 for i in xrange(max(n-2*limit, 0), min(limit, n)+1))
# Time: O(n^2)
# Space: O(1)
# brute force
| Solution2 |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Data.py | {
"start": 12438,
"end": 12801
} | class ____(CtrlNode):
"""Calculate the maximum of an array across an axis.
"""
nodeName = 'Max'
uiTemplate = [
('axis', 'intSpin', {'value': 0, 'min': -1, 'max': 1000000}),
]
def processData(self, data):
s = self.stateGroup.state()
ax = None if s['axis'] == -1 else s['axis']
return data.max(axis=ax)
| Max |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/xaxis/_title.py | {
"start": 235,
"end": 2861
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.xaxis"
_path_str = "layout.scene.xaxis.title"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this axis' title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.xaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.scene.xaxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of this axis.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font.
text
Sets the title of this axis.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.xaxis.Title`
font
Sets this axis' title font.
text
Sets the title of this axis.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.xaxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.xaxis.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | google__jax | tests/pallas/tpu_pallas_distributed_test.py | {
"start": 1071,
"end": 16097
} | class ____(parameterized.TestCase):
def setUp(self):
super().setUp()
if jax.device_count() < 2:
self.skipTest('Only >=2 devices are supported.')
if not jtu.is_device_tpu(5, 'e'):
self.skipTest('Only works with TPU v5e.')
@parameterized.named_parameters(
('vmem', pltpu.VMEM),
('hbm', pltpu.ANY),
)
def test_basic_remote_vmem_dma(self, mem):
# Implements very simple collective permute
def kernel(x_ref, y_ref):
def body(ready_sem, send_sem, recv_sem):
other_dev_id = 1 - lax.axis_index('x')
pltpu.semaphore_signal(ready_sem, device_id=other_dev_id,
device_id_type=pltpu.DeviceIdType.LOGICAL)
pltpu.semaphore_wait(ready_sem)
copy_done = pltpu.async_remote_copy(
x_ref, y_ref, send_sem, recv_sem, other_dev_id,
device_id_type=pltpu.DeviceIdType.LOGICAL,
)
copy_done.wait_send()
copy_done.wait_recv()
pl.run_scoped(
body,
pltpu.SemaphoreType.REGULAR,
pltpu.SemaphoreType.DMA,
pltpu.SemaphoreType.DMA,
)
x = jnp.arange(2 * 8 * 128.0).reshape((2 * 8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=mem)],
out_specs=pl.BlockSpec(memory_space=mem),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32, vma=frozenset('x')),
)(x)
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
f = jax.jit(
shard_map.shard_map(
body, mesh=mesh, in_specs=P('x'), out_specs=P('x'),
)
)
jaxpr = f.trace(x).jaxpr
self.assertNotIn('pvary', str(jaxpr))
y = f(x)
expected = jnp.concatenate([x[8:], x[:8]])
np.testing.assert_allclose(y, expected)
def test_vma_error(self):
def kernel(x_ref, y_ref):
def body(ready_sem, send_sem, recv_sem):
other_dev_id = 1 - lax.axis_index('x')
pltpu.semaphore_signal(ready_sem, device_id=other_dev_id,
device_id_type=pltpu.DeviceIdType.LOGICAL)
pltpu.semaphore_wait(ready_sem)
copy_done = pltpu.async_remote_copy(
x_ref, y_ref, send_sem, recv_sem, other_dev_id,
device_id_type=pltpu.DeviceIdType.LOGICAL,
)
copy_done.wait_send()
copy_done.wait_recv()
pl.run_scoped(
body,
pltpu.SemaphoreType.REGULAR,
pltpu.SemaphoreType.DMA,
pltpu.SemaphoreType.DMA,
)
x = jnp.arange(2 * 8 * 128.0).reshape((2 * 8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.ANY)],
out_specs=pl.BlockSpec(memory_space=pltpu.ANY),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
)(x)
devices = jax.devices()[:2]
mesh = jax.sharding.Mesh(devices, ['x'])
f = jax.jit(
shard_map.shard_map(
body, mesh=mesh, in_specs=P('x'), out_specs=P('x'),
)
)
with self.assertRaisesRegex(
ValueError,
'When `check_vma=True` on `jax.shard_map`, `vma` on'
' `jax.ShapeDtypeStruct` must not be `None`'):
f(x)
@parameterized.named_parameters(
('left', 'left'),
('right', 'right')
)
def test_pallas_call_axis_index(self, direction):
# Implements very simple collective permute
def kernel(x_ref, y_ref):
def body(ready_sem, send_sem, recv_sem):
my_id = lax.axis_index('x')
num_devices = lax.axis_size('x')
if direction == 'right':
neighbor = lax.rem(my_id + 1, num_devices)
else:
neighbor = lax.rem(my_id - 1, num_devices)
# Neighbor might be negative here so we add num_devices in case
neighbor = jnp.where(neighbor < 0, neighbor + num_devices, neighbor)
pltpu.semaphore_signal(ready_sem, device_id=neighbor)
pltpu.semaphore_wait(ready_sem)
copy_done = pltpu.async_remote_copy(
x_ref, y_ref, send_sem, recv_sem, device_id=neighbor
)
copy_done.wait_send()
copy_done.wait_recv()
pl.run_scoped(
body,
pltpu.SemaphoreType.REGULAR,
pltpu.SemaphoreType.DMA,
pltpu.SemaphoreType.DMA,
)
num_devices = jax.local_device_count()
x = jnp.arange(num_devices * 8 * 128).reshape((num_devices * 8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.VMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.VMEM),
out_shape=x,
)(x)
device_mesh = mesh_utils.create_device_mesh(
(jax.device_count(),), jax.devices())
mesh = jax.sharding.Mesh(device_mesh, ['x'])
y = jax.jit(
shard_map.shard_map(
body, mesh=mesh, in_specs=P('x'), out_specs=P('x'), check_vma=False
)
)(x)
if direction == 'right':
expected = jnp.concatenate([x[-8:], x[:-8]])
else:
expected = jnp.concatenate([x[8:], x[:8]])
np.testing.assert_allclose(y, expected)
@parameterized.named_parameters(('left', 'left'), ('right', 'right'))
def test_pallas_call_axis_index_2d_mesh(self, direction):
# Implements very simple collective permute in a 2D mesh.
def kernel(x_ref, y_ref):
def body(ready_sem, send_sem, recv_sem):
my_id = lax.axis_index('x')
my_other_id = lax.axis_index('y')
axis_size = lax.axis_size('x')
if direction == 'right':
neighbor = lax.rem(my_id + 1, axis_size)
else:
neighbor = lax.rem(my_id - 1, axis_size)
# Neighbor might be negative here so we add num_devices in case
neighbor = jnp.where(neighbor < 0, neighbor + axis_size, neighbor)
pltpu.semaphore_signal(ready_sem, device_id=(my_other_id, neighbor))
pltpu.semaphore_wait(ready_sem)
copy_done = pltpu.async_remote_copy(
x_ref, y_ref, send_sem, recv_sem, device_id=(my_other_id, neighbor)
)
copy_done.wait_send()
copy_done.wait_recv()
pl.run_scoped(
body,
pltpu.SemaphoreType.REGULAR,
pltpu.SemaphoreType.DMA,
pltpu.SemaphoreType.DMA,
)
axis_size = jax.device_count() // 2
x = jnp.arange(axis_size * 8 * 128).reshape((axis_size * 8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.VMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.VMEM),
out_shape=x,
)(x)
device_mesh = mesh_utils.create_device_mesh(
(2, axis_size), jax.devices()
)
mesh = jax.sharding.Mesh(device_mesh, ['y', 'x'])
y = jax.jit(
shard_map.shard_map(
body,
mesh=mesh,
in_specs=P('x', None),
out_specs=P('x', None),
check_vma=False,
)
)(x)
if direction == 'right':
expected = jnp.concatenate([x[-8:], x[:-8]])
else:
expected = jnp.concatenate([x[8:], x[:8]])
np.testing.assert_allclose(y, expected)
def test_barrier_semaphore(self):
def kernel(x_ref, y_ref):
def body(ready_sem, send_sem, recv_sem):
my_id = lax.axis_index('x')
num_devices = lax.axis_size('x')
neighbor = lax.rem(my_id + 1, num_devices)
barrier_sem = pltpu.get_barrier_semaphore()
pltpu.semaphore_signal(barrier_sem, device_id=neighbor)
pltpu.semaphore_wait(barrier_sem)
pltpu.semaphore_signal(ready_sem, device_id=neighbor)
pltpu.semaphore_wait(ready_sem)
pltpu.async_remote_copy(
x_ref, y_ref, send_sem, recv_sem, device_id=neighbor
).wait()
pl.run_scoped(
body,
pltpu.SemaphoreType.REGULAR,
pltpu.SemaphoreType.DMA,
pltpu.SemaphoreType.DMA,
)
num_devices = jax.local_device_count()
x = jnp.arange(num_devices * 8 * 128).reshape((num_devices * 8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.VMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.VMEM),
out_shape=x,
compiler_params=pltpu.CompilerParams(collective_id=0),
)(x)
device_mesh = mesh_utils.create_device_mesh(
(jax.device_count(),), jax.devices())
mesh = jax.sharding.Mesh(device_mesh, ['x'])
y = jax.jit(
shard_map.shard_map(
body, mesh=mesh, in_specs=P('x'), out_specs=P('x'), check_vma=False
)
)(x)
expected = jnp.concatenate([x[-8:], x[:-8]])
np.testing.assert_allclose(y, expected)
def test_barrier_semaphore_no_axis_name(self):
def kernel(x_ref, y_ref):
num_devices = lax.axis_size('x')
barrier_sem = pltpu.get_barrier_semaphore()
for i in range(num_devices):
pltpu.semaphore_signal(barrier_sem, device_id=i)
pltpu.semaphore_wait(barrier_sem, num_devices)
pltpu.sync_copy(x_ref, y_ref)
x = jnp.arange(8 * 128).reshape((8, 128))
def body(x):
return pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.VMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.VMEM),
out_shape=x,
compiler_params=pltpu.CompilerParams(collective_id=0),
)(x)
device_mesh = mesh_utils.create_device_mesh(
(jax.device_count(),), jax.devices())
mesh = jax.sharding.Mesh(device_mesh, ['x'])
y = jax.jit(
shard_map.shard_map(
body, mesh=mesh, in_specs=P('x'), out_specs=P('x'), check_vma=False
)
)(x)
np.testing.assert_allclose(y, x)
@parameterized.product(joint_axis=[True, False])
def test_axis_dict_with_core_multi_device(self, joint_axis):
if jax.device_count() < 2:
self.skipTest('Requires at least 2 devices for DMAs.')
if (cdim := jax.devices()[0].num_cores) < 2:
self.skipTest('Requires a TPU with at least 2 cores.')
mesh = jax.make_mesh(
(jax.device_count(),),
('device',),
axis_types=(jax.sharding.AxisType.Auto,),
)
ddim = jax.device_count()
tcmesh = pltpu.create_tensorcore_mesh('core')
pspec = P('device', None)
sharding = jax.sharding.NamedSharding(mesh, pspec)
# Array is fully sharded.
xlocal, ylocal = 8, 256
input_arr = jnp.arange(xlocal * ddim * ylocal, dtype=jnp.int32).reshape(
(xlocal * ddim, ylocal)
)
input_arr = jax.device_put(input_arr, sharding)
def core_copy(refs):
in_ref, out_ref = refs
@pl.core_map(tcmesh, compiler_params=pltpu.CompilerParams(collective_id=7))
def _():
num_cores = jax.lax.axis_size('core')
slc_size = ylocal // num_cores
vmem_shape = (xlocal, slc_size)
# This runs on every core, for every vmem iterations
def alloc(out_vmem_ref, sem, send_sem, recv_sem):
core_index = jax.lax.axis_index('core')
device_index = jax.lax.axis_index('device')
slc = pl.ds(core_index * slc_size, slc_size)
# Make sure all cores have entered run_scoped.
sem0 = pltpu.get_barrier_semaphore()
for i in range(ddim):
for j in range(num_cores):
pltpu.semaphore_signal(
sem0, 1, device_id={'device': i, 'core': j},
device_id_type=pltpu.DeviceIdType.MESH)
pltpu.semaphore_wait(sem0, ddim * num_cores)
# Identity function by default
pltpu.async_copy(in_ref.at[:, slc], out_ref.at[:, slc], sem).wait()
if joint_axis:
device_id = {('device', 'core'): cdim + 1}
else:
device_id = {'device': 1, 'core': 1}
copy_d0c0_to_d1c1 = pltpu.make_async_remote_copy(
src_ref=in_ref.at[:, slc],
dst_ref=out_vmem_ref,
send_sem=send_sem,
recv_sem=recv_sem,
device_id=device_id,
device_id_type=pltpu.DeviceIdType.MESH,
)
@pl.when(device_index == 0)
def _():
@pl.when(core_index == 0)
def _():
copy_d0c0_to_d1c1.start()
copy_d0c0_to_d1c1.wait_send()
@pl.when(device_index == 1)
def _():
@pl.when(core_index == 1)
def _():
copy_d0c0_to_d1c1.wait_recv()
pltpu.async_copy(out_vmem_ref, out_ref.at[:, slc], sem).wait()
pl.run_scoped(
alloc,
pltpu.VMEM(vmem_shape, out_ref.dtype),
*([pltpu.SemaphoreType.DMA] * 3),
)
@partial(jax.shard_map, mesh=mesh, in_specs=pspec, out_specs=pspec, check_vma=False)
def run_core_kernel(input):
output = jnp.zeros_like(input)
_, output = pl.run_state(core_copy)((input, output))
return output
pallas_out = jax.jit(run_core_kernel)(input_arr)
# The device=1 core=1 slice was flushed with device=0 core=0 contents
np.testing.assert_array_equal(pallas_out[8:16, 128:], input_arr[:8, :128])
# Mask that slice out and all should be the same.
mask = jnp.zeros((8, 128), jnp.int32)
masked_in = jax.lax.dynamic_update_slice(input_arr, mask, (8, 128))
masked_out = jax.lax.dynamic_update_slice(pallas_out, mask, (8, 128))
np.testing.assert_array_equal(masked_in, masked_out)
def test_no_barrier_semaphore(self):
def alloc_sem(_):
num_devices = lax.axis_size('x')
barrier_sem = pltpu.get_barrier_semaphore()
for i in range(num_devices):
pltpu.semaphore_signal(barrier_sem, device_id=i)
pltpu.semaphore_wait(barrier_sem, num_devices)
def barrier_kernel(x_ref, sem_ref, out_ref):
num_devices = lax.axis_size('x')
for i in range(num_devices):
pltpu.semaphore_signal(sem_ref, device_id=i)
pltpu.semaphore_wait(sem_ref, num_devices)
out_ref[...] = x_ref[...] + 1
x = jnp.arange(8 * 128).reshape((8, 128))
def body(x):
sem = pl.pallas_call(
alloc_sem,
in_specs=[],
out_specs=pl.BlockSpec(memory_space=pltpu.SEMAPHORE),
out_shape=pltpu.SemaphoreType.REGULAR(()),
compiler_params=pltpu.CompilerParams(collective_id=0),
)()
return pl.pallas_call(
barrier_kernel,
in_specs=[
pl.BlockSpec(memory_space=pltpu.VMEM),
pl.BlockSpec(memory_space=pltpu.SEMAPHORE),
],
out_specs=pl.BlockSpec(memory_space=pltpu.VMEM),
out_shape=x,
compiler_params=pltpu.CompilerParams(skip_device_barrier=True),
)(x, sem)
device_mesh = mesh_utils.create_device_mesh(
(jax.device_count(),), jax.devices())
mesh = jax.sharding.Mesh(device_mesh, ['x'])
y = jax.jit(
shard_map.shard_map(
body, mesh=mesh, in_specs=P('x'), out_specs=P('x'), check_vma=False
)
)(x)
np.testing.assert_allclose(y, x + 1)
| PallasCallRemoteDMATest |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 21958,
"end": 26905
} | class ____(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss for binary (0 or 1) classification applications.
The loss function requires the following inputs:
- `y_true` (true label): This is either 0 or 1.
- `y_pred` (predicted value): This is the model's prediction, i.e, a single
floating-point value which either represents a
[logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
`from_logits=False`).
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
assume that `y_pred` is probabilities (i.e., values in [0, 1]).
label_smoothing: Float in range [0, 1]. When 0, no smoothing occurs.
When > 0, we compute the loss between the predicted labels
and a smoothed version of the true labels, where the smoothing
squeezes the labels towards 0.5. Larger values of
`label_smoothing` correspond to heavier smoothing.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
Examples:
**Recommended Usage:** (set `from_logits=True`)
With `compile()` API:
```python
model.compile(
loss=keras.losses.BinaryCrossentropy(from_logits=True),
...
)
```
As a standalone function:
>>> # Example 1: (batch_size = 1, number of samples = 4)
>>> y_true = np.array([0, 1, 0, 0])
>>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred)
0.8654
>>> # Example 2: (batch_size = 2, number of samples = 4)
>>> y_true = np.array([[0, 1], [0, 0]])
>>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])
>>> # Using default 'auto'/'sum_over_batch_size' reduction type.
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
>>> bce(y_true, y_pred)
0.8654
>>> # Using 'sample_weight' attribute
>>> bce(y_true, y_pred, sample_weight=[0.8, 0.2])
0.243
>>> # Using 'sum' reduction` type.
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
... reduction="sum")
>>> bce(y_true, y_pred)
1.730
>>> # Using 'none' reduction type.
>>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
... reduction=None)
>>> bce(y_true, y_pred)
array([0.235, 1.496], dtype=float32)
**Default Usage:** (set `from_logits=False`)
>>> # Make the following updates to the above "Recommended Usage" section
>>> # 1. Set `from_logits=False`
>>> keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')
>>> # 2. Update `y_pred` to use probabilities instead of logits
>>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
"""
def __init__(
self,
from_logits=False,
label_smoothing=0.0,
axis=-1,
reduction="sum_over_batch_size",
name="binary_crossentropy",
dtype=None,
):
super().__init__(
binary_crossentropy,
name=name,
reduction=reduction,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def get_config(self):
config = Loss.get_config(self)
config.update(
{
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
)
return config
@keras_export("keras.losses.BinaryFocalCrossentropy")
| BinaryCrossentropy |
python | spack__spack | lib/spack/spack/spec_parser.py | {
"start": 8411,
"end": 11439
} | class ____(spack.error.SpecSyntaxError):
"""Syntax error in a spec string"""
def __init__(self, tokens: List[Token], text: str):
message = f"unexpected characters in the spec string\n{text}\n"
underline = ""
for token in tokens:
is_error = token.kind == SpecTokens.UNEXPECTED
underline += ("^" if is_error else " ") * (token.end - token.start)
message += color.colorize(f"@*r{{{underline}}}")
super().__init__(message)
def _warn_about_variant_after_compiler(literal_str: str, issues: List[str]):
"""Issue a warning if variant or other token is preceded by a compiler token. The warning is
only issued if it's actionable: either we know the config file it originates from, or we have
call site that's not internal to Spack."""
ignore = [spack.paths.lib_path, spack.paths.bin_path]
mark = spack.util.spack_yaml.get_mark_from_yaml_data(literal_str)
issue_str = ", ".join(issues)
error = f"{issue_str} in `{literal_str}`"
# warning from config file
if mark:
warnings.warn(f"{mark.name}:{mark.line + 1}: {error}")
return
# warning from hopefully package.py
for frame in reversed(traceback.extract_stack()):
if frame.lineno and not any(frame.filename.startswith(path) for path in ignore):
warnings.warn_explicit(
error,
category=spack.error.SpackAPIWarning,
filename=frame.filename,
lineno=frame.lineno,
)
return
def parse_virtual_assignment(context: TokenContext) -> Tuple[str]:
"""Look at subvalues and, if present, extract virtual and a push a substitute token.
This handles things like:
* ``^c=gcc``
* ``^c,cxx=gcc``
* ``%[when=+bar] c=gcc``
* ``%[when=+bar] c,cxx=gcc``
Virtual assignment can happen anywhere a dependency node can appear. It is
shorthand for ``%[virtuals=c,cxx] gcc``.
The ``virtuals=substitute`` key value pair appears in the subvalues of
:attr:`~spack.spec_parser.SpecTokens.DEPENDENCY` and
:attr:`~spack.spec_parser.SpecTokens.END_EDGE_PROPERTIES` tokens. We extract the virtuals and
create a token from the substitute, which is then pushed back on the parser stream so that the
head of the stream can be parsed like a regular node.
Returns:
the virtuals assigned, or None if there aren't any
"""
assert context.current_token is not None
subvalues = context.current_token.subvalues
if not subvalues:
return ()
# build a token for the substitute that we can put back on the stream
pkg = subvalues["substitute"]
token_type = SpecTokens.UNQUALIFIED_PACKAGE_NAME
if "." in pkg:
token_type = SpecTokens.FULLY_QUALIFIED_PACKAGE_NAME
start = context.current_token.value.index(pkg)
token = Token(token_type, pkg, start, start + len(pkg))
context.push_front(token)
return tuple(subvalues["virtuals"].split(","))
| SpecTokenizationError |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_grad_scaler.py | {
"start": 613,
"end": 4169
} | class ____(FSDPTest):
@skip_if_lt_x_gpu(4)
def test_gradient_scaler(self):
self.run_subtests(
{"has_inf": [True, False], "test_2d": [True, False]},
self._test_gradient_scaler,
)
def _test_gradient_scaler(self, has_inf: bool, test_2d: bool):
torch.manual_seed(0)
model = nn.Sequential(
*[nn.Linear(4, 4, device=device_type, bias=False) for _ in range(2)]
)
for layer in model:
fully_shard(layer)
fully_shard(model)
input = torch.randn([4, 4], device=device_type)
if test_2d:
mesh_2d = init_device_mesh(
device_type.type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
dp_mesh, tp_mesh = mesh_2d["dp"], mesh_2d["tp"]
model = nn.Sequential(MLP(2), MLP(2), MLP(2))
tp_parallelize_plan = {
"0.in_proj": ColwiseParallel(),
"0.out_proj": RowwiseParallel(),
"1.in_proj": ColwiseParallel(),
"1.out_proj": RowwiseParallel(),
"2.in_proj": ColwiseParallel(),
"2.out_proj": RowwiseParallel(),
}
model = parallelize_module(
model,
device_mesh=tp_mesh,
parallelize_plan=tp_parallelize_plan,
)
for module in model:
fully_shard(module, mesh=dp_mesh)
fully_shard(model, mesh=dp_mesh)
input = torch.randn((2,), device=device_type)
loss = model(input).sum()
scaler = GradScaler(init_scale=2.0, enabled=True, device=device_type.type)
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
scaler.scale(loss).backward()
inv_scale = scaler._scale.double().reciprocal().float()
if (
has_inf is True
and opt.param_groups[0]["params"][0].grad._local_tensor.device.index == 1
):
opt.param_groups[0]["params"][0].grad._local_tensor[0, 0].fill_(
float("inf")
)
initial_grad = opt.param_groups[0]["params"][0].grad.to_local().clone()
scaler.unscale_(opt)
for found_inf in scaler._per_optimizer_states[id(opt)][
"found_inf_per_device"
].values():
self.assertEqual(found_inf, has_inf)
self.assertEqual(
scaler._per_optimizer_states[id(opt)]["stage"].value,
OptState.UNSCALED.value,
)
unscaled_grad = opt.param_groups[0]["params"][0].grad.to_local().clone()
self.assertEqual(unscaled_grad, initial_grad * inv_scale)
initial_scale = scaler.get_scale()
initial_state = copy.copy(opt.state)
scaler.step(opt)
steped_state = copy.copy(opt.state)
if has_inf:
# assert parameters are the same before/after
self.assertEqual(steped_state, initial_state)
else:
# new parameters here if no inf found during .unscale_()
self.assertNotEqual(steped_state.items(), initial_state.items())
scaler.update()
updated_scale = scaler.get_scale()
if has_inf:
# assert scale is updated
backoff_factor = scaler.get_backoff_factor()
self.assertEqual(updated_scale, initial_scale * backoff_factor)
else:
# scale is not updated
self.assertEqual(updated_scale, initial_scale)
if __name__ == "__main__":
run_tests()
| TestFullyShardGradientScaler |
python | ethereum__web3.py | web3/contract/base_contract.py | {
"start": 53911,
"end": 54188
} | class ____:
@staticmethod
def _raise_exception() -> NoReturn:
raise ABIReceiveNotFound("No receive function was found in the contract ABI.")
def __getattr__(self, attr: Any) -> Callable[[], None]:
return self._raise_exception
| NonExistentReceiveFunction |
python | ansible__ansible | test/units/module_utils/basic/test_run_command.py | {
"start": 3508,
"end": 4656
} | class ____:
# Format is command as passed to run_command, command to Popen as list, command to Popen as string
ARGS_DATA = (
(['/bin/ls', 'a', 'b', 'c'], [b'/bin/ls', b'a', b'b', b'c'], b'/bin/ls a b c'),
('/bin/ls a " b" "c "', [b'/bin/ls', b'a', b' b', b'c '], b'/bin/ls a " b" "c "'),
)
@pytest.mark.parametrize('cmd, expected, shell, stdin',
((arg, cmd_str if sh else cmd_lst, sh, {})
for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))),
indirect=['stdin'])
def test_args(self, cmd, expected, shell, rc_am):
rc_am.run_command(cmd, use_unsafe_shell=shell)
assert rc_am._subprocess.Popen.called
args, kwargs = rc_am._subprocess.Popen.call_args
assert args == (expected, )
assert kwargs['shell'] == shell
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_tuple_as_args(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command(('ls', '/'))
assert rc_am.fail_json.called
| TestRunCommandArgs |
python | conda__conda | conda/models/version.py | {
"start": 17320,
"end": 18819
} | class ____:
def __init__(self, spec_str, matcher, is_exact):
self.spec_str = spec_str
self._is_exact = is_exact
self.match = matcher
@property
def spec(self):
return self.spec_str
def is_exact(self):
return self._is_exact
def __eq__(self, other):
try:
other_spec = other.spec
except AttributeError:
other_spec = self.__class__(other).spec
return self.spec == other_spec
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.spec)
def __str__(self):
return self.spec
def __repr__(self):
return f"{self.__class__.__name__}('{self.spec}')"
@property
def raw_value(self):
return self.spec
@property
def exact_value(self):
return self.is_exact() and self.spec or None
def merge(self, other):
raise NotImplementedError()
def regex_match(self, spec_str):
return bool(self.regex.match(spec_str))
def operator_match(self, spec_str):
return self.operator_func(VersionOrder(str(spec_str)), self.matcher_vo)
def any_match(self, spec_str):
return any(s.match(spec_str) for s in self.tup)
def all_match(self, spec_str):
return all(s.match(spec_str) for s in self.tup)
def exact_match(self, spec_str):
return self.spec == spec_str
def always_true_match(self, spec_str):
return True
| BaseSpec |
python | getsentry__sentry | tests/sentry/tasks/test_commit_context.py | {
"start": 4228,
"end": 39352
} | class ____(TestCommitContextIntegration):
def setUp(self) -> None:
super().setUp()
self.blame_recent = FileBlameInfo(
repo=self.repo,
path="sentry/recent.py",
ref="master",
code_mapping=self.code_mapping,
lineno=30,
commit=CommitInfo(
commitId="commit-id-recent",
committedDate=datetime.now(tz=datetime_timezone.utc) - timedelta(days=1),
commitMessage="recent commit message",
commitAuthorName=None,
commitAuthorEmail="recent@localhost",
),
)
self.blame_too_old = FileBlameInfo(
repo=self.repo,
path="sentry/recent.py",
ref="master",
code_mapping=self.code_mapping,
lineno=30,
commit=CommitInfo(
commitId="commit-id-old",
committedDate=datetime.now(tz=datetime_timezone.utc) - timedelta(days=70),
commitMessage="old commit message",
commitAuthorName=None,
commitAuthorEmail="old@localhost",
),
)
self.blame_existing_commit = FileBlameInfo(
repo=self.repo,
path="sentry/models/release.py",
ref="master",
code_mapping=self.code_mapping,
lineno=39,
commit=CommitInfo(
commitId="existing-commit",
committedDate=datetime.now(tz=datetime_timezone.utc) - timedelta(days=7),
commitMessage="placeholder commit message",
commitAuthorName=None,
commitAuthorEmail="admin@localhost",
),
)
self.blame_no_existing_commit = FileBlameInfo(
repo=self.repo,
path="sentry/not_existing.py",
ref="master",
code_mapping=self.code_mapping,
lineno=40,
commit=CommitInfo(
commitId="commit-id",
committedDate=datetime.now(tz=datetime_timezone.utc) - timedelta(days=14),
commitMessage="no existing commit message",
commitAuthorName=None,
commitAuthorEmail="admin2@localhost",
),
)
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_inactive_integration(self, mock_get_commit_context: MagicMock) -> None:
"""
Early return if the integration is not active
"""
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.update(status=ObjectStatus.DISABLED)
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
existing_commit = self.create_commit(
project=self.project,
repo=self.repo,
author=self.commit_author,
key="existing-commit",
)
existing_commit.update(message="")
assert Commit.objects.count() == 2
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_get_commit_context.called
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_success_existing_commit(
self, mock_get_commit_context: MagicMock, mock_record: MagicMock
) -> None:
"""
Tests a simple successful case, where get_commit_context_all_frames returns
a single blame item. A GroupOwner should be created, but Commit and CommitAuthor
already exist so should not.
"""
mock_get_commit_context.return_value = [self.blame_existing_commit]
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
existing_commit = self.create_commit(
project=self.project,
repo=self.repo,
author=self.commit_author,
key="existing-commit",
)
existing_commit.update(message="")
assert Commit.objects.count() == 2
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
created_group_owner = GroupOwner.objects.get(
group=self.event.group,
project=self.event.project,
organization=self.event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
# Number of commit objects should remain the same
assert Commit.objects.count() == 2
commit = Commit.objects.get(key="existing-commit")
# Message should be updated
assert commit.message == "placeholder commit message"
assert created_group_owner
assert created_group_owner.context == {
"commitId": existing_commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
assert_any_analytics_event(
mock_record,
IntegrationsSuccessfullyFetchedCommitContextAllFrames(
organization_id=self.organization.id,
project_id=self.project.id,
group_id=self.event.group_id,
event_id=self.event.event_id,
num_frames=1,
num_unique_commits=1,
num_unique_commit_authors=1,
num_successfully_mapped_frames=1,
selected_frame_index=0,
selected_provider="github",
selected_code_mapping_id=self.code_mapping.id,
),
)
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_success_updating_group_owner(self, mock_get_commit_context, mock_record):
"""
Runs through process_commit_context twice to make sure we aren't creating duplicate
GroupOwners for the same suggestion.
"""
mock_get_commit_context.return_value = [self.blame_existing_commit]
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
existing_commit = self.create_commit(
project=self.project,
repo=self.repo,
author=self.commit_author,
key="existing-commit",
)
existing_commit.update(message="")
assert Commit.objects.count() == 2
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
created_group_owner = GroupOwner.objects.get(
group=self.event.group,
project=self.event.project,
organization=self.event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
# Number of commit objects should remain the same
assert Commit.objects.count() == 2
commit = Commit.objects.get(key="existing-commit")
# Message should be updated
assert commit.message == "placeholder commit message"
assert created_group_owner
assert created_group_owner.context == {
"commitId": existing_commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
with self.tasks():
assert GroupOwner.objects.filter(group=self.event.group).count() == 1
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert GroupOwner.objects.filter(group=self.event.group).count() == 1
updated_group_owner = GroupOwner.objects.get(
group=self.event.group,
project=self.event.project,
organization=self.event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
# Number of commit objects should remain the same
assert Commit.objects.count() == 2
commit = Commit.objects.get(key="existing-commit")
# Message should be unchanged
assert commit.message == "placeholder commit message"
assert updated_group_owner
assert updated_group_owner.context == {
"commitId": existing_commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_success_create_commit(
self, mock_get_commit_context: MagicMock, mock_record: MagicMock
) -> None:
"""
A simple success case where a new commit needs to be created.
"""
mock_get_commit_context.return_value = [self.blame_no_existing_commit]
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
created_commit_author = CommitAuthor.objects.get(
organization_id=self.organization.id, email="admin2@localhost"
)
created_commit = Commit.objects.get(key="commit-id")
assert created_commit.author is not None
assert created_commit.author.id == created_commit_author.id
assert created_commit.organization_id == self.organization.id
assert created_commit.repository_id == self.repo.id
assert created_commit.date_added == self.blame_no_existing_commit.commit.committedDate
assert created_commit.message == "no existing commit message"
assert GroupOwner.objects.get(
group=self.event.group,
project=self.event.project,
organization=self.event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
assert GroupOwner.objects.get(
group=self.event.group,
project=self.event.project,
organization=self.event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
).context == {
"commitId": created_commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_success_external_author_no_user(self, mock_get_commit_context: MagicMock, _) -> None:
"""
Test that process_commit_context creates GroupOwner with user_id=None
when commit author has no Sentry user mapping.
"""
# Create blame info with external commit author (no Sentry user)
blame_external = FileBlameInfo(
repo=self.repo,
path="sentry/external.py",
ref="master",
code_mapping=self.code_mapping,
lineno=50,
commit=CommitInfo(
commitId="external-commit-id",
committedDate=datetime.now(tz=datetime_timezone.utc) - timedelta(hours=2),
commitMessage="external commit by non-user",
commitAuthorName="External Developer",
commitAuthorEmail="external@example.com",
),
)
mock_get_commit_context.return_value = [blame_external]
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
# Verify GroupOwner created with user_id=None
created_group_owner = GroupOwner.objects.get(
group=self.event.group,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
assert created_group_owner.user_id is None
# Verify the created commit and author
created_commit = Commit.objects.get(key="external-commit-id")
assert created_commit.message == "external commit by non-user"
assert created_commit.author is not None
assert created_commit.author.name == "External Developer"
assert created_commit.author.email == "external@example.com"
assert created_group_owner.context == {
"commitId": created_commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_success_multiple_blames(
self, mock_get_commit_context: MagicMock, mock_record: MagicMock
) -> None:
"""
A simple success case where multiple blames are returned.
The most recent blame should be selected.
"""
mock_get_commit_context.return_value = [
self.blame_existing_commit,
self.blame_recent,
self.blame_no_existing_commit,
]
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
created_group_owner = GroupOwner.objects.get(
group=self.event.group,
project=self.event.project,
organization=self.event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
created_commit = Commit.objects.get(key="commit-id-recent")
assert created_group_owner.context == {
"commitId": created_commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_maps_correct_files(
self, mock_get_commit_context: MagicMock, mock_record: MagicMock
) -> None:
"""
Tests that the get_commit_context_all_frames function is called with the correct
files. Code mappings should be applied properly and non-matching files thrown out.
Code mappings should also be checked in the correct order, with empty stack roots
checked last.
"""
mock_get_commit_context.return_value = [self.blame_existing_commit]
# Code mapping with empty stack root should not be used event though it was created earlier
self.create_code_mapping(
repo=self.repo,
project=self.project,
stack_root="",
source_root="foo/",
)
# This code mapping has a defined stack root and matches the filename so should be used
code_mapping_defined_stack_root = self.create_code_mapping(
repo=self.repo,
project=self.project,
stack_root="other/",
source_root="bar/",
)
frames = [
{
"in_app": True,
"lineno": 39,
"filename": "other/models/release.py",
}
]
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert GroupOwner.objects.get(
group=self.event.group,
project=self.event.project,
organization=self.event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
mock_get_commit_context.assert_called_once_with(
[
SourceLineInfo(
lineno=39,
path="bar/models/release.py",
ref="master",
repo=code_mapping_defined_stack_root.repository,
code_mapping=code_mapping_defined_stack_root,
)
],
extra={
"event": self.event.event_id,
"group": self.event.group_id,
"organization": self.event.project.organization_id,
},
)
@patch("sentry.tasks.groupowner.process_suspect_commits.delay")
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_failure_no_inapp_frames(
self, mock_get_commit_context, mock_record, mock_process_suspect_commits
):
"""
A simple failure case where the event has no in app frames, so we bail out.
"""
self.event_with_no_inapp_frames = self.store_event(
data={
"message": "Kaboom!",
"platform": "python",
"timestamp": before_now(seconds=10).isoformat(),
"stacktrace": {
"frames": [
{
"function": "handle_set_commits",
"abs_path": "/usr/src/sentry/src/sentry/tasks.py",
"module": "sentry.tasks",
"in_app": False,
"lineno": 30,
"filename": "sentry/tasks.py",
},
{
"function": "set_commits",
"abs_path": "/usr/src/sentry/src/sentry/models/release.py",
"module": "sentry.models.release",
"in_app": False,
"lineno": 39,
"filename": "sentry/models/release.py",
},
]
},
"tags": {"sentry:release": self.release.version},
"fingerprint": ["put-me-in-the-control-group"],
},
project_id=self.project.id,
)
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event_with_no_inapp_frames)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
sdk_name="sentry.python",
)
assert not mock_get_commit_context.called
assert not GroupOwner.objects.filter(group=self.event.group).exists()
mock_process_suspect_commits.assert_not_called()
assert_any_analytics_event(
mock_record,
IntegrationsFailedToFetchCommitContextAllFrames(
organization_id=self.organization.id,
project_id=self.project.id,
group_id=self.event.group_id,
event_id=self.event.event_id,
num_frames=0,
num_successfully_mapped_frames=0,
reason="could_not_find_in_app_stacktrace_frame",
),
)
@patch("sentry.integrations.utils.commit_context.logger.info")
@patch("sentry.tasks.groupowner.process_suspect_commits.delay")
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_failure_no_blames(
self, mock_get_commit_context, mock_record, mock_process_suspect_commits, mock_logger_info
):
"""
A simple failure case where no blames are returned. We bail out.
"""
mock_get_commit_context.return_value = []
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
sdk_name="sentry.python",
)
assert not GroupOwner.objects.filter(group=self.event.group).exists()
mock_process_suspect_commits.assert_not_called()
assert_any_analytics_event(
mock_record,
IntegrationsFailedToFetchCommitContextAllFrames(
organization_id=self.organization.id,
project_id=self.project.id,
group_id=self.event.group_id,
event_id=self.event.event_id,
num_frames=1,
num_successfully_mapped_frames=1,
reason="no_commits_found",
),
)
mock_logger_info.assert_any_call(
"process_commit_context_all_frames.find_commit_context_failed",
extra={
"organization": self.organization.id,
"group": self.event.group_id,
"event": self.event.event_id,
"project_id": self.project.id,
"reason": "no_commits_found",
"num_frames": 1,
},
)
@patch("sentry.tasks.groupowner.process_suspect_commits.delay")
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_time_threshold_filtering(
self, mock_get_commit_context, mock_record, mock_process_suspect_commits
):
"""
A hacky way to run a parameterized test in TestCase.
Tests the logic that filters commits by age relative to issue first_seen.
"""
# Test cases: each tuple contains (test_case_name, blames_setup, group_first_seen_days_ago, expected_group_owner_exists, expected_commit_id)
test_cases = [
("all_commits_too_young", ["blame_recent"], 3, False, None), # All commits too young
(
"all_outside_range",
["blame_recent", "blame_too_old"],
10,
False,
None,
), # All outside range
(
"skip_young_find_valid",
["blame_recent", "blame_existing_commit"],
5,
True,
"existing-commit",
), # Skip young, find valid
(
"all_valid_picks_most_recent",
["blame_existing_commit", "blame_no_existing_commit"],
5,
True,
"existing-commit",
), # All valid, picks most recent
("only_old_commits", ["blame_too_old"], 10, False, None), # All commits too old
("empty_blames", [], 10, False, None), # No blames
]
existing_commit = self.create_commit(
project=self.project,
repo=self.repo,
author=self.commit_author,
key="existing-commit",
)
existing_commit.update(message="")
# Map blame names to actual blame objects
blame_mapping = {
"blame_recent": self.blame_recent,
"blame_too_old": self.blame_too_old,
"blame_existing_commit": self.blame_existing_commit,
"blame_no_existing_commit": self.blame_no_existing_commit,
}
for (
case_name,
blames_setup,
group_first_seen_days_ago,
expected_group_owner_exists,
expected_commit_id,
) in test_cases:
with self.subTest(case=case_name):
# Reset mocks for each test case
mock_get_commit_context.reset_mock()
mock_record.reset_mock()
# Clean up any existing GroupOwners from previous test cases
GroupOwner.objects.filter(group=self.event.group).delete()
# Setup group first_seen
group_first_seen = datetime.now(tz=datetime_timezone.utc) - timedelta(
days=group_first_seen_days_ago
)
self.event.group.first_seen = group_first_seen
self.event.group.save()
# Build the mock return value
mock_blames = [blame_mapping[blame_name] for blame_name in blames_setup]
mock_get_commit_context.return_value = mock_blames
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
with self.options({"issues.suspect-commit-strategy": True}):
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
sdk_name="sentry.python",
)
# Assert GroupOwner existence
actual_exists = GroupOwner.objects.filter(group=self.event.group).exists()
self.assertEqual(
actual_exists,
expected_group_owner_exists,
f"GroupOwner existence assertion failed for case: {case_name}",
)
# Assert correct commit selected
if expected_group_owner_exists and expected_commit_id:
created_commit = Commit.objects.get(key=expected_commit_id)
group_owner = GroupOwner.objects.get(group=self.event.group)
self.assertEqual(
group_owner.context["commitId"],
created_commit.id,
f"Wrong commit selected for case: {case_name}",
)
@patch("sentry.tasks.groupowner.process_suspect_commits.delay")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
side_effect=ApiError("File not found", code=404),
)
def test_no_retry_on_non_retryable_api_error(
self, mock_get_commit_context, mock_process_suspect_commits
):
"""
A failure case where the integration hits a 404 error.
This type of failure should immediately bail with no retries.
"""
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
sdk_name="sentry.python",
)
assert not GroupOwner.objects.filter(group=self.event.group).exists()
mock_process_suspect_commits.assert_not_called()
@patch("sentry.integrations.utils.commit_context.logger.exception")
@patch("sentry.tasks.groupowner.process_suspect_commits.delay")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
side_effect=Exception("some other error"),
)
def test_failure_unknown(
self,
mock_get_commit_context,
mock_process_suspect_commits,
mock_logger_exception,
):
"""
A failure case where the integration returned an API error.
The error should be recorded.
"""
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
sdk_name="sentry.python",
)
assert not GroupOwner.objects.filter(group=self.event.group).exists()
mock_process_suspect_commits.assert_not_called()
mock_logger_exception.assert_any_call(
"process_commit_context_all_frames.get_commit_context_all_frames.unknown_error",
extra={
"organization": self.organization.id,
"group": self.event.group_id,
"event": self.event.event_id,
"project_id": self.project.id,
"integration_id": self.integration.id,
"provider": "github",
},
)
@patch("sentry.analytics.record")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_filters_invalid_and_dedupes_frames(
self, mock_get_commit_context: MagicMock, mock_record: MagicMock
) -> None:
"""
Tests that invalid frames are filtered out and that duplicate frames are deduped.
"""
mock_get_commit_context.return_value = [self.blame_existing_commit]
frames_with_dups = [
{
"function": "handle_set_commits",
"abs_path": "/usr/src/sentry/src/sentry/tasks.py",
"module": "sentry.tasks",
"in_app": False, # Not an In-App frame
"lineno": 30,
"filename": "sentry/tasks.py",
},
{
"function": "something_else",
"abs_path": "/usr/src/sentry/src/sentry/tasks.py",
"module": "sentry.tasks",
"in_app": True,
"filename": "sentry/tasks.py",
# No lineno
},
{
"function": "something_else",
"abs_path": "/usr/src/sentry/src/sentry/invalid_2.py",
"module": "sentry.invalid_2",
"in_app": True,
# Bad path with quotes
"filename": 'sentry/"invalid_2".py',
"lineno": 39,
},
{
"function": "set_commits",
"abs_path": "/usr/src/sentry/src/sentry/models/release.py",
"module": "sentry.models.release",
"in_app": True,
"lineno": 39,
"filename": "sentry/models/release.py",
},
{
"function": "set_commits",
"abs_path": "/usr/src/sentry/src/sentry/models/release.py",
"module": "sentry.models.release",
"in_app": True,
"lineno": 39,
"filename": "sentry/models/release.py",
},
]
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=frames_with_dups,
group_id=self.event.group_id,
project_id=self.event.project_id,
sdk_name="sentry.python",
)
mock_get_commit_context.assert_called_with(
[
SourceLineInfo(
lineno=39,
path="sentry/models/release.py",
ref="master",
repo=self.repo,
code_mapping=self.code_mapping,
),
],
extra={
"event": self.event.event_id,
"group": self.event.group_id,
"organization": self.organization.id,
},
)
assert_any_analytics_event(
mock_record,
IntegrationsSuccessfullyFetchedCommitContextAllFrames(
organization_id=self.organization.id,
project_id=self.project.id,
group_id=self.event.group_id,
event_id=self.event.event_id,
# 1 was a duplicate, 2 filtered out because of missing properties
num_frames=2,
num_unique_commits=1,
num_unique_commit_authors=1,
# Only 1 successfully mapped frame of the 6 total
num_successfully_mapped_frames=1,
selected_frame_index=0,
selected_provider="github",
selected_code_mapping_id=self.code_mapping.id,
),
)
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
return_value=[],
)
@patch("sentry.integrations.source_code_management.tasks.pr_comment_workflow.delay")
| TestCommitContextAllFrames |
python | huggingface__transformers | src/transformers/models/sam_hq/modeling_sam_hq.py | {
"start": 32924,
"end": 35021
} | class ____(nn.Module):
def __init__(self, config: SamHQMaskDecoderConfig):
super().__init__()
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.layers = nn.ModuleList()
for i in range(self.num_hidden_layers):
self.layers.append(SamHQTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))
self.final_attn_token_to_image = SamHQAttention(config)
self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
def forward(
self,
point_embeddings: Tensor,
image_embeddings: Tensor,
image_positional_embeddings: Tensor,
attention_similarity: Tensor,
target_embedding=None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
if image_embeddings is None:
raise ValueError("You have to specify an image_embedding")
image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
# Prepare queries
queries = point_embeddings
keys = image_embeddings
# Apply transformer blocks and final layernorm
for layer in self.layers:
if target_embedding is not None:
queries += target_embedding
queries, keys, _ = layer(
queries=queries,
keys=keys,
query_point_embedding=point_embeddings,
key_point_embedding=image_positional_embeddings,
attention_similarity=attention_similarity,
**kwargs,
)
# Apply the final attention layer from the points to the image
query = queries + point_embeddings
key = keys + image_positional_embeddings
attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys)
queries = queries + attn_out
queries = self.layer_norm_final_attn(queries)
return queries, keys
| SamHQTwoWayTransformer |
python | django__django | tests/custom_lookups/models.py | {
"start": 31,
"end": 332
} | class ____(models.Model):
name = models.CharField(max_length=20)
alias = models.CharField(max_length=20)
age = models.IntegerField(null=True)
birthdate = models.DateField(null=True)
average_rating = models.FloatField(null=True)
def __str__(self):
return self.name
| Author |
python | wandb__wandb | wandb/sdk/artifacts/storage_handlers/tracking_handler.py | {
"start": 495,
"end": 2476
} | class ____(StorageHandler):
_scheme: str
def __init__(self, scheme: str = "") -> None:
"""Track paths with no modification or special processing.
Useful when paths being tracked are on file systems mounted at a standardized
location.
For example, if the data to track is located on an NFS share mounted on
`/data`, then it is sufficient to just track the paths.
"""
self._scheme = scheme
def can_handle(self, parsed_url: ParseResult) -> bool:
return parsed_url.scheme == self._scheme
def load_path(
self,
manifest_entry: ArtifactManifestEntry,
local: bool = False,
) -> URIStr | FilePathStr:
if local:
# Likely a user error. The tracking handler is
# oblivious to the underlying paths, so it has
# no way of actually loading it.
url = urlparse(manifest_entry.ref)
raise ValueError(
f"Cannot download file at path {str(manifest_entry.ref)}, scheme {str(url.scheme)} not recognized"
)
# TODO(spencerpearson): should this go through util.to_native_slash_path
# instead of just getting typecast?
return FilePathStr(manifest_entry.path)
def store_path(
self,
artifact: Artifact,
path: URIStr | FilePathStr,
name: StrPath | None = None,
checksum: bool = True,
max_objects: int | None = None,
) -> list[ArtifactManifestEntry]:
url = urlparse(path)
if name is None:
raise ValueError(
f'You must pass name="<entry_name>" when tracking references with unknown schemes. ref: {path}'
)
termwarn(
f"Artifact references with unsupported schemes cannot be checksummed: {path}"
)
name = name or url.path[1:] # strip leading slash
return [ArtifactManifestEntry(path=name, ref=path, digest=path)]
| TrackingHandler |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-oracleai/llama_index/readers/oracleai/base.py | {
"start": 802,
"end": 1618
} | class ____(HTMLParser):
"""Parse Oracle doc metadata..."""
def __init__(self) -> None:
super().__init__()
self.reset()
self.match = False
self.metadata = {}
def handle_starttag(self, tag, attrs):
if tag == "meta":
entry = ""
for name, value in attrs:
if name == "name":
entry = value
if name == "content":
if entry:
self.metadata[entry] = value
elif tag == "title":
self.match = True
def handle_data(self, data):
if self.match:
self.metadata["title"] = data
self.match = False
def get_metadata(self):
return self.metadata
"""OracleDocReader class"""
| ParseOracleDocMetadata |
python | walkccc__LeetCode | solutions/3448. Count Substrings Divisible By Last Digit/3448-2.py | {
"start": 0,
"end": 527
} | class ____:
def countSubstrings(self, s: str) -> int:
ans = 0
# dp[num][rem] := the number of substrings so far that have a remainder of
# `rem` when divided by `num`
dp = [[0] * 10 for _ in range(10)]
for c in s:
digit = int(c)
newDp = [[0] * 10 for _ in range(10)]
for num in range(1, 10):
for rem in range(num):
newDp[num][(rem * 10 + digit) % num] += dp[num][rem]
newDp[num][digit % num] += 1
dp = newDp
ans += dp[digit][0]
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/flatten-nested-list-iterator.py | {
"start": 100,
"end": 945
} | class ____(object):
def __init__(self, nestedList):
"""
Initialize your data structure here.
:type nestedList: List[NestedInteger]
"""
self.__depth = [[nestedList, 0]]
def next(self):
"""
:rtype: int
"""
nestedList, i = self.__depth[-1]
self.__depth[-1][1] += 1
return nestedList[i].getInteger()
def hasNext(self):
"""
:rtype: bool
"""
while self.__depth:
nestedList, i = self.__depth[-1]
if i == len(nestedList):
self.__depth.pop()
elif nestedList[i].isInteger():
return True
else:
self.__depth[-1][1] += 1
self.__depth.append([nestedList[i].getList(), 0])
return False
| NestedIterator |
python | huggingface__transformers | tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py | {
"start": 22550,
"end": 25396
} | class ____(ModelTesterMixin, unittest.TestCase):
is_encoder_decoder = True
test_missing_keys = False
test_resize_embeddings = True
all_model_classes = (
(
SeamlessM4Tv2Model,
SeamlessM4Tv2ForTextToSpeech,
SeamlessM4Tv2ForTextToText,
)
if is_torch_available()
else ()
)
# Doesn't run generation tests. Has custom generation method with a different interface
all_generative_model_classes = ()
def setUp(self):
self.model_tester = SeamlessM4Tv2ModelTester(self, input_modality="text")
self.config_tester = ConfigTester(self, config_class=SeamlessM4Tv2Config)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "facebook/seamless-m4t-v2-large"
model = SeamlessM4Tv2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(
reason="Expected missing keys serve when using SeamlessM4Tv2ForXXX.from_pretrained from a checkpoint saved by SeamlessM4Tv2Model.save_pretrained."
)
def test_model_weights_reload_no_missing_tied_weights(self):
pass
@unittest.skip(reason="SeamlessM4Tv2Model can take input_ids or input_features")
def test_forward_signature(self):
pass
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(
reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
)
def test_load_save_without_tied_weights(self):
pass
@require_torch
| SeamlessM4Tv2ModelWithTextInputTest |
python | milvus-io__pymilvus | tests/test_check.py | {
"start": 443,
"end": 1946
} | class ____:
@pytest.mark.parametrize("valid_address", [
"localhost:19530",
"example.com:19530"
])
def test_check_is_legal_address_true(self, valid_address):
valid = is_legal_address(valid_address)
assert valid is True
@pytest.mark.parametrize("invalid_address", [
"-1",
"localhost",
":19530",
"localhost:localhost",
])
def test_check_is_legal_address_false(self, invalid_address):
valid = is_legal_address(invalid_address)
assert valid is False
@pytest.mark.parametrize("valid_host", [
"localhost",
"example.com"
])
def test_check_is_legal_host_true(self, valid_host):
valid = is_legal_host(valid_host)
assert valid is True
@pytest.mark.parametrize("invalid_host", [
-1,
1.0,
"",
is_legal_address,
])
def test_check_is_legal_host_false(self, invalid_host):
valid = is_legal_host(invalid_host)
assert valid is False
@pytest.mark.parametrize("valid_port", [
"19530",
"222",
123,
])
def test_check_is_legal_port_true(self, valid_port):
valid = is_legal_port(valid_port)
assert valid is True
@pytest.mark.parametrize("invalid_port", [
is_legal_address,
"abc",
0.3,
])
def test_check_is_legal_port_false(self, invalid_port):
valid = is_legal_port(invalid_port)
assert valid is False
| TestChecks |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 98953,
"end": 103552
} | class ____(nn.Module):
"""Downsamples 4x by applying a 2D convolution and doing max pooling."""
def __init__(
self,
num_layers: int = 1,
in_channels: int = 3,
out_channels: int = 64,
use_batchnorm: bool = True,
):
"""
Constructs a Conv2DDownsample model.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 64):
The number of conv output channels.
use_batchnorm (`bool`, *optional*, defaults to `True`):
Whether to use batchnorm.
"""
super().__init__()
self.conv = Conv2dSamePadding(
in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False
)
self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
self.relu = nn.ReLU()
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
out = self.conv(inputs)
out = self.batchnorm(out)
out = self.relu(out)
out = self.max_pool(out)
return out
def generate_fourier_features(pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False):
"""
Generate a Fourier frequency position encoding with linear spacing.
Args:
pos (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`):
The Tensor containing the position of n points in d dimensional space.
num_bands (`int`):
The number of frequency bands (K) to use.
max_resolution (`tuple[int]`, *optional*, defaults to (224, 224)):
The maximum resolution (i.e. the number of pixels per dim). A tuple representing resolution for each dimension.
concat_pos (`bool`, *optional*, defaults to `True`):
Whether to concatenate the input position encoding to the Fourier features.
sine_only (`bool`, *optional*, defaults to `False`):
Whether to use a single phase (sin) or two (sin/cos) for each frequency band.
Returns:
`torch.FloatTensor` of shape `(batch_size, sequence_length, n_channels)`: The Fourier position embeddings. If
`concat_pos` is `True` and `sine_only` is `False`, output dimensions are ordered as: [dim_1, dim_2, ..., dim_d,
sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ..., sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d), cos(pi*f_1*dim_1),
..., cos(pi*f_K*dim_1), ..., cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)], where dim_i is pos[:, i] and f_k is the
kth frequency band.
"""
batch_size = pos.shape[0]
min_freq = 1.0
# Nyquist frequency at the target resolution:
freq_bands = torch.stack(
[torch.linspace(start=min_freq, end=res / 2, steps=num_bands) for res in max_resolution], dim=0
)
# Get frequency bands for each spatial dimension.
# Output is size [n, d * num_bands]
per_pos_features = pos[0, :, :][:, :, None] * freq_bands[None, :, :]
per_pos_features = torch.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])])
if sine_only:
# Output is size [n, d * num_bands]
per_pos_features = torch.sin(np.pi * (per_pos_features))
else:
# Output is size [n, 2 * d * num_bands]
per_pos_features = torch.cat(
[torch.sin(np.pi * per_pos_features), torch.cos(np.pi * per_pos_features)], dim=-1
)
# Concatenate the raw input positions.
if concat_pos:
# Adds d bands to the encoding.
per_pos_features = torch.cat([pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1)
return per_pos_features
def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
"""
Generate an array of position indices for an N-D input array.
Args:
index_dims (`list[int]`):
The shape of the index dimensions of the input array.
output_range (`tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`):
The min and max values taken by each input index dimension.
Returns:
`torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`.
"""
def _linspace(n_xels_per_dim):
return torch.linspace(start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32)
dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
array_index_grid = meshgrid(*dim_ranges, indexing="ij")
return torch.stack(array_index_grid, dim=-1)
| Conv2DDownsample |
python | protocolbuffers__protobuf | python/google/protobuf/internal/reflection_test.py | {
"start": 96397,
"end": 108423
} | class ____(unittest.TestCase):
def setUp(self):
self.proto = unittest_pb2.TestAllTypes()
self.extended_proto = more_extensions_pb2.ExtendedMessage()
self.packed_proto = unittest_pb2.TestPackedTypes()
self.packed_extended_proto = unittest_pb2.TestPackedExtensions()
def Size(self):
return self.proto.ByteSize()
def testEmptyMessage(self):
self.assertEqual(0, self.proto.ByteSize())
def testSizedOnKwargs(self):
# Use a separate message to ensure testing right after creation.
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.ByteSize())
proto_kwargs = unittest_pb2.TestAllTypes(optional_int64 = 1)
# One byte for the tag, one to encode varint 1.
self.assertEqual(2, proto_kwargs.ByteSize())
def testVarints(self):
def Test(i, expected_varint_size):
self.proto.Clear()
self.proto.optional_int64 = i
# Add one to the varint size for the tag info
# for tag 1.
self.assertEqual(expected_varint_size + 1, self.Size())
Test(0, 1)
Test(1, 1)
for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
Test((1 << i) - 1, num_bytes)
Test(-1, 10)
Test(-2, 10)
Test(-(1 << 63), 10)
def testStrings(self):
self.proto.optional_string = ''
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2, self.Size())
self.proto.optional_string = 'abc'
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2 + len(self.proto.optional_string), self.Size())
self.proto.optional_string = 'x' * 128
# Need one byte for tag info (tag #14), and TWO bytes for length.
self.assertEqual(3 + len(self.proto.optional_string), self.Size())
def testOtherNumerics(self):
self.proto.optional_fixed32 = 1234
# One byte for tag and 4 bytes for fixed32.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_fixed64 = 1234
# One byte for tag and 8 bytes for fixed64.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_float = 1.234
# One byte for tag and 4 bytes for float.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_double = 1.234
# One byte for tag and 8 bytes for float.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_sint32 = 64
# One byte for tag and 2 bytes for zig-zag-encoded 64.
self.assertEqual(3, self.Size())
self.proto = unittest_pb2.TestAllTypes()
def testComposites(self):
# 3 bytes.
self.proto.optional_nested_message.bb = (1 << 14)
# Plus one byte for bb tag.
# Plus 1 byte for optional_nested_message serialized size.
# Plus two bytes for optional_nested_message tag.
self.assertEqual(3 + 1 + 1 + 2, self.Size())
def testGroups(self):
# 4 bytes.
self.proto.optionalgroup.a = (1 << 21)
# Plus two bytes for |a| tag.
# Plus 2 * two bytes for START_GROUP and END_GROUP tags.
self.assertEqual(4 + 2 + 2*2, self.Size())
def testRepeatedScalars(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsExtend(self):
self.proto.repeated_int32.extend([10, 128]) # 3 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsRemove(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
self.proto.repeated_int32.remove(128)
self.assertEqual(1 + 2, self.Size())
def testRepeatedComposites(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 7
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
def testRepeatedCompositesDelete(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 9
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
repeated_nested_message = copy.deepcopy(
self.proto.repeated_nested_message)
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[0]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
# Now add a new message.
foreign_message_2 = self.proto.repeated_nested_message.add()
foreign_message_2.bb = 12
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[1]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
del self.proto.repeated_nested_message[0]
self.assertEqual(0, self.Size())
self.assertEqual(2, len(repeated_nested_message))
del repeated_nested_message[0:1]
self.assertEqual(1, len(repeated_nested_message))
del repeated_nested_message[-1]
self.assertEqual(0, len(repeated_nested_message))
def testRepeatedGroups(self):
# 2-byte START_GROUP plus 2-byte END_GROUP.
group_0 = self.proto.repeatedgroup.add()
# 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
# plus 2-byte END_GROUP.
group_1 = self.proto.repeatedgroup.add()
group_1.a = 7
self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())
def testExtensions(self):
proto = unittest_pb2.TestAllExtensions()
self.assertEqual(0, proto.ByteSize())
extension = unittest_pb2.optional_int32_extension # Field #1, 1 byte.
proto.Extensions[extension] = 23
# 1 byte for tag, 1 byte for value.
self.assertEqual(2, proto.ByteSize())
field = unittest_pb2.TestAllTypes.DESCRIPTOR.fields_by_name[
'optional_int32']
with self.assertRaises(KeyError):
proto.Extensions[field] = 23
def testCacheInvalidationForNonrepeatedScalar(self):
# Test non-extension.
self.proto.optional_int32 = 1
self.assertEqual(2, self.proto.ByteSize())
self.proto.optional_int32 = 128
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_int_extension
self.extended_proto.Extensions[extension] = 1
self.assertEqual(2, self.extended_proto.ByteSize())
self.extended_proto.Extensions[extension] = 128
self.assertEqual(3, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedScalar(self):
# Test non-extension.
self.proto.repeated_int32.append(1)
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_int32.append(1)
self.assertEqual(6, self.proto.ByteSize())
self.proto.repeated_int32[1] = 128
self.assertEqual(7, self.proto.ByteSize())
self.proto.ClearField('repeated_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_int_extension
repeated = self.extended_proto.Extensions[extension]
repeated.append(1)
self.assertEqual(2, self.extended_proto.ByteSize())
repeated.append(1)
self.assertEqual(4, self.extended_proto.ByteSize())
repeated[1] = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForNonrepeatedMessage(self):
# Test non-extension.
self.proto.optional_foreign_message.c = 1
self.assertEqual(5, self.proto.ByteSize())
self.proto.optional_foreign_message.c = 128
self.assertEqual(6, self.proto.ByteSize())
self.proto.optional_foreign_message.ClearField('c')
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
# This is only possible in pure-Python implementation of the API.
child = self.proto.optional_foreign_message
self.proto.ClearField('optional_foreign_message')
child.c = 128
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_message_extension
child = self.extended_proto.Extensions[extension]
self.assertEqual(0, self.extended_proto.ByteSize())
child.foreign_message_int = 1
self.assertEqual(4, self.extended_proto.ByteSize())
child.foreign_message_int = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedMessage(self):
# Test non-extension.
child0 = self.proto.repeated_foreign_message.add()
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_foreign_message.add()
self.assertEqual(6, self.proto.ByteSize())
child0.c = 1
self.assertEqual(8, self.proto.ByteSize())
self.proto.ClearField('repeated_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_message_extension
child_list = self.extended_proto.Extensions[extension]
child0 = child_list.add()
self.assertEqual(2, self.extended_proto.ByteSize())
child_list.add()
self.assertEqual(4, self.extended_proto.ByteSize())
child0.foreign_message_int = 1
self.assertEqual(6, self.extended_proto.ByteSize())
child0.ClearField('foreign_message_int')
self.assertEqual(4, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testPackedRepeatedScalars(self):
self.assertEqual(0, self.packed_proto.ByteSize())
self.packed_proto.packed_int32.append(10) # 1 byte.
self.packed_proto.packed_int32.append(128) # 2 bytes.
# The tag is 2 bytes (the field number is 90), and the varint
# storing the length is 1 byte.
int_size = 1 + 2 + 3
self.assertEqual(int_size, self.packed_proto.ByteSize())
self.packed_proto.packed_double.append(4.2) # 8 bytes
self.packed_proto.packed_double.append(3.25) # 8 bytes
# 2 more tag bytes, 1 more length byte.
double_size = 8 + 8 + 3
self.assertEqual(int_size+double_size, self.packed_proto.ByteSize())
self.packed_proto.ClearField('packed_int32')
self.assertEqual(double_size, self.packed_proto.ByteSize())
def testPackedExtensions(self):
self.assertEqual(0, self.packed_extended_proto.ByteSize())
extension = self.packed_extended_proto.Extensions[
unittest_pb2.packed_fixed32_extension]
extension.extend([1, 2, 3, 4]) # 16 bytes
# Tag is 3 bytes.
self.assertEqual(19, self.packed_extended_proto.ByteSize())
# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
# including ordering between extensions and non-extension
# fields.
# * Consistent serialization of negative numbers, especially
# negative int32s.
# * Handling of empty submessages (with and without "has"
# bits set).
@testing_refleaks.TestCase
| ByteSizeTest |
python | getlogbook__logbook | src/logbook/utils.py | {
"start": 118,
"end": 1609
} | class ____:
def __init__(self, threshold, func):
self.timer = threading.Timer(threshold, func)
def __enter__(self):
self.timer.start()
return self
def __exit__(self, *_):
self.timer.cancel()
_slow_logger = Logger("Slow")
def logged_if_slow(*args, **kwargs):
"""Context manager that logs if operations within take longer than
`threshold` seconds.
:param threshold: Number of seconds (or fractions thereof) allwoed before
logging occurs. The default is 1 second.
:param logger: :class:`~logbook.Logger` to use. The default is a 'slow'
logger.
:param level: Log level. The default is `DEBUG`.
:param func: (Deprecated). Function to call to perform logging.
The remaining parameters are passed to the
:meth:`~logbook.base.LoggerMixin.log` method.
"""
threshold = kwargs.pop("threshold", 1)
func = kwargs.pop("func", None)
if func is None:
logger = kwargs.pop("logger", _slow_logger)
level = kwargs.pop("level", DEBUG)
func = functools.partial(logger.log, level, *args, **kwargs)
else:
if "logger" in kwargs or "level" in kwargs:
raise TypeError(
"If using deprecated func parameter, 'logger' and"
" 'level' arguments cannot be passed."
)
func = functools.partial(func, *args, **kwargs)
return _SlowContextNotifier(threshold, func)
| _SlowContextNotifier |
python | huggingface__transformers | src/transformers/models/nystromformer/modeling_nystromformer.py | {
"start": 24467,
"end": 25363
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.config = config
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@auto_docstring(
custom_intro="""
Nyströmformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
| NystromformerClassificationHead |
python | doocs__leetcode | solution/0400-0499/0474.Ones and Zeroes/Solution2.py | {
"start": 0,
"end": 377
} | class ____:
def findMaxForm(self, strs: List[str], m: int, n: int) -> int:
f = [[0] * (n + 1) for _ in range(m + 1)]
for s in strs:
a, b = s.count("0"), s.count("1")
for i in range(m, a - 1, -1):
for j in range(n, b - 1, -1):
f[i][j] = max(f[i][j], f[i - a][j - b] + 1)
return f[m][n]
| Solution |
python | google__pytype | pytype/tools/xref/testdata/class_def.py | {
"start": 335,
"end": 478
} | class ____(A):
pass
#- @Foo defines/binding ClassFoo
#- @A ref ClassA
#- @B ref ClassB
#- ClassFoo.node/kind record
#- ClassFoo.subkind class
| D |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/asset_event.py | {
"start": 1043,
"end": 1356
} | class ____(StrictBaseModel):
"""DagRun serializer for asset responses."""
run_id: str
dag_id: str
logical_date: datetime | None
start_date: datetime
end_date: datetime | None
state: str
data_interval_start: datetime | None
data_interval_end: datetime | None
| DagRunAssetReference |
python | boto__boto3 | tests/unit/s3/test_inject.py | {
"start": 6322,
"end": 8412
} | class ____(unittest.TestCase):
def setUp(self):
self.obj = mock.Mock(bucket_name='my_bucket', key='my_key')
self.copy_source = {'Bucket': 'foo', 'Key': 'bar'}
def test_upload_file_proxies_to_meta_client(self):
inject.object_upload_file(self.obj, Filename='foo')
self.obj.meta.client.upload_file.assert_called_with(
Filename='foo',
Bucket=self.obj.bucket_name,
Key=self.obj.key,
ExtraArgs=None,
Callback=None,
Config=None,
)
def test_download_file_proxies_to_meta_client(self):
inject.object_download_file(self.obj, Filename='foo')
self.obj.meta.client.download_file.assert_called_with(
Bucket=self.obj.bucket_name,
Key=self.obj.key,
Filename='foo',
ExtraArgs=None,
Callback=None,
Config=None,
)
def test_copy(self):
inject.object_copy(self.obj, self.copy_source)
self.obj.meta.client.copy.assert_called_with(
CopySource=self.copy_source,
Bucket=self.obj.bucket_name,
Key=self.obj.key,
ExtraArgs=None,
Callback=None,
SourceClient=None,
Config=None,
)
def test_upload_fileobj(self):
fileobj = io.BytesIO(b'foo')
inject.object_upload_fileobj(self.obj, Fileobj=fileobj)
self.obj.meta.client.upload_fileobj.assert_called_with(
Bucket=self.obj.bucket_name,
Fileobj=fileobj,
Key=self.obj.key,
ExtraArgs=None,
Callback=None,
Config=None,
)
def test_download_fileobj(self):
fileobj = io.BytesIO()
inject.object_download_fileobj(self.obj, Fileobj=fileobj)
self.obj.meta.client.download_fileobj.assert_called_with(
Bucket=self.obj.bucket_name,
Key=self.obj.key,
Fileobj=fileobj,
ExtraArgs=None,
Callback=None,
Config=None,
)
| TestObjectTransferMethods |
python | huggingface__transformers | examples/pytorch/image-pretraining/run_mae.py | {
"start": 6186,
"end": 14979
} | class ____(TrainingArguments):
base_learning_rate: float = field(
default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
)
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
return {"pixel_values": pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Initialize our dataset.
ds = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
data_files=data_args.data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=data_args.trust_remote_code,
)
# If we don't have a validation split, split off a percentage of train as validation.
data_args.train_val_split = None if "validation" in ds else data_args.train_val_split
if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
split = ds["train"].train_test_split(data_args.train_val_split)
ds["train"] = split["train"]
ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.token,
}
if model_args.config_name:
config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
}
)
# create image processor
if model_args.image_processor_name:
image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
elif model_args.model_name_or_path:
image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
image_processor = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
model = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
)
else:
logger.info("Training new model from scratch")
model = ViTMAEForPreTraining(config)
if training_args.do_train:
column_names = ds["train"].column_names
else:
column_names = ds["validation"].column_names
if data_args.image_column_name is not None:
image_column_name = data_args.image_column_name
elif "image" in column_names:
image_column_name = "image"
elif "img" in column_names:
image_column_name = "img"
else:
image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
size = image_processor.size["shortest_edge"]
else:
size = (image_processor.size["height"], image_processor.size["width"])
transforms = Compose(
[
Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
]
)
def preprocess_images(examples):
"""Preprocess a batch of images by applying transforms."""
examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset")
if data_args.max_train_samples is not None:
ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset")
if data_args.max_eval_samples is not None:
ds["validation"] = (
ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(preprocess_images)
# Compute absolute learning rate
total_train_batch_size = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=ds["train"] if training_args.do_train else None,
eval_dataset=ds["validation"] if training_args.do_eval else None,
processing_class=image_processor,
data_collator=collate_fn,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| CustomTrainingArguments |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property17.py | {
"start": 262,
"end": 350
} | class ____(Protocol[T_co]):
@property
def prop(self) -> T_co: ...
@dataclass
| Proto |
python | miyuchina__mistletoe | test/test_span_token.py | {
"start": 6833,
"end": 7464
} | class ____(unittest.TestCase):
def test_parse_soft_break(self):
token, = span_token.tokenize_inner('\n')
self.assertIsInstance(token, span_token.LineBreak)
self.assertTrue(token.soft)
def test_parse_hard_break_with_double_blanks(self):
token, = span_token.tokenize_inner(' \n')
self.assertIsInstance(token, span_token.LineBreak)
self.assertFalse(token.soft)
def test_parse_hard_break_with_backslash(self):
_, token, = span_token.tokenize_inner(' \\\n')
self.assertIsInstance(token, span_token.LineBreak)
self.assertFalse(token.soft)
| TestLineBreak |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_types05.py | {
"start": 315,
"end": 1790
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("types05.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_write_formula_default(self):
"""Test writing formulas with strings_to_formulas on."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "=1+1", None, 2)
worksheet.write_string(1, 0, "=1+1")
workbook.close()
self.assertExcelEqual()
def test_write_formula_implicit(self):
"""Test writing formulas with strings_to_formulas on."""
workbook = Workbook(self.got_filename, {"strings_to_formulas": True})
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "=1+1", None, 2)
worksheet.write_string(1, 0, "=1+1")
workbook.close()
self.assertExcelEqual()
def test_write_formula_explicit(self):
"""Test writing formulas with strings_to_formulas off."""
workbook = Workbook(self.got_filename, {"strings_to_formulas": False})
worksheet = workbook.add_worksheet()
worksheet.write_formula(0, 0, "=1+1", None, 2)
worksheet.write(1, 0, "=1+1")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | networkx__networkx | networkx/linalg/tests/test_algebraic_connectivity.py | {
"start": 10235,
"end": 13735
} | class ____:
_graphs = (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)
@pytest.mark.parametrize("graph", _graphs)
def test_nullgraph(self, graph):
G = graph()
pytest.raises(nx.NetworkXError, nx.spectral_ordering, G)
@pytest.mark.parametrize("graph", _graphs)
def test_singleton(self, graph):
G = graph()
G.add_node("x")
assert nx.spectral_ordering(G) == ["x"]
G.add_edge("x", "x", weight=33)
G.add_edge("x", "x", weight=33)
assert nx.spectral_ordering(G) == ["x"]
def test_unrecognized_method(self):
G = nx.path_graph(4)
pytest.raises(nx.NetworkXError, nx.spectral_ordering, G, method="unknown")
@pytest.mark.parametrize("method", methods)
def test_three_nodes(self, method):
pytest.importorskip("scipy")
G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1)], weight="spam")
order = nx.spectral_ordering(G, weight="spam", method=method)
assert set(order) == set(G)
assert {1, 3} in (set(order[:-1]), set(order[1:]))
@pytest.mark.parametrize("method", methods)
def test_three_nodes_multigraph(self, method):
pytest.importorskip("scipy")
G = nx.MultiDiGraph()
G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2), (2, 3, 1), (2, 3, 2)])
order = nx.spectral_ordering(G, method=method)
assert set(order) == set(G)
assert {2, 3} in (set(order[:-1]), set(order[1:]))
@pytest.mark.parametrize("method", methods)
def test_path(self, method):
pytest.importorskip("scipy")
path = list(range(10))
np.random.shuffle(path)
G = nx.Graph()
nx.add_path(G, path)
order = nx.spectral_ordering(G, method=method)
assert order in [path, list(reversed(path))]
@pytest.mark.parametrize("method", methods)
def test_seed_argument(self, method):
pytest.importorskip("scipy")
path = list(range(10))
np.random.shuffle(path)
G = nx.Graph()
nx.add_path(G, path)
order = nx.spectral_ordering(G, method=method, seed=1)
assert order in [path, list(reversed(path))]
@pytest.mark.parametrize("method", methods)
def test_disconnected(self, method):
pytest.importorskip("scipy")
G = nx.Graph()
nx.add_path(G, range(0, 10, 2))
nx.add_path(G, range(1, 10, 2))
order = nx.spectral_ordering(G, method=method)
assert set(order) == set(G)
seqs = [
list(range(0, 10, 2)),
list(range(8, -1, -2)),
list(range(1, 10, 2)),
list(range(9, -1, -2)),
]
assert order[:5] in seqs
assert order[5:] in seqs
@pytest.mark.parametrize(
("normalized", "expected_order"),
(
(False, [[1, 2, 0, 3, 4, 5, 6, 9, 7, 8], [8, 7, 9, 6, 5, 4, 3, 0, 2, 1]]),
(True, [[1, 2, 3, 0, 4, 5, 9, 6, 7, 8], [8, 7, 6, 9, 5, 4, 0, 3, 2, 1]]),
),
)
@pytest.mark.parametrize("method", methods)
def test_cycle(self, normalized, expected_order, method):
pytest.importorskip("scipy")
path = list(range(10))
G = nx.Graph()
nx.add_path(G, path, weight=5)
G.add_edge(path[-1], path[0], weight=1)
A = nx.laplacian_matrix(G).todense()
order = nx.spectral_ordering(G, normalized=normalized, method=method)
assert order in expected_order
| TestSpectralOrdering |
python | spack__spack | lib/spack/spack/vendor/jinja2/loaders.py | {
"start": 16442,
"end": 18769
} | class ____(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash per
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(
self, mapping: t.Mapping[str, BaseLoader], delimiter: str = "/"
) -> None:
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template: str) -> t.Tuple[BaseLoader, str]:
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError) as e:
raise TemplateNotFound(template) from e
return loader, name
def get_source(
self, environment: "Environment", template: str
) -> t.Tuple[str, t.Optional[str], t.Optional[t.Callable[[], bool]]]:
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound as e:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template) from e
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.Optional[t.MutableMapping[str, t.Any]] = None,
) -> "Template":
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound as e:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name) from e
def list_templates(self) -> t.List[str]:
result = []
for prefix, loader in self.mapping.items():
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
| PrefixLoader |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 3053,
"end": 3259
} | class ____(BaseModelWithConfig):
name: Optional[str] = None
namespace: Optional[str] = None
prefix: Optional[str] = None
attribute: Optional[bool] = None
wrapped: Optional[bool] = None
| XML |
python | doocs__leetcode | solution/0700-0799/0723.Candy Crush/Solution.py | {
"start": 0,
"end": 1385
} | class ____:
def candyCrush(self, board: List[List[int]]) -> List[List[int]]:
m, n = len(board), len(board[0])
run = True
while run:
run = False
for i in range(m):
for j in range(2, n):
if board[i][j] and abs(board[i][j]) == abs(board[i][j - 1]) == abs(
board[i][j - 2]
):
run = True
board[i][j] = board[i][j - 1] = board[i][j - 2] = -abs(
board[i][j]
)
for j in range(n):
for i in range(2, m):
if board[i][j] and abs(board[i][j]) == abs(board[i - 1][j]) == abs(
board[i - 2][j]
):
run = True
board[i][j] = board[i - 1][j] = board[i - 2][j] = -abs(
board[i][j]
)
if run:
for j in range(n):
k = m - 1
for i in range(m - 1, -1, -1):
if board[i][j] > 0:
board[k][j] = board[i][j]
k -= 1
while k >= 0:
board[k][j] = 0
k -= 1
return board
| Solution |
python | django__django | tests/gis_tests/geoapp/test_indexes.py | {
"start": 224,
"end": 2804
} | class ____(TransactionTestCase):
available_apps = []
models = [City]
def get_indexes(self, table):
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
return {
name: constraint["columns"]
for name, constraint in constraints.items()
if constraint["index"]
}
def has_spatial_indexes(self, table):
if connection.ops.mysql:
with connection.cursor() as cursor:
return connection.introspection.supports_spatial_index(cursor, table)
elif connection.ops.oracle:
# Spatial indexes in Meta.indexes are not supported by the Oracle
# backend (see #31252).
return False
return True
def test_using_sql(self):
if not connection.ops.postgis:
self.skipTest("This is a PostGIS-specific test.")
index = Index(fields=["point"])
editor = connection.schema_editor()
self.assertIn(
"%s USING " % editor.quote_name(City._meta.db_table),
str(index.create_sql(City, editor)),
)
@isolate_apps("gis_tests.geoapp")
def test_namespaced_db_table(self):
if not connection.ops.postgis:
self.skipTest("PostGIS-specific test.")
class SchemaCity(models.Model):
point = models.PointField()
class Meta:
app_label = "geoapp"
db_table = 'django_schema"."geoapp_schema_city'
index = Index(fields=["point"])
editor = connection.schema_editor()
create_index_sql = str(index.create_sql(SchemaCity, editor))
self.assertIn(
"%s USING " % editor.quote_name(SchemaCity._meta.db_table),
create_index_sql,
)
self.assertIn(
'CREATE INDEX "geoapp_schema_city_point_9ed70651_id" ',
create_index_sql,
)
def test_index_name(self):
if not self.has_spatial_indexes(City._meta.db_table):
self.skipTest("Spatial indexes in Meta.indexes are not supported.")
index_name = "custom_point_index_name"
index = Index(fields=["point"], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(City, index)
indexes = self.get_indexes(City._meta.db_table)
self.assertIn(index_name, indexes)
self.assertEqual(indexes[index_name], ["point"])
editor.remove_index(City, index)
| SchemaIndexesTests |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectors.py | {
"start": 3308,
"end": 6218
} | class ____:
@staticmethod
def __hnsw(
*,
quantizer: Optional[_QuantizerConfigCreate] = None,
multivector: Optional[_MultiVectorConfigCreate] = None,
) -> _VectorIndexConfigHNSWCreate:
return _VectorIndexConfigHNSWCreate(
cleanupIntervalSeconds=None,
distance=None,
dynamicEfMin=None,
dynamicEfMax=None,
dynamicEfFactor=None,
efConstruction=None,
ef=None,
filterStrategy=None,
flatSearchCutoff=None,
maxConnections=None,
vectorCacheMaxObjects=None,
quantizer=quantizer,
multivector=multivector,
)
@staticmethod
def __flat(*, quantizer: Optional[_QuantizerConfigCreate]) -> _VectorIndexConfigFlatCreate:
return _VectorIndexConfigFlatCreate(
distance=None,
vectorCacheMaxObjects=None,
quantizer=quantizer,
multivector=None,
)
@staticmethod
def single(
vector_index_config: Optional[_VectorIndexConfigCreate],
quantizer: Optional[_QuantizerConfigCreate],
) -> Optional[_VectorIndexConfigCreate]:
if quantizer is not None:
if vector_index_config is None:
vector_index_config = _IndexWrappers.__hnsw(quantizer=quantizer)
else:
if isinstance(vector_index_config, _VectorIndexConfigDynamicCreate):
if vector_index_config.hnsw is None:
vector_index_config.hnsw = _IndexWrappers.__hnsw(quantizer=quantizer)
else:
vector_index_config.hnsw.quantizer = quantizer
if vector_index_config.flat is None:
vector_index_config.flat = _IndexWrappers.__flat(quantizer=quantizer)
else:
vector_index_config.flat.quantizer = quantizer
else:
vector_index_config.quantizer = quantizer
return vector_index_config
@staticmethod
def multi(
vector_index_config: Optional[_VectorIndexConfigCreate],
quantizer: Optional[_QuantizerConfigCreate],
multi_vector_config: Optional[_MultiVectorConfigCreate],
encoding: Optional[_MultiVectorEncodingConfigCreate],
) -> Optional[_VectorIndexConfigCreate]:
if multi_vector_config is None:
multi_vector_config = _MultiVectorConfigCreate(aggregation=None, encoding=None)
if encoding is not None:
multi_vector_config.encoding = encoding
if vector_index_config is None:
vector_index_config = _IndexWrappers.__hnsw(multivector=multi_vector_config)
else:
vector_index_config.multivector = multi_vector_config
return _IndexWrappers.single(vector_index_config, quantizer)
| _IndexWrappers |
python | conda__conda | conda/gateways/repodata/jlap/core.py | {
"start": 873,
"end": 4146
} | class ____(UserList):
@classmethod
def from_lines(cls, lines: Iterable[bytes], iv: bytes, pos=0, verify=True):
r"""
:param lines: iterator over input split by b'\n', with b'\n' removed
:param pos: initial position
:param iv: initialization vector (first line of .jlap stream, hex
decoded). Ignored if pos==0.
:param verify: assert last line equals computed checksum of previous
line. Useful for writing new .jlap files if False.
:raises ValueError: if trailing and computed checksums do not match
:return: list of (offset, line, checksum)
"""
# save initial iv in case there were no new lines
buffer: list[tuple[int, str, str]] = [(-1, iv.hex(), iv.hex())]
initial_pos = pos
for pos, line in line_and_pos(lines, pos=pos):
if pos == 0:
iv = bytes.fromhex(line.decode("utf-8"))
buffer = [(0, iv.hex(), iv.hex())]
else:
iv = keyed_hash(line, iv).digest()
buffer.append((pos, line.decode("utf-8"), iv.hex()))
log.debug("%d bytes read", pos - initial_pos) # maybe + length of last line
if verify:
if buffer[-1][1] != buffer[-2][-1]:
raise ValueError("checksum mismatch")
else:
log.info("Checksum OK")
return cls(buffer)
@classmethod
def from_path(cls, path: Path | str, verify=True):
# in binary mode, line separator is hardcoded as \n
with Path(path).open("rb") as p:
return cls.from_lines(
(line.rstrip(b"\n") for line in p), b"", verify=verify
)
def add(self, line: str):
"""
Add line to buffer, following checksum rules.
Buffer must not be empty.
(Remember to pop trailing checksum and possibly trailing metadata line, if
appending to a complete jlap file)
Less efficient than creating a new buffer from many lines and our last iv,
and extending.
:return: self
"""
if "\n" in line:
raise ValueError("\\n not allowed in line")
pos, last_line, iv = self[-1]
# include last line's utf-8 encoded length, plus 1 in pos?
pos += len(last_line.encode("utf-8")) + 1
self.extend(
JLAP.from_lines(
(line.encode("utf-8"),), bytes.fromhex(iv), pos, verify=False
)[1:]
)
return self
def terminate(self):
"""
Add trailing checksum to buffer.
:return: self
"""
_, _, iv = self[-1]
self.add(iv)
return self
def write(self, path: Path):
"""Write buffer to path."""
with Path(path).open("w", encoding="utf-8", newline="\n") as p:
return p.write("\n".join(b[1] for b in self))
@property
def body(self):
"""All lines except the first, and last two."""
return self[1:-2]
@property
def penultimate(self):
"""Next-to-last line. Should contain the footer."""
return self[-2]
@property
def last(self):
"""Last line. Should contain the trailing checksum."""
return self[-1]
| JLAP |
python | sympy__sympy | sympy/geometry/line.py | {
"start": 56585,
"end": 61448
} | class ____(LinearEntity2D, Line):
"""An infinite line in space 2D.
A line is declared with two distinct points or a point and slope
as defined using keyword `slope`.
Parameters
==========
p1 : Point
pt : Point
slope : SymPy expression
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Line, Segment, Point
>>> L = Line(Point(2,3), Point(3,5))
>>> L
Line2D(Point2D(2, 3), Point2D(3, 5))
>>> L.points
(Point2D(2, 3), Point2D(3, 5))
>>> L.equation()
-2*x + y + 1
>>> L.coefficients
(-2, 1, 1)
Instantiate with keyword ``slope``:
>>> Line(Point(0, 0), slope=0)
Line2D(Point2D(0, 0), Point2D(1, 0))
Instantiate with another linear object
>>> s = Segment((0, 0), (0, 1))
>>> Line(s).equation()
x
"""
def __new__(cls, p1, pt=None, slope=None, **kwargs):
if isinstance(p1, LinearEntity):
if pt is not None:
raise ValueError('When p1 is a LinearEntity, pt should be None')
p1, pt = Point._normalize_dimension(*p1.args, dim=2)
else:
p1 = Point(p1, dim=2)
if pt is not None and slope is None:
try:
p2 = Point(pt, dim=2)
except (NotImplementedError, TypeError, ValueError):
raise ValueError(filldedent('''
The 2nd argument was not a valid Point.
If it was a slope, enter it with keyword "slope".
'''))
elif slope is not None and pt is None:
slope = sympify(slope)
if slope.is_finite is False:
# when infinite slope, don't change x
dx = 0
dy = 1
else:
# go over 1 up slope
dx = 1
dy = slope
# XXX avoiding simplification by adding to coords directly
p2 = Point(p1.x + dx, p1.y + dy, evaluate=False)
else:
raise ValueError('A 2nd Point or keyword "slope" must be used.')
return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the LinearEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = (N(self.p1), N(self.p2))
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" '
'marker-start="url(#markerReverseArrow)" marker-end="url(#markerArrow)"/>'
).format(2.*scale_factor, path, fill_color)
@property
def coefficients(self):
"""The coefficients (`a`, `b`, `c`) for `ax + by + c = 0`.
See Also
========
sympy.geometry.line.Line2D.equation
Examples
========
>>> from sympy import Point, Line
>>> from sympy.abc import x, y
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.coefficients
(-3, 5, 0)
>>> p3 = Point(x, y)
>>> l2 = Line(p1, p3)
>>> l2.coefficients
(-y, x, 0)
"""
p1, p2 = self.points
if p1.x == p2.x:
return (S.One, S.Zero, -p1.x)
elif p1.y == p2.y:
return (S.Zero, S.One, -p1.y)
return tuple([simplify(i) for i in
(self.p1.y - self.p2.y,
self.p2.x - self.p1.x,
self.p1.x*self.p2.y - self.p1.y*self.p2.x)])
def equation(self, x='x', y='y'):
"""The equation of the line: ax + by + c.
Parameters
==========
x : str, optional
The name to use for the x-axis, default value is 'x'.
y : str, optional
The name to use for the y-axis, default value is 'y'.
Returns
=======
equation : SymPy expression
See Also
========
sympy.geometry.line.Line2D.coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(1, 0), Point(5, 3)
>>> l1 = Line(p1, p2)
>>> l1.equation()
-3*x + 4*y + 3
"""
x = _symbol(x, real=True)
y = _symbol(y, real=True)
p1, p2 = self.points
if p1.x == p2.x:
return x - p1.x
elif p1.y == p2.y:
return y - p1.y
a, b, c = self.coefficients
return a*x + b*y + c
| Line2D |
python | bokeh__bokeh | src/bokeh/models/misc/group_by.py | {
"start": 1526,
"end": 1749
} | class ____(Model):
""" Base class for grouping behaviors. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| GroupBy |
python | celery__celery | t/unit/utils/test_graph.py | {
"start": 122,
"end": 1935
} | class ____:
def graph1(self):
res_a = self.app.AsyncResult('A')
res_b = self.app.AsyncResult('B')
res_c = self.app.GroupResult('C', [res_a])
res_d = self.app.GroupResult('D', [res_c, res_b])
node_a = (res_a, [])
node_b = (res_b, [])
node_c = (res_c, [res_a])
node_d = (res_d, [res_c, res_b])
return DependencyGraph([
node_a,
node_b,
node_c,
node_d,
])
def test_repr(self):
assert repr(self.graph1())
def test_topsort(self):
order = self.graph1().topsort()
# C must start before D
assert order.index('C') < order.index('D')
# and B must start before D
assert order.index('B') < order.index('D')
# and A must start before C
assert order.index('A') < order.index('C')
def test_edges(self):
edges = self.graph1().edges()
assert sorted(edges, key=str) == ['C', 'D']
def test_connect(self):
x, y = self.graph1(), self.graph1()
x.connect(y)
def test_valency_of_when_missing(self):
x = self.graph1()
assert x.valency_of('foobarbaz') == 0
def test_format(self):
x = self.graph1()
x.formatter = Mock()
obj = Mock()
assert x.format(obj)
x.formatter.assert_called_with(obj)
x.formatter = None
assert x.format(obj) is obj
def test_items(self):
assert dict(self.graph1().items()) == {
'A': [], 'B': [], 'C': ['A'], 'D': ['C', 'B'],
}
def test_repr_node(self):
x = self.graph1()
assert x.repr_node('fasdswewqewq')
def test_to_dot(self):
s = WhateverIO()
self.graph1().to_dot(s)
assert s.getvalue()
| test_DependencyGraph |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 37017,
"end": 38511
} | class ____(HelperFunction):
def _calculate(self, X, y, logger, feat_type):
import sklearn.decomposition
pca = sklearn.decomposition.PCA(copy=True)
rs = np.random.RandomState(42)
indices = np.arange(X.shape[0])
for i in range(10):
try:
rs.shuffle(indices)
pca.fit(
X.iloc[indices] if hasattr(X, "iloc") else X[indices],
)
return pca
except LinAlgError:
pass
self.logger.warning("Failed to compute a Principle Component Analysis")
return None
def _calculate_sparse(self, X, y, logger, feat_type):
import sklearn.decomposition
rs = np.random.RandomState(42)
indices = np.arange(X.shape[0])
# This is expensive, but necessary with scikit-learn 0.15
Xt = X.astype(np.float64)
for i in range(10):
try:
rs.shuffle(indices)
truncated_svd = sklearn.decomposition.TruncatedSVD(
n_components=X.shape[1] - 1, random_state=i, algorithm="randomized"
)
truncated_svd.fit(Xt[indices])
return truncated_svd
except LinAlgError:
pass
self.logger.warning("Failed to compute a Truncated SVD")
return None
# Maybe define some more...
@metafeatures.define("PCAFractionOfComponentsFor95PercentVariance", dependency="PCA")
| PCA |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 352403,
"end": 357610
} | class ____:
class Foo:
def __init__(self, value):
self.value = value
self.iface = {'typestr': 'f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
@pytest.mark.parametrize('val, iface, expected', [
(f, {}, 0.5),
([f], {}, [0.5]),
([f, f], {}, [0.5, 0.5]),
(f, {'shape': ()}, 0.5),
(f, {'shape': None}, TypeError),
(f, {'shape': (1, 1)}, [[0.5]]),
(f, {'shape': (2,)}, ValueError),
(f, {'strides': ()}, 0.5),
(f, {'strides': (2,)}, ValueError),
(f, {'strides': 16}, TypeError),
# This fails due to going into the buffer protocol path
(f, {'data': None, 'shape': ()}, TypeError),
])
@pytest.mark.thread_unsafe(reason="test result depends on the reference count of a global object")
def test_scalar_interface(self, val, iface, expected):
# Test scalar coercion within the array interface
self.f.iface = {'typestr': 'f8'}
self.f.iface.update(iface)
if HAS_REFCOUNT:
pre_cnt = sys.getrefcount(np.dtype('f8'))
if isinstance(expected, type):
assert_raises(expected, np.array, val)
else:
result = np.array(val)
assert_equal(np.array(val), expected)
assert result.dtype == 'f8'
del result
if HAS_REFCOUNT:
post_cnt = sys.getrefcount(np.dtype('f8'))
assert_equal(pre_cnt, post_cnt)
def test_interface_empty_shape():
class ArrayLike:
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_interface_no_shape_error():
class ArrayLike:
__array_interface__ = {"data": None, "typestr": "f8"}
with pytest.raises(ValueError, match="Missing __array_interface__ shape"):
np.array(ArrayLike())
@pytest.mark.parametrize("iface", [
{"typestr": "f8", "shape": (0, 1)},
{"typestr": "(0,)f8,", "shape": (1, 3)},
])
def test_interface_nullptr(iface):
iface.update({"data": (0, True)})
class ArrayLike:
__array_interface__ = iface
arr = np.asarray(ArrayLike())
# Note, we currently set the base anyway, but we do an allocation
# (because NumPy doesn't like NULL data pointers everywhere).
assert arr.shape == iface["shape"]
assert arr.dtype == np.dtype(iface["typestr"])
assert arr.base is not None
assert arr.flags.owndata
def test_interface_nullptr_size_check():
# Note that prior to NumPy 2.4 the below took the scalar path (if shape had size 1)
class ArrayLike:
__array_interface__ = {"data": (0, True), "typestr": "f8", "shape": ()}
with pytest.raises(ValueError, match="data is NULL but array contains data"):
np.array(ArrayLike())
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_array_interface_empty_shape():
# See gh-7994
arr = np.array([1, 2, 3])
interface1 = dict(arr.__array_interface__)
interface1['shape'] = ()
class DummyArray1:
__array_interface__ = interface1
# NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting
# the interface data to bytes would invoke the bug this tests for, that
# __array_interface__ with shape=() is not allowed if the data is an object
# exposing the buffer interface
interface2 = dict(interface1)
interface2['data'] = arr[0].tobytes()
class DummyArray2:
__array_interface__ = interface2
arr1 = np.asarray(DummyArray1())
arr2 = np.asarray(DummyArray2())
arr3 = arr[:1].reshape(())
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
def test_array_interface_offset():
arr = np.array([1, 2, 3], dtype='int32')
interface = dict(arr.__array_interface__)
interface['data'] = memoryview(arr)
interface['shape'] = (2,)
interface['offset'] = 4
class DummyArray:
__array_interface__ = interface
arr1 = np.asarray(DummyArray())
assert_equal(arr1, arr[1:])
def test_array_interface_unicode_typestr():
arr = np.array([1, 2, 3], dtype='int32')
interface = dict(arr.__array_interface__)
interface['typestr'] = '\N{check mark}'
class DummyArray:
__array_interface__ = interface
# should not be UnicodeEncodeError
with pytest.raises(TypeError):
np.asarray(DummyArray())
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
| TestArrayInterface |
python | tiangolo__fastapi | docs_src/path_operation_configuration/tutorial001_py310.py | {
"start": 86,
"end": 363
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
tags: set[str] = set()
@app.post("/items/", response_model=Item, status_code=status.HTTP_201_CREATED)
async def create_item(item: Item):
return item
| Item |
python | redis__redis-py | tests/test_pubsub.py | {
"start": 2424,
"end": 17016
} | class ____:
def _test_subscribe_unsubscribe(
self, p, sub_type, unsub_type, sub_func, unsub_func, keys
):
for key in keys:
assert sub_func(key) is None
# should be a message for each channel/pattern we just subscribed to
for i, key in enumerate(keys):
assert wait_for_message(p) == make_message(sub_type, key, i + 1)
for key in keys:
assert unsub_func(key) is None
# should be a message for each channel/pattern we just unsubscribed
# from
for i, key in enumerate(keys):
i = len(keys) - 1 - i
assert wait_for_message(p) == make_message(unsub_type, key, i)
def test_channel_subscribe_unsubscribe(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_subscribe_unsubscribe(**kwargs)
def test_pattern_subscribe_unsubscribe(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_subscribe_unsubscribe(**kwargs)
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.0.0")
def test_shard_channel_subscribe_unsubscribe(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "shard_channel")
self._test_subscribe_unsubscribe(**kwargs)
@pytest.mark.onlycluster
@skip_if_server_version_lt("7.0.0")
def test_shard_channel_subscribe_unsubscribe_cluster(self, r):
node_channels = defaultdict(int)
p = r.pubsub()
keys = {
"foo": r.get_node_from_key("foo"),
"bar": r.get_node_from_key("bar"),
"uni" + chr(4456) + "code": r.get_node_from_key("uni" + chr(4456) + "code"),
}
for key, node in keys.items():
assert p.ssubscribe(key) is None
# should be a message for each shard_channel we just subscribed to
for key, node in keys.items():
node_channels[node.name] += 1
assert wait_for_message(p, node=node) == make_message(
"ssubscribe", key, node_channels[node.name]
)
for key in keys.keys():
assert p.sunsubscribe(key) is None
# should be a message for each shard_channel we just unsubscribed
# from
for key, node in keys.items():
node_channels[node.name] -= 1
assert wait_for_message(p, node=node) == make_message(
"sunsubscribe", key, node_channels[node.name]
)
def _test_resubscribe_on_reconnection(
self, p, sub_type, unsub_type, sub_func, unsub_func, keys
):
for key in keys:
assert sub_func(key) is None
# should be a message for each channel/pattern we just subscribed to
for i, key in enumerate(keys):
assert wait_for_message(p) == make_message(sub_type, key, i + 1)
# manually disconnect
p.connection.disconnect()
# calling get_message again reconnects and resubscribes
# note, we may not re-subscribe to channels in exactly the same order
# so we have to do some extra checks to make sure we got them all
messages = []
for i in range(len(keys)):
messages.append(wait_for_message(p))
unique_channels = set()
assert len(messages) == len(keys)
for i, message in enumerate(messages):
assert message["type"] == sub_type
assert message["data"] == i + 1
assert isinstance(message["channel"], bytes)
channel = message["channel"].decode("utf-8")
unique_channels.add(channel)
assert len(unique_channels) == len(keys)
for channel in unique_channels:
assert channel in keys
def test_resubscribe_to_channels_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_resubscribe_on_reconnection(**kwargs)
@pytest.mark.onlynoncluster
def test_resubscribe_to_patterns_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_resubscribe_on_reconnection(**kwargs)
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.0.0")
def test_resubscribe_to_shard_channels_on_reconnection(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "shard_channel")
self._test_resubscribe_on_reconnection(**kwargs)
def _test_subscribed_property(
self, p, sub_type, unsub_type, sub_func, unsub_func, keys
):
assert p.subscribed is False
sub_func(keys[0])
# we're now subscribed even though we haven't processed the
# reply from the server just yet
assert p.subscribed is True
assert wait_for_message(p) == make_message(sub_type, keys[0], 1)
# we're still subscribed
assert p.subscribed is True
# unsubscribe from all channels
unsub_func()
# we're still technically subscribed until we process the
# response messages from the server
assert p.subscribed is True
assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
# now we're no longer subscribed as no more messages can be delivered
# to any channels we were listening to
assert p.subscribed is False
# subscribing again flips the flag back
sub_func(keys[0])
assert p.subscribed is True
assert wait_for_message(p) == make_message(sub_type, keys[0], 1)
# unsubscribe again
unsub_func()
assert p.subscribed is True
# subscribe to another channel before reading the unsubscribe response
sub_func(keys[1])
assert p.subscribed is True
# read the unsubscribe for key1
assert wait_for_message(p) == make_message(unsub_type, keys[0], 0)
# we're still subscribed to key2, so subscribed should still be True
assert p.subscribed is True
# read the key2 subscribe message
assert wait_for_message(p) == make_message(sub_type, keys[1], 1)
unsub_func()
# haven't read the message yet, so we're still subscribed
assert p.subscribed is True
assert wait_for_message(p) == make_message(unsub_type, keys[1], 0)
# now we're finally unsubscribed
assert p.subscribed is False
def test_subscribe_property_with_channels(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_subscribed_property(**kwargs)
@pytest.mark.onlynoncluster
def test_subscribe_property_with_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_subscribed_property(**kwargs)
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.0.0")
def test_subscribe_property_with_shard_channels(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "shard_channel")
self._test_subscribed_property(**kwargs)
@pytest.mark.onlycluster
@skip_if_server_version_lt("7.0.0")
def test_subscribe_property_with_shard_channels_cluster(self, r):
p = r.pubsub()
keys = ["foo", "bar", "uni" + chr(4456) + "code"]
nodes = [r.get_node_from_key(key) for key in keys]
assert p.subscribed is False
p.ssubscribe(keys[0])
# we're now subscribed even though we haven't processed the
# reply from the server just yet
assert p.subscribed is True
assert wait_for_message(p, node=nodes[0]) == make_message(
"ssubscribe", keys[0], 1
)
# we're still subscribed
assert p.subscribed is True
# unsubscribe from all shard_channels
p.sunsubscribe()
# we're still technically subscribed until we process the
# response messages from the server
assert p.subscribed is True
assert wait_for_message(p, node=nodes[0]) == make_message(
"sunsubscribe", keys[0], 0
)
# now we're no longer subscribed as no more messages can be delivered
# to any channels we were listening to
assert p.subscribed is False
# subscribing again flips the flag back
p.ssubscribe(keys[0])
assert p.subscribed is True
assert wait_for_message(p, node=nodes[0]) == make_message(
"ssubscribe", keys[0], 1
)
# unsubscribe again
p.sunsubscribe()
assert p.subscribed is True
# subscribe to another shard_channel before reading the unsubscribe response
p.ssubscribe(keys[1])
assert p.subscribed is True
# read the unsubscribe for key1
assert wait_for_message(p, node=nodes[0]) == make_message(
"sunsubscribe", keys[0], 0
)
# we're still subscribed to key2, so subscribed should still be True
assert p.subscribed is True
# read the key2 subscribe message
assert wait_for_message(p, node=nodes[1]) == make_message(
"ssubscribe", keys[1], 1
)
p.sunsubscribe()
# haven't read the message yet, so we're still subscribed
assert p.subscribed is True
assert wait_for_message(p, node=nodes[1]) == make_message(
"sunsubscribe", keys[1], 0
)
# now we're finally unsubscribed
assert p.subscribed is False
@skip_if_server_version_lt("7.0.0")
def test_ignore_all_subscribe_messages(self, r):
p = r.pubsub(ignore_subscribe_messages=True)
checks = (
(p.subscribe, "foo", p.get_message),
(p.unsubscribe, "foo", p.get_message),
(p.psubscribe, "f*", p.get_message),
(p.punsubscribe, "f*", p.get_message),
(p.ssubscribe, "foo", p.get_sharded_message),
(p.sunsubscribe, "foo", p.get_sharded_message),
)
assert p.subscribed is False
for func, channel, get_func in checks:
assert func(channel) is None
assert p.subscribed is True
assert wait_for_message(p, func=get_func) is None
assert p.subscribed is False
@skip_if_server_version_lt("7.0.0")
def test_ignore_individual_subscribe_messages(self, r):
p = r.pubsub()
checks = (
(p.subscribe, "foo", p.get_message),
(p.unsubscribe, "foo", p.get_message),
(p.psubscribe, "f*", p.get_message),
(p.punsubscribe, "f*", p.get_message),
(p.ssubscribe, "foo", p.get_sharded_message),
(p.sunsubscribe, "foo", p.get_sharded_message),
)
assert p.subscribed is False
for func, channel, get_func in checks:
assert func(channel) is None
assert p.subscribed is True
message = wait_for_message(p, ignore_subscribe_messages=True, func=get_func)
assert message is None
assert p.subscribed is False
def test_sub_unsub_resub_channels(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_sub_unsub_resub(**kwargs)
@pytest.mark.onlynoncluster
def test_sub_unsub_resub_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_sub_unsub_resub(**kwargs)
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.0.0")
def test_sub_unsub_resub_shard_channels(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "shard_channel")
self._test_sub_unsub_resub(**kwargs)
def _test_sub_unsub_resub(
self, p, sub_type, unsub_type, sub_func, unsub_func, keys
):
# https://github.com/andymccurdy/redis-py/issues/764
key = keys[0]
sub_func(key)
unsub_func(key)
sub_func(key)
assert p.subscribed is True
assert wait_for_message(p) == make_message(sub_type, key, 1)
assert wait_for_message(p) == make_message(unsub_type, key, 0)
assert wait_for_message(p) == make_message(sub_type, key, 1)
assert p.subscribed is True
@pytest.mark.onlycluster
@skip_if_server_version_lt("7.0.0")
def test_sub_unsub_resub_shard_channels_cluster(self, r):
p = r.pubsub()
key = "foo"
p.ssubscribe(key)
p.sunsubscribe(key)
p.ssubscribe(key)
assert p.subscribed is True
assert wait_for_message(p, func=p.get_sharded_message) == make_message(
"ssubscribe", key, 1
)
assert wait_for_message(p, func=p.get_sharded_message) == make_message(
"sunsubscribe", key, 0
)
assert wait_for_message(p, func=p.get_sharded_message) == make_message(
"ssubscribe", key, 1
)
assert p.subscribed is True
def test_sub_unsub_all_resub_channels(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "channel")
self._test_sub_unsub_all_resub(**kwargs)
def test_sub_unsub_all_resub_patterns(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
self._test_sub_unsub_all_resub(**kwargs)
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("7.0.0")
def test_sub_unsub_all_resub_shard_channels(self, r):
kwargs = make_subscribe_test_data(r.pubsub(), "shard_channel")
self._test_sub_unsub_all_resub(**kwargs)
def _test_sub_unsub_all_resub(
self, p, sub_type, unsub_type, sub_func, unsub_func, keys
):
# https://github.com/andymccurdy/redis-py/issues/764
key = keys[0]
sub_func(key)
unsub_func()
sub_func(key)
assert p.subscribed is True
assert wait_for_message(p) == make_message(sub_type, key, 1)
assert wait_for_message(p) == make_message(unsub_type, key, 0)
assert wait_for_message(p) == make_message(sub_type, key, 1)
assert p.subscribed is True
@pytest.mark.onlycluster
@skip_if_server_version_lt("7.0.0")
def test_sub_unsub_all_resub_shard_channels_cluster(self, r):
p = r.pubsub()
key = "foo"
p.ssubscribe(key)
p.sunsubscribe()
p.ssubscribe(key)
assert p.subscribed is True
assert wait_for_message(p, func=p.get_sharded_message) == make_message(
"ssubscribe", key, 1
)
assert wait_for_message(p, func=p.get_sharded_message) == make_message(
"sunsubscribe", key, 0
)
assert wait_for_message(p, func=p.get_sharded_message) == make_message(
"ssubscribe", key, 1
)
assert p.subscribed is True
| TestPubSubSubscribeUnsubscribe |
python | django-extensions__django-extensions | django_extensions/management/commands/find_template.py | {
"start": 186,
"end": 671
} | class ____(LabelCommand):
help = "Finds the location of the given template by resolving its path"
args = "[template_path]"
label = "template path"
@signalcommand
def handle_label(self, template_path, **options):
try:
template = loader.get_template(template_path).template
except TemplateDoesNotExist:
sys.stderr.write("No template found\n")
else:
sys.stdout.write(self.style.SUCCESS((template.name)))
| Command |
python | walkccc__LeetCode | solutions/692. Top K Frequent Words/692.py | {
"start": 0,
"end": 367
} | class ____:
def topKFrequent(self, words: list[str], k: int) -> list[str]:
ans = []
bucket = [[] for _ in range(len(words) + 1)]
for word, freq in collections.Counter(words).items():
bucket[freq].append(word)
for b in reversed(bucket):
for word in sorted(b):
ans.append(word)
if len(ans) == k:
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 56257,
"end": 56835
} | class ____(sgqlc.types.Enum):
"""The possible types of patch statuses.
Enumeration Choices:
* `ADDED`: The file was added. Git status 'A'.
* `CHANGED`: The file's type was changed. Git status 'T'.
* `COPIED`: The file was copied. Git status 'C'.
* `DELETED`: The file was deleted. Git status 'D'.
* `MODIFIED`: The file's contents were changed. Git status 'M'.
* `RENAMED`: The file was renamed. Git status 'R'.
"""
__schema__ = github_schema
__choices__ = ("ADDED", "CHANGED", "COPIED", "DELETED", "MODIFIED", "RENAMED")
| PatchStatus |
python | django__django | tests/model_fields/storage.py | {
"start": 69,
"end": 233
} | class ____(FileSystemStorage):
def open(self, *args, **kwargs):
raise AssertionError("This storage class does not support reading.")
| NoReadFileSystemStorage |
python | mahmoud__glom | glom/core.py | {
"start": 65059,
"end": 65197
} | class ____(type):
def __instancecheck__(cls, C):
return hasattr(C, "__dict__") and hasattr(C.__dict__, "keys")
| _ObjStyleKeysMeta |
python | dask__dask | dask/dataframe/utils.py | {
"start": 23702,
"end": 25800
} | class ____(NotImplementedError, AttributeError):
"""NotImplementedError and AttributeError"""
def meta_frame_constructor(like):
"""Return a serial DataFrame constructor
Parameters
----------
like :
Any series-like, Index-like or dataframe-like object.
"""
if is_dask_collection(like):
try:
like = like._meta
except AttributeError:
raise TypeError(f"{type(like)} not supported by meta_frame_constructor")
if is_dataframe_like(like):
return like._constructor
elif is_series_like(like):
return like._constructor_expanddim
elif is_index_like(like):
return like.to_frame()._constructor
else:
raise TypeError(f"{type(like)} not supported by meta_frame_constructor")
def meta_series_constructor(like):
"""Return a serial Series constructor
Parameters
----------
like :
Any series-like, Index-like or dataframe-like object.
"""
if is_dask_collection(like):
try:
like = like._meta
except AttributeError:
raise TypeError(f"{type(like)} not supported by meta_series_constructor")
if is_dataframe_like(like):
return like._constructor_sliced
elif is_series_like(like):
return like._constructor
elif is_index_like(like):
return like.to_frame()._constructor_sliced
else:
raise TypeError(f"{type(like)} not supported by meta_series_constructor")
def get_string_dtype():
"""Depending on config setting, we might convert objects to pyarrow strings"""
return pd.StringDtype("pyarrow") if pyarrow_strings_enabled() else object
def pyarrow_strings_enabled() -> bool:
"""Config setting to convert objects to pyarrow strings"""
convert_string = dask.config.get("dataframe.convert-string")
if convert_string is None:
convert_string = True
return convert_string
def get_numeric_only_kwargs(numeric_only: bool | NoDefault) -> dict:
return {} if numeric_only is no_default else {"numeric_only": numeric_only}
| AttributeNotImplementedError |
python | google__flatbuffers | tests/MyGame/Example/ArrayTable.py | {
"start": 1989,
"end": 3132
} | class ____(object):
# ArrayTableT
def __init__(
self,
a = None,
):
self.a = a # type: Optional[MyGame.Example.ArrayStruct.ArrayStructT]
@classmethod
def InitFromBuf(cls, buf, pos):
arrayTable = ArrayTable()
arrayTable.Init(buf, pos)
return cls.InitFromObj(arrayTable)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, arrayTable):
x = ArrayTableT()
x._UnPack(arrayTable)
return x
# ArrayTableT
def _UnPack(self, arrayTable):
if arrayTable is None:
return
if arrayTable.A() is not None:
self.a = MyGame.Example.ArrayStruct.ArrayStructT.InitFromObj(arrayTable.A())
# ArrayTableT
def Pack(self, builder):
ArrayTableStart(builder)
if self.a is not None:
a = self.a.Pack(builder)
ArrayTableAddA(builder, a)
arrayTable = ArrayTableEnd(builder)
return arrayTable
| ArrayTableT |
python | pyinstaller__pyinstaller | PyInstaller/loader/pyimod02_importers.py | {
"start": 17392,
"end": 26184
} | class ____:
"""
PyInstaller's frozen loader for modules in the PYZ archive, which are discovered by PyiFrozenFinder.
Since this loader is instantiated only from PyiFrozenFinder and since each loader instance is tied to a specific
module, the fact that the loader was instantiated serves as the proof that the module exists in the PYZ archive.
Hence, we can avoid any additional validation in the implementation of the loader's methods.
"""
def __init__(self, name, pyz_archive, pyz_entry_name, is_package):
# Store the reference to PYZ archive (for code object retrieval), as well as full PYZ entry name
# and typecode, all of which are passed from the PyiFrozenFinder.
self._pyz_archive = pyz_archive
self._pyz_entry_name = pyz_entry_name
self._is_package = is_package
# Compute the module file path, as if module was located on filesystem.
#
# Rather than returning path to the .pyc file, return the path to .py file - which might actually exist, if it
# was explicitly collected into the frozen application). This improves compliance with
# https://docs.python.org/3/library/importlib.html#importlib.abc.ExecutionLoader.get_filename
# as well as general compatibility with 3rd party code that blindly assumes that module's file path points to
# the source .py file.
#
# NOTE: since we are using sys._MEIPASS as prefix, we need to construct path from full PYZ entry name
# (so that a module with `name`=`jaraco.text` and `pyz_entry_name`=`setuptools._vendor.jaraco.text`
# ends up with path set to `sys._MEIPASS/setuptools/_vendor/jaraco/text/__init__.pyc` instead of
# `sys._MEIPASS/jaraco/text/__init__.pyc`).
if is_package:
module_file = os.path.join(sys._MEIPASS, pyz_entry_name.replace('.', os.path.sep), '__init__.py')
else:
module_file = os.path.join(sys._MEIPASS, pyz_entry_name.replace('.', os.path.sep) + '.py')
# These properties are defined as part of importlib.abc.FileLoader. They are used by our implementation
# (e.g., module name validation, get_filename(), get_source(), get_resource_reader()), and might also be used
# by 3rd party code that naively expects to be dealing with a FileLoader instance.
self.name = name # The name of the module the loader can handle.
self.path = module_file # Path to the file of the module
#-- Core PEP451 loader functionality as defined by importlib.abc.Loader
# https://docs.python.org/3/library/importlib.html#importlib.abc.Loader
def create_module(self, spec):
"""
A method that returns the module object to use when importing a module. This method may return None, indicating
that default module creation semantics should take place.
https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.create_module
"""
return None
def exec_module(self, module):
"""
A method that executes the module in its own namespace when a module is imported or reloaded. The module
should already be initialized when exec_module() is called. When this method exists, create_module()
must be defined.
https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.exec_module
"""
spec = module.__spec__
bytecode = self.get_code(spec.name) # NOTE: get_code verifies that `spec.name` matches `self.name`!
if bytecode is None:
raise RuntimeError(f"Failed to retrieve bytecode for {spec.name!r}!")
# Set by the import machinery
assert hasattr(module, '__file__')
# If `submodule_search_locations` is not None, this is a package; set __path__.
if spec.submodule_search_locations is not None:
module.__path__ = spec.submodule_search_locations
exec(bytecode, module.__dict__)
# The following method is part of legacy PEP302 loader interface. It has been deprecated since python 3.4, and
# slated for removal in python 3.12, although that has not happened yet. Provide compatibility shim to accommodate
# code that might still be using it.
if True:
@_check_name
def load_module(self, fullname):
"""
A legacy method for loading a module. If the module cannot be loaded, ImportError is raised, otherwise the
loaded module is returned.
Deprecated since python 3.4, slated for removal in 3.12 (but still present in python's own FileLoader in
both v3.12.4 and v3.13.0rc1).
"""
# Based on:
# https://github.com/python/cpython/blob/v3.11.9/Lib/importlib/_bootstrap_external.py#L942-L945
import importlib._bootstrap as _bootstrap
return _bootstrap._load_module_shim(self, fullname)
#-- PEP302 protocol extensions as defined by importlib.abc.ExecutionLoader
# https://docs.python.org/3/library/importlib.html#importlib.abc.ExecutionLoader
@_check_name
def get_filename(self, fullname):
"""
A method that is to return the value of __file__ for the specified module. If no path is available, ImportError
is raised.
If source code is available, then the method should return the path to the source file, regardless of whether a
bytecode was used to load the module.
https://docs.python.org/3/library/importlib.html#importlib.abc.ExecutionLoader.get_filename
"""
return self.path
#-- PEP302 protocol extensions as defined by importlib.abc.InspectLoader
# https://docs.python.org/3/library/importlib.html#importlib.abc.InspectLoader
@_check_name
def get_code(self, fullname):
"""
Return the code object for a module, or None if the module does not have a code object (as would be the case,
for example, for a built-in module). Raise an ImportError if loader cannot find the requested module.
https://docs.python.org/3/library/importlib.html#importlib.abc.InspectLoader.get_code
"""
return self._pyz_archive.extract(self._pyz_entry_name)
@_check_name
def get_source(self, fullname):
"""
A method to return the source of a module. It is returned as a text string using universal newlines, translating
all recognized line separators into '\n' characters. Returns None if no source is available (e.g. a built-in
module). Raises ImportError if the loader cannot find the module specified.
https://docs.python.org/3/library/importlib.html#importlib.abc.InspectLoader.get_source
"""
# The `path` attribute (which is also returned from `get_filename()`) already points to where the source .py
# file should exist, if it is available.
filename = self.path
try:
# Read in binary mode, then decode
with open(filename, 'rb') as fp:
source_bytes = fp.read()
return _decode_source(source_bytes)
except FileNotFoundError:
pass
# Source code is unavailable.
return None
@_check_name
def is_package(self, fullname):
"""
A method to return a true value if the module is a package, a false value otherwise. ImportError is raised if
the loader cannot find the module.
https://docs.python.org/3/library/importlib.html#importlib.abc.InspectLoader.is_package
"""
return self._is_package
#-- PEP302 protocol extensions as dfined by importlib.abc.ResourceLoader
# https://docs.python.org/3/library/importlib.html#importlib.abc.ResourceLoader
def get_data(self, path):
"""
A method to return the bytes for the data located at path. Loaders that have a file-like storage back-end that
allows storing arbitrary data can implement this abstract method to give direct access to the data stored.
OSError is to be raised if the path cannot be found. The path is expected to be constructed using a module’s
__file__ attribute or an item from a package’s __path__.
https://docs.python.org/3/library/importlib.html#importlib.abc.ResourceLoader.get_data
"""
# Try to fetch the data from the filesystem. Since __file__ attribute works properly, just try to open the file
# and read it.
with open(path, 'rb') as fp:
return fp.read()
#-- Support for `importlib.resources`.
@_check_name
def get_resource_reader(self, fullname):
"""
Return resource reader compatible with `importlib.resources`.
"""
return PyiFrozenResourceReader(self)
| PyiFrozenLoader |
python | numpy__numpy | numpy/ma/tests/test_core.py | {
"start": 104604,
"end": 123238
} | class ____:
# Test MaskedArray Arithmetic
def _create_intdata(self):
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
return x, y, xm
def _create_floatdata(self):
x, y, xm = self._create_intdata()
return x.astype(float), y.astype(float), xm.astype(float)
def _create_otherdata(self):
o = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
othertypes = [np.dtype(_).type for _ in o]
x, y, xm = self._create_intdata()
uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
return othertypes, uint8data
def test_inplace_addition_scalar(self):
# Test of inplace additions
x, y, xm = self._create_intdata()
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
x, _, xm = self._create_floatdata()
id1 = x.data.ctypes.data
x += 1.
assert_(id1 == x.data.ctypes.data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
x, y, xm = self._create_intdata()
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
x, y, xm = self._create_intdata()
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
x, y, xm = self._create_floatdata()
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
x, y, xm = self._create_floatdata()
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
x, y, xm = self._create_floatdata()
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
x, y, xm = self._create_intdata()
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
x, y, xm = self._create_floatdata()
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
x, y, xm = self._create_floatdata()
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
othertypes, uint8data = self._create_otherdata()
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
def test_inplace_addition_array_type(self):
# Test of inplace additions
othertypes, uint8data = self._create_otherdata()
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
othertypes, uint8data = self._create_otherdata()
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
othertypes, uint8data = self._create_otherdata()
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
othertypes, uint8data = self._create_otherdata()
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
othertypes, uint8data = self._create_otherdata()
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
# Check for TypeError in case of unsupported types
othertypes, uint8data = self._create_otherdata()
unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]}
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
try:
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
except TypeError:
msg = f"Supported type {t} throwing TypeError"
assert t in unsupported, msg
def test_inplace_floor_division_array_type(self):
# Test of inplace division
# Check for TypeError in case of unsupported types
othertypes, uint8data = self._create_otherdata()
unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]}
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
x, y, xm = (_.astype(t) for _ in uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
try:
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except TypeError:
msg = f"Supported type {t} throwing TypeError"
assert t in unsupported, msg
def test_inplace_division_scalar_type(self):
# Test of inplace division
othertypes, uint8data = self._create_otherdata()
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
for t in othertypes:
x, y, xm = (_.astype(t) for _ in uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
nwarns = 0
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError):
nwarns += 1
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError):
nwarns += 1
if issubclass(t, np.integer):
assert_equal(nwarns, 2, f'Failed on type={t}.')
else:
assert_equal(nwarns, 0, f'Failed on type={t}.')
def test_inplace_division_array_type(self):
# Test of inplace division
othertypes, uint8data = self._create_otherdata()
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
for t in othertypes:
x, y, xm = (_.astype(t) for _ in uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
nwarns = 0
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true divide
# and will require casting to float for calculation and
# casting back to the original type. This will only be raised
# with integers. Whether it is an error or warning is only
# dependent on how stringent the casting rules are.
#
# Will handle the same way.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError):
nwarns += 1
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError):
nwarns += 1
if issubclass(t, np.integer):
assert_equal(nwarns, 2, f'Failed on type={t}.')
else:
assert_equal(nwarns, 0, f'Failed on type={t}.')
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
othertypes = self._create_otherdata()[0]
for t in othertypes:
with warnings.catch_warnings():
warnings.filterwarnings("error")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
| TestMaskedArrayInPlaceArithmetic |
python | ray-project__ray | python/ray/data/namespace_expressions/string_namespace.py | {
"start": 1357,
"end": 13361
} | class ____:
"""Namespace for string operations on expression columns.
This namespace provides methods for operating on string-typed columns using
PyArrow compute functions.
Example:
>>> from ray.data.expressions import col
>>> # Convert to uppercase
>>> expr = col("name").str.upper()
>>> # Get string length
>>> expr = col("name").str.len()
>>> # Check if string starts with a prefix
>>> expr = col("name").str.starts_with("A")
"""
_expr: Expr
# Length methods
def len(self) -> "UDFExpr":
"""Get the length of each string in characters."""
return _create_str_udf(pc.utf8_length, DataType.int32())(self._expr)
def byte_len(self) -> "UDFExpr":
"""Get the length of each string in bytes."""
return _create_str_udf(pc.binary_length, DataType.int32())(self._expr)
# Case methods
def upper(self) -> "UDFExpr":
"""Convert strings to uppercase."""
return _create_str_udf(pc.utf8_upper, DataType.string())(self._expr)
def lower(self) -> "UDFExpr":
"""Convert strings to lowercase."""
return _create_str_udf(pc.utf8_lower, DataType.string())(self._expr)
def capitalize(self) -> "UDFExpr":
"""Capitalize the first character of each string."""
return _create_str_udf(pc.utf8_capitalize, DataType.string())(self._expr)
def title(self) -> "UDFExpr":
"""Convert strings to title case."""
return _create_str_udf(pc.utf8_title, DataType.string())(self._expr)
def swapcase(self) -> "UDFExpr":
"""Swap the case of each character."""
return _create_str_udf(pc.utf8_swapcase, DataType.string())(self._expr)
# Predicate methods
def is_alpha(self) -> "UDFExpr":
"""Check if strings contain only alphabetic characters."""
return _create_str_udf(pc.utf8_is_alpha, DataType.bool())(self._expr)
def is_alnum(self) -> "UDFExpr":
"""Check if strings contain only alphanumeric characters."""
return _create_str_udf(pc.utf8_is_alnum, DataType.bool())(self._expr)
def is_digit(self) -> "UDFExpr":
"""Check if strings contain only digits."""
return _create_str_udf(pc.utf8_is_digit, DataType.bool())(self._expr)
def is_decimal(self) -> "UDFExpr":
"""Check if strings contain only decimal characters."""
return _create_str_udf(pc.utf8_is_decimal, DataType.bool())(self._expr)
def is_numeric(self) -> "UDFExpr":
"""Check if strings contain only numeric characters."""
return _create_str_udf(pc.utf8_is_numeric, DataType.bool())(self._expr)
def is_space(self) -> "UDFExpr":
"""Check if strings contain only whitespace."""
return _create_str_udf(pc.utf8_is_space, DataType.bool())(self._expr)
def is_lower(self) -> "UDFExpr":
"""Check if strings are lowercase."""
return _create_str_udf(pc.utf8_is_lower, DataType.bool())(self._expr)
def is_upper(self) -> "UDFExpr":
"""Check if strings are uppercase."""
return _create_str_udf(pc.utf8_is_upper, DataType.bool())(self._expr)
def is_title(self) -> "UDFExpr":
"""Check if strings are title-cased."""
return _create_str_udf(pc.utf8_is_title, DataType.bool())(self._expr)
def is_printable(self) -> "UDFExpr":
"""Check if strings contain only printable characters."""
return _create_str_udf(pc.utf8_is_printable, DataType.bool())(self._expr)
def is_ascii(self) -> "UDFExpr":
"""Check if strings contain only ASCII characters."""
return _create_str_udf(pc.string_is_ascii, DataType.bool())(self._expr)
# Searching methods
def starts_with(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Check if strings start with a pattern."""
return _create_str_udf(pc.starts_with, DataType.bool())(
self._expr, pattern, *args, **kwargs
)
def ends_with(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Check if strings end with a pattern."""
return _create_str_udf(pc.ends_with, DataType.bool())(
self._expr, pattern, *args, **kwargs
)
def contains(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Check if strings contain a substring."""
return _create_str_udf(pc.match_substring, DataType.bool())(
self._expr, pattern, *args, **kwargs
)
def match(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Match strings against a SQL LIKE pattern."""
return _create_str_udf(pc.match_like, DataType.bool())(
self._expr, pattern, *args, **kwargs
)
def find(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Find the first occurrence of a substring."""
return _create_str_udf(pc.find_substring, DataType.int32())(
self._expr, pattern, *args, **kwargs
)
def count(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Count occurrences of a substring."""
return _create_str_udf(pc.count_substring, DataType.int32())(
self._expr, pattern, *args, **kwargs
)
def find_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Find the first occurrence matching a regex pattern."""
return _create_str_udf(pc.find_substring_regex, DataType.int32())(
self._expr, pattern, *args, **kwargs
)
def count_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Count occurrences matching a regex pattern."""
return _create_str_udf(pc.count_substring_regex, DataType.int32())(
self._expr, pattern, *args, **kwargs
)
def match_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Check if strings match a regex pattern."""
return _create_str_udf(pc.match_substring_regex, DataType.bool())(
self._expr, pattern, *args, **kwargs
)
# Transformation methods
def reverse(self) -> "UDFExpr":
"""Reverse each string."""
return _create_str_udf(pc.utf8_reverse, DataType.string())(self._expr)
def slice(self, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Slice strings by codeunit indices."""
return _create_str_udf(pc.utf8_slice_codeunits, DataType.string())(
self._expr, *args, **kwargs
)
def replace(
self, pattern: str, replacement: str, *args: Any, **kwargs: Any
) -> "UDFExpr":
"""Replace occurrences of a substring."""
return _create_str_udf(pc.replace_substring, DataType.string())(
self._expr, pattern, replacement, *args, **kwargs
)
def replace_regex(
self, pattern: str, replacement: str, *args: Any, **kwargs: Any
) -> "UDFExpr":
"""Replace occurrences matching a regex pattern."""
return _create_str_udf(pc.replace_substring_regex, DataType.string())(
self._expr, pattern, replacement, *args, **kwargs
)
def replace_slice(
self, start: int, stop: int, replacement: str, *args: Any, **kwargs: Any
) -> "UDFExpr":
"""Replace a slice with a string."""
return _create_str_udf(pc.binary_replace_slice, DataType.string())(
self._expr, start, stop, replacement, *args, **kwargs
)
def split(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Split strings by a pattern."""
return _create_str_udf(pc.split_pattern, DataType(object))(
self._expr, pattern, *args, **kwargs
)
def split_regex(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Split strings by a regex pattern."""
return _create_str_udf(pc.split_pattern_regex, DataType(object))(
self._expr, pattern, *args, **kwargs
)
def split_whitespace(self, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Split strings on whitespace."""
return _create_str_udf(pc.utf8_split_whitespace, DataType(object))(
self._expr, *args, **kwargs
)
def extract(self, pattern: str, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Extract a substring matching a regex pattern."""
return _create_str_udf(pc.extract_regex, DataType.string())(
self._expr, pattern, *args, **kwargs
)
def repeat(self, n: int, *args: Any, **kwargs: Any) -> "UDFExpr":
"""Repeat each string n times."""
return _create_str_udf(pc.binary_repeat, DataType.string())(
self._expr, n, *args, **kwargs
)
def center(
self, width: int, padding: str = " ", *args: Any, **kwargs: Any
) -> "UDFExpr":
"""Center strings in a field of given width."""
return _create_str_udf(pc.utf8_center, DataType.string())(
self._expr, width, padding, *args, **kwargs
)
# Custom methods that need special logic beyond simple PyArrow function calls
def strip(self, characters: str | None = None) -> "UDFExpr":
"""Remove leading and trailing whitespace or specified characters.
Args:
characters: Characters to remove. If None, removes whitespace.
Returns:
UDFExpr that strips characters from both ends.
"""
@pyarrow_udf(return_dtype=DataType.string())
def _str_strip(arr: pyarrow.Array) -> pyarrow.Array:
if characters is None:
return pc.utf8_trim_whitespace(arr)
else:
return pc.utf8_trim(arr, characters=characters)
return _str_strip(self._expr)
def lstrip(self, characters: str | None = None) -> "UDFExpr":
"""Remove leading whitespace or specified characters.
Args:
characters: Characters to remove. If None, removes whitespace.
Returns:
UDFExpr that strips characters from the left.
"""
@pyarrow_udf(return_dtype=DataType.string())
def _str_lstrip(arr: pyarrow.Array) -> pyarrow.Array:
if characters is None:
return pc.utf8_ltrim_whitespace(arr)
else:
return pc.utf8_ltrim(arr, characters=characters)
return _str_lstrip(self._expr)
def rstrip(self, characters: str | None = None) -> "UDFExpr":
"""Remove trailing whitespace or specified characters.
Args:
characters: Characters to remove. If None, removes whitespace.
Returns:
UDFExpr that strips characters from the right.
"""
@pyarrow_udf(return_dtype=DataType.string())
def _str_rstrip(arr: pyarrow.Array) -> pyarrow.Array:
if characters is None:
return pc.utf8_rtrim_whitespace(arr)
else:
return pc.utf8_rtrim(arr, characters=characters)
return _str_rstrip(self._expr)
# Padding
def pad(
self,
width: int,
fillchar: str = " ",
side: Literal["left", "right", "both"] = "right",
) -> "UDFExpr":
"""Pad strings to a specified width.
Args:
width: Target width.
fillchar: Character to use for padding.
side: "left", "right", or "both" for padding side.
Returns:
UDFExpr that pads strings.
"""
@pyarrow_udf(return_dtype=DataType.string())
def _str_pad(arr: pyarrow.Array) -> pyarrow.Array:
if side == "right":
return pc.utf8_rpad(arr, width=width, padding=fillchar)
elif side == "left":
return pc.utf8_lpad(arr, width=width, padding=fillchar)
elif side == "both":
return pc.utf8_center(arr, width=width, padding=fillchar)
else:
raise ValueError("side must be 'left', 'right', or 'both'")
return _str_pad(self._expr)
| _StringNamespace |
python | PyCQA__isort | isort/format.py | {
"start": 3631,
"end": 5487
} | class ____(BasicPrinter):
def __init__(self, error: str, success: str, output: TextIO | None):
super().__init__(error, success, output=output)
# Note: this constants are instance variables instead ofs class variables
# because they refer to colorama which might not be installed.
self.ERROR = self.style_text("ERROR", colorama.Fore.RED)
self.SUCCESS = self.style_text("SUCCESS", colorama.Fore.GREEN)
self.ADDED_LINE = colorama.Fore.GREEN
self.REMOVED_LINE = colorama.Fore.RED
@staticmethod
def style_text(text: str, style: str | None = None) -> str:
if style is None:
return text
return style + text + str(colorama.Style.RESET_ALL)
def diff_line(self, line: str) -> None:
style = None
if re.match(ADDED_LINE_PATTERN, line):
style = self.ADDED_LINE
elif re.match(REMOVED_LINE_PATTERN, line):
style = self.REMOVED_LINE
self.output.write(self.style_text(line, style))
def create_terminal_printer(
color: bool, output: TextIO | None = None, error: str = "", success: str = ""
) -> BasicPrinter:
if color and colorama_unavailable:
no_colorama_message = (
"\n"
"Sorry, but to use --color (color_output) the colorama python package is required.\n\n"
"Reference: https://pypi.org/project/colorama/\n\n"
"You can either install it separately on your system or as the colors extra "
"for isort. Ex: \n\n"
"$ pip install isort[colors]\n"
)
print(no_colorama_message, file=sys.stderr)
sys.exit(1)
if not colorama_unavailable:
colorama.init(strip=False)
return (
ColoramaPrinter(error, success, output) if color else BasicPrinter(error, success, output)
)
| ColoramaPrinter |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_openapi.py | {
"start": 472,
"end": 1789
} | class ____:
def test_fields_with_load_default_load(self, openapi):
class MySchema(Schema):
field = fields.Str(dump_default="foo", load_default="bar")
res = openapi.schema2parameters(MySchema, location="query")
if openapi.openapi_version.major < 3:
assert res[0]["default"] == "bar"
else:
assert res[0]["schema"]["default"] == "bar"
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_fields_default_location_mapping_if_schema_many(self, openapi):
class ExampleSchema(Schema):
id = fields.Int()
schema = ExampleSchema(many=True)
res = openapi.schema2parameters(schema=schema, location="json")
assert res[0]["in"] == "body"
def test_fields_with_dump_only(self, openapi):
class UserSchema(Schema):
name = fields.Str(dump_only=True)
res = openapi.schema2parameters(schema=UserSchema(), location="query")
assert len(res) == 0
class UserSchema2(Schema):
name = fields.Str()
class Meta:
dump_only = ("name",)
res = openapi.schema2parameters(schema=UserSchema2(), location="query")
assert len(res) == 0
| TestMarshmallowFieldToOpenAPI |
python | scikit-learn__scikit-learn | sklearn/linear_model/_ridge.py | {
"start": 28185,
"end": 33337
} | class ____(LinearModel, metaclass=ABCMeta):
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0, None, closed="left"), np.ndarray],
"fit_intercept": ["boolean"],
"copy_X": ["boolean"],
"max_iter": [Interval(Integral, 1, None, closed="left"), None],
"tol": [Interval(Real, 0, None, closed="left")],
"solver": [
StrOptions(
{"auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga", "lbfgs"}
)
],
"positive": ["boolean"],
"random_state": ["random_state"],
}
@abstractmethod
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
copy_X=True,
max_iter=None,
tol=1e-4,
solver="auto",
positive=False,
random_state=None,
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.positive = positive
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
xp, is_array_api_compliant = get_namespace(X, y, sample_weight)
if self.solver == "lbfgs" and not self.positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if self.positive:
if self.solver not in ["auto", "lbfgs"]:
raise ValueError(
f"solver='{self.solver}' does not support positive fitting. Please"
" set the solver to 'auto' or 'lbfgs', or set `positive=False`"
)
else:
solver = self.solver
elif sparse.issparse(X) and self.fit_intercept:
if self.solver not in ["auto", "lbfgs", "lsqr", "sag", "sparse_cg"]:
raise ValueError(
"solver='{}' does not support fitting the intercept "
"on sparse data. Please set the solver to 'auto' or "
"'lsqr', 'sparse_cg', 'sag', 'lbfgs' "
"or set `fit_intercept=False`".format(self.solver)
)
if self.solver in ["lsqr", "lbfgs"]:
solver = self.solver
elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4:
warnings.warn(
'"sag" solver requires many iterations to fit '
"an intercept with sparse inputs. Either set the "
'solver to "auto" or "sparse_cg", or set a low '
'"tol" and a high "max_iter" (especially if inputs are '
"not standardized)."
)
solver = "sag"
else:
solver = "sparse_cg"
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# when X is sparse we only remove offset from y
X, y, X_offset, y_offset, X_scale, _ = _preprocess_data(
X,
y,
fit_intercept=self.fit_intercept,
copy=self.copy_X,
sample_weight=sample_weight,
rescale_with_sw=False,
)
if solver == "sag" and sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_, self.solver_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver="sag",
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=True,
return_solver=True,
check_input=False,
)
# add the offset which was subtracted by _preprocess_data
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
# required to fit intercept with sparse_cg and lbfgs solver
params = {"X_offset": X_offset, "X_scale": X_scale}
else:
# for dense matrices or when intercept is set to 0
params = {}
self.coef_, self.n_iter_, self.solver_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=solver,
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=False,
return_solver=True,
check_input=False,
fit_intercept=self.fit_intercept,
**params,
)
self._set_intercept(X_offset, y_offset, X_scale)
return self
| _BaseRidge |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/read_one/tutorial001.py | {
"start": 426,
"end": 1568
} | class ____(HeroBase):
id: int
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroPublic])
def read_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
| HeroPublic |
python | PyCQA__pylint | tests/functional/a/access/access_member_before_definition.py | {
"start": 123,
"end": 329
} | class ____:
"""class with attributes defined in wrong order"""
def __init__(self):
var1 = self._var2 # [access-member-before-definition]
self._var2 = 3
self._var3 = var1
| Aaaa |
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 16575,
"end": 18834
} | class ____:
"""Use this factory class to define the appropriate classes needed when defining near text and near vector sub-searches in hybrid queries."""
@staticmethod
def near_text(
query: Union[str, List[str]],
*,
certainty: Optional[float] = None,
distance: Optional[float] = None,
move_to: Optional[Move] = None,
move_away: Optional[Move] = None,
) -> _HybridNearText:
"""Define a near text search to be used within a hybrid query.
Args:
query: The text to search for as a string or a list of strings.
certainty: The minimum similarity score to return. If not specified, the default certainty specified by the server is used.
distance: The maximum distance to search. If not specified, the default distance specified by the server is used.
move_to: Define the concepts that should be moved towards in the vector space during the search.
move_away: Define the concepts that should be moved away from in the vector space during the search.
Returns:
A `_HybridNearText` object to be used in the `vector` parameter of the `query.hybrid` and `generate.hybrid` search methods.
"""
return _HybridNearText(
text=query,
distance=distance,
certainty=certainty,
move_to=move_to,
move_away=move_away,
)
@staticmethod
def near_vector(
vector: NearVectorInputType,
*,
certainty: Optional[float] = None,
distance: Optional[float] = None,
) -> _HybridNearVector:
"""Define a near vector search to be used within a hybrid query.
Args:
certainty: The minimum similarity score to return. If not specified, the default certainty specified by the server is used.
distance: The maximum distance to search. If not specified, the default distance specified by the server is used.
Returns:
A `_HybridNearVector` object to be used in the `vector` parameter of the `query.hybrid` and `generate.hybrid` search methods.
"""
return _HybridNearVector(vector=vector, distance=distance, certainty=certainty)
| HybridVector |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 63727,
"end": 64159
} | class ____(strip_align_x, strip_align_y):
"""
Alignment of the strip & its background w.r.t the panel border
Parameters
----------
theme_element : float
Value as a proportion of the strip text size. A good value
should be the range `[-1, 0.5]`. A negative value
puts the strip inside the axes and a positive value
creates a space between the strip and the axes.
"""
| strip_align |
python | plotly__plotly.py | plotly/graph_objs/heatmap/_textfont.py | {
"start": 233,
"end": 9856
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "heatmap"
_path_str = "heatmap.textfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmap.Textfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.heatmap.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | ray-project__ray | rllib/examples/learners/separate_vf_lr_and_optimizer.py | {
"start": 1158,
"end": 5788
} | class ____ details on how to override the main (torch) `configure_optimizers_for_module`
function.
We assume here that the users properly sets up their RLModule to have separate policy-
and value function networks. If any model pieces are shared between the two optimizers,
you should experience learning instability up to the point where your algorithm can't
learn any useful policy anymore.
How to run this script
----------------------
`python [script file name].py --lr-vf=0.001 --lr-policy=0.0005`
Use the `--lr-policy` option to set the policy learning rate (used by the policy
optimizer) and the `--lr-vf` option to set the value function learning rate (used by the
value function optimizer).
For debugging, use the following additional command line options
`--no-tune --num-env-runners=0`
which should allow you to set breakpoints anywhere in the RLlib code and
have the execution stop there for inspection and debugging.
For logging to your WandB account, use:
`--wandb-key=[your WandB API key] --wandb-project=[some project name]
--wandb-run-name=[optional: WandB run name (within the defined project)]`
Results to expect
-----------------
You should expect to observe decent learning behavior from your console output:
With --lr-vf=0.0005 and --lr-policy=0.001
+-----------------------------+------------+-----------------+--------+
| Trial name | status | loc | iter |
| | | | |
|-----------------------------+------------+-----------------+--------+
| PPO_CartPole-v1_7b404_00000 | TERMINATED | 127.0.0.1:16845 | 19 |
+-----------------------------+------------+-----------------+--------+
+------------------+------------------------+---------------------+
| total time (s) | num_env_steps_sampled_ | episode_return_mean |
| | _lifetime | |
|------------------+------------------------+---------------------+
| 19.4179 | 76000 | 459.94 |
+------------------+------------------------+---------------------+
"""
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
from ray.rllib.examples.learners.classes.separate_vf_lr_and_optimizer_learner import (
PPOTorchLearnerWithSeparateVfOptimizer,
)
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.test_utils import (
add_rllib_example_script_args,
run_rllib_example_script_experiment,
)
torch, _ = try_import_torch()
parser = add_rllib_example_script_args(default_reward=450.0)
parser.add_argument(
"--lr-vf",
type=float,
default=0.0005,
help="The learning rate used in the value function optimizer.",
)
parser.add_argument(
"--lr-policy",
type=float,
default=0.001,
help="The learning rate used in the policy optimizer.",
)
if __name__ == "__main__":
args = parser.parse_args()
assert args.algo == "PPO", "Must set --algo=PPO when running this script!"
base_config = (
PPOConfig()
.environment("CartPole-v1")
.training(
# This is the most important setting in this script: We point our PPO
# algorithm to use the custom Learner (instead of the default
# PPOTorchLearner).
learner_class=PPOTorchLearnerWithSeparateVfOptimizer,
# We use this simple method here to inject a new setting that our
# custom Learner class uses in its `configure_optimizers_for_module`
# method. This is convenient and avoids having to subclass `PPOConfig` only
# to add a few new settings to it. Within our Learner, we can access this
# new setting through:
# `self.config.learner_config_dict['lr_vf']`
learner_config_dict={"lr_vf": args.lr_vf},
# Some settings to make this example learn better.
num_epochs=6,
# Since we are using separate optimizers for the two NN components, the
# value of `vf_loss_coeff` does not matter anymore. We set this to 1.0 here.
vf_loss_coeff=1.0,
# The policy learning rate, settable through the command line `--lr` arg.
lr=args.lr_policy,
)
.rl_module(
# Another very important setting is this here. Make sure you use
# completely separate NNs for policy and value-functions.
model_config=DefaultModelConfig(vf_share_layers=False),
)
)
run_rllib_example_script_experiment(base_config, args)
| for |
python | pytest-dev__pytest-cov | src/pytest_cov/engine.py | {
"start": 14516,
"end": 17161
} | class ____(CovController):
"""Implementation for distributed workers."""
@_ensure_topdir
def start(self):
# Determine whether we are collocated with master.
self.is_collocated = (
socket.gethostname() == self.config.workerinput['cov_master_host']
and self.topdir == self.config.workerinput['cov_master_topdir']
)
# If we are not collocated, then rewrite master paths to worker paths.
if not self.is_collocated:
master_topdir = self.config.workerinput['cov_master_topdir']
worker_topdir = self.topdir
if self.cov_source is not None:
self.cov_source = [source.replace(master_topdir, worker_topdir) for source in self.cov_source]
self.cov_config = self.cov_config.replace(master_topdir, worker_topdir)
# Erase any previous data and start coverage.
self.cov = coverage.Coverage(
source=self.cov_source,
branch=self.cov_branch,
data_suffix=True,
config_file=self.cov_config,
)
# Prevent workers from issuing module-not-measured type of warnings (expected for a workers to not have coverage in all the files).
self.cov._warn_unimported_source = False
self.cov.start()
super().start()
@_ensure_topdir
def finish(self):
"""Stop coverage and send relevant info back to the master."""
super().finish()
self.cov.stop()
if self.is_collocated:
# We don't combine data if we're collocated - we can get
# race conditions in the .combine() call (it's not atomic)
# The data is going to be combined in the master.
self.cov.save()
# If we are collocated then just inform the master of our
# data file to indicate that we have finished.
self.config.workeroutput['cov_worker_node_id'] = self.nodeid
else:
self.cov.combine()
self.cov.save()
# If we are not collocated then add the current path
# and coverage data to the output so we can combine
# it on the master node.
# Send all the data to the master over the channel.
data = self.cov.get_data().dumps()
self.config.workeroutput.update(
{
'cov_worker_path': self.topdir,
'cov_worker_node_id': self.nodeid,
'cov_worker_data': data,
}
)
def summary(self, stream):
"""Only the master reports so do nothing."""
| DistWorker |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/hstore.py | {
"start": 8342,
"end": 8461
} | class ____(sqlfunc.GenericFunction):
type = HSTORE
name = "delete"
inherit_cache = True
| _HStoreDeleteFunction |
python | joke2k__faker | faker/providers/phone_number/ru_RU/__init__.py | {
"start": 49,
"end": 379
} | class ____(PhoneNumberProvider):
formats = (
"+7 ### ### ####",
"+7 ### ### ## ##",
"+7 (###) ###-##-##",
"+7 (###) ###-####",
"+7##########",
"8 ### ### ####",
"8 ### ### ## ##",
"8 (###) ###-##-##",
"8 (###) ###-####",
"8##########",
)
| Provider |
python | huggingface__transformers | src/transformers/models/edgetam/modeling_edgetam.py | {
"start": 19316,
"end": 21559
} | class ____(ModelOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
The Intersection over Union (IoU) scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
by the processor to be brought to the original image size.
object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
Logits for the object score, indicating if an object is present.
image_embeddings (`tuple(torch.FloatTensor)`):
The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
tensor has shape `(batch_size, channels, height, width)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
Hidden-states of the vision model at the output of each stage.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the vision model.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the mask decoder.
"""
iou_scores: Optional[torch.FloatTensor] = None
pred_masks: Optional[torch.FloatTensor] = None
object_score_logits: Optional[torch.FloatTensor] = None
image_embeddings: tuple[torch.FloatTensor, ...] = None
vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
| EdgeTamImageSegmentationOutput |
python | google__flatbuffers | python/flatbuffers/number_types.py | {
"start": 974,
"end": 1116
} | class ____(object):
bytewidth = 1
min_val = False
max_val = True
py_type = bool
name = "bool"
packer_type = packer.boolean
| BoolFlags |
python | ray-project__ray | rllib/connectors/learner/add_columns_from_episodes_to_train_batch.py | {
"start": 374,
"end": 6617
} | class ____(ConnectorV2):
"""Adds actions/rewards/terminateds/... to train batch. Excluding the infos column.
Note: This is one of the default Learner ConnectorV2 pieces that are added
automatically by RLlib into every Learner connector pipeline, unless
`config.add_default_connectors_to_learner_pipeline` is set to False.
The default Learner connector pipeline is:
[
[0 or more user defined ConnectorV2 pieces],
AddObservationsFromEpisodesToBatch,
AddColumnsFromEpisodesToTrainBatch,
AddTimeDimToBatchAndZeroPad,
AddStatesFromEpisodesToBatch,
AgentToModuleMapping, # only in multi-agent setups!
BatchIndividualItems,
NumpyToTensor,
]
Does NOT add observations or infos to train batch.
Observations should have already been added by another ConnectorV2 piece:
`AddObservationsToTrainBatch` in the same pipeline.
Infos can be added manually by the user through setting this in the config:
.. testcode::
:skipif: True
from ray.rllib.connectors.learner import AddInfosFromEpisodesToTrainBatch
config.training(
learner_connector=lambda obs_sp, act_sp: AddInfosFromEpisodesToTrainBatch()
)`
If provided with `episodes` data, this connector piece makes sure that the final
train batch going into the RLModule for updating (`forward_train()` call) contains
at the minimum:
- Observations: From all episodes under the Columns.OBS key.
- Actions, rewards, terminal/truncation flags: From all episodes under the
respective keys.
- All data inside the episodes' `extra_model_outs` property, e.g. action logp and
action probs under the respective keys.
- Internal states: These will NOT be added to the batch by this connector piece
as this functionality is handled by a different default connector piece:
`AddStatesFromEpisodesToBatch`.
If the user wants to customize their own data under the given keys (e.g. obs,
actions, ...), they can extract from the episodes or recompute from `data`
their own data and store it in `data` under those keys. In this case, the default
connector will not change the data under these keys and simply act as a
pass-through.
"""
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Optional[Dict[str, Any]],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
# Actions.
if Columns.ACTIONS not in batch:
for sa_episode in self.single_agent_episode_iterator(
episodes,
agents_that_stepped_only=False,
):
self.add_n_batch_items(
batch,
Columns.ACTIONS,
items_to_add=[
sa_episode.get_actions(indices=ts)
for ts in range(len(sa_episode))
],
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
# Rewards.
if Columns.REWARDS not in batch:
for sa_episode in self.single_agent_episode_iterator(
episodes,
agents_that_stepped_only=False,
):
self.add_n_batch_items(
batch,
Columns.REWARDS,
items_to_add=[
sa_episode.get_rewards(indices=ts)
for ts in range(len(sa_episode))
],
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
# Terminateds.
if Columns.TERMINATEDS not in batch:
for sa_episode in self.single_agent_episode_iterator(
episodes,
agents_that_stepped_only=False,
):
self.add_n_batch_items(
batch,
Columns.TERMINATEDS,
items_to_add=(
[False] * (len(sa_episode) - 1) + [sa_episode.is_terminated]
if len(sa_episode) > 0
else []
),
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
# Truncateds.
if Columns.TRUNCATEDS not in batch:
for sa_episode in self.single_agent_episode_iterator(
episodes,
agents_that_stepped_only=False,
):
self.add_n_batch_items(
batch,
Columns.TRUNCATEDS,
items_to_add=(
[False] * (len(sa_episode) - 1) + [sa_episode.is_truncated]
if len(sa_episode) > 0
else []
),
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
# Extra model outputs (except for STATE_OUT, which will be handled by another
# default connector piece). Also, like with all the fields above, skip
# those that the user already seemed to have populated via custom connector
# pieces.
skip_columns = set(batch.keys()) | {Columns.STATE_IN, Columns.STATE_OUT}
for sa_episode in self.single_agent_episode_iterator(
episodes,
agents_that_stepped_only=False,
):
for column in sa_episode.extra_model_outputs.keys():
if column not in skip_columns:
self.add_n_batch_items(
batch,
column,
items_to_add=[
sa_episode.get_extra_model_outputs(key=column, indices=ts)
for ts in range(len(sa_episode))
],
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
return batch
| AddColumnsFromEpisodesToTrainBatch |
python | apache__airflow | providers/elasticsearch/src/airflow/providers/elasticsearch/hooks/elasticsearch.py | {
"start": 4678,
"end": 7140
} | class ____(DbApiHook):
"""
Interact with Elasticsearch through the elasticsearch-dbapi.
This hook uses the Elasticsearch conn_id.
:param elasticsearch_conn_id: The :ref:`ElasticSearch connection id <howto/connection:elasticsearch>`
used for Elasticsearch credentials.
"""
conn_name_attr = "elasticsearch_conn_id"
default_conn_name = "elasticsearch_default"
connector = ESConnection # type: ignore[assignment]
conn_type = "elasticsearch"
hook_name = "Elasticsearch"
def __init__(self, schema: str = "http", connection: AirflowConnection | None = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.schema = schema
def get_conn(self) -> ESConnection:
"""Return an elasticsearch connection object."""
conn = self.connection
conn_args = {
"host": cast("str", conn.host),
"port": cast("int", conn.port),
"user": conn.login or None,
"password": conn.password or None,
"scheme": conn.schema or "http",
}
conn_args.update(conn.extra_dejson)
if conn_args.get("http_compress", False):
conn_args["http_compress"] = bool(conn_args["http_compress"])
return connect(**conn_args) # type: ignore[arg-type]
def get_uri(self) -> str:
conn = self.connection
login = ""
if conn.login:
login = f"{conn.login}:{conn.password}@"
host = conn.host or ""
if conn.port is not None:
host += f":{conn.port}"
uri = f"{conn.conn_type}+{conn.schema}://{login}{host}/"
extras_length = len(conn.extra_dejson)
if not extras_length:
return uri
uri += "?"
for arg_key, arg_value in conn.extra_dejson.items():
extras_length -= 1
uri += f"{arg_key}={arg_value}"
if extras_length:
uri += "&"
return uri
def _get_polars_df(
self,
sql,
parameters: list | tuple | Mapping[str, Any] | None = None,
**kwargs,
):
# TODO: Custom ElasticsearchSQLCursor is incompatible with polars.read_database.
# To support: either adapt cursor to polars._executor interface or create custom polars reader.
# https://github.com/apache/airflow/pull/50454
raise NotImplementedError("Polars is not supported for Elasticsearch")
| ElasticsearchSQLHook |
python | pytorch__pytorch | torch/ao/quantization/fx/graph_module.py | {
"start": 1301,
"end": 3188
} | class ____(GraphModule):
def __init__(
self,
root: torch.nn.Module | dict[str, Any],
graph: Graph,
preserved_attr_names: set[str],
):
self.preserved_attr_names = {
"_activation_post_process_map",
"_activation_post_process_indexes",
"_patterns",
"_node_name_to_qconfig",
"_prepare_custom_config",
"_equalization_node_name_to_qconfig",
"_node_name_to_scope",
"_qconfig_mapping",
"_is_qat",
"_observed_node_names",
}.union(preserved_attr_names)
preserved_attrs = {
attr: getattr(root, attr)
for attr in self.preserved_attr_names
if hasattr(root, attr)
}
super().__init__(root, graph)
for attr in preserved_attrs:
setattr(self, attr, preserved_attrs[attr])
# GraphModule does not copy attributes which are not in the __dict__
# of vanilla nn.Module. So, we override __deepcopy__ in order
# to copy the quantization specific attributes correctly.
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return ObservedGraphModule(
fake_mod,
copy.deepcopy(self.graph),
copy.deepcopy(self.preserved_attr_names),
)
def _is_observed_module(module: Any) -> bool:
return hasattr(module, "meta") and "_observed_graph_module_attrs" in module.meta
def _get_observed_graph_module_attr(
model: torch.nn.Module | GraphModule, attr_name: str
) -> Any:
if hasattr(model, "meta") and "_observed_graph_module_attrs" in model.meta: # type: ignore[operator, index]
return getattr(model.meta["_observed_graph_module_attrs"], attr_name) # type: ignore[index]
return None
| ObservedGraphModule |
python | davidhalter__parso | parso/python/errors.py | {
"start": 17865,
"end": 18215
} | class ____(IndentationRule):
message = 'expected an indented block'
def get_node(self, node):
leaf = node.get_next_leaf()
return list(leaf._split_prefix())[-1]
def is_issue(self, node):
# This is the beginning of a suite that is not indented.
return node.children[-1].type == 'newline'
| _ExpectIndentedBlock |
python | kamyu104__LeetCode-Solutions | Python/change-the-root-of-a-binary-tree.py | {
"start": 54,
"end": 110
} | class ____:
def __init__(self, val):
pass
| Node |
python | huggingface__transformers | tests/models/sam/test_image_processing_sam.py | {
"start": 1138,
"end": 4049
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_pad=True,
pad_size=None,
mask_size=None,
mask_pad_size=None,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"longest_edge": 20}
pad_size = pad_size if pad_size is not None else {"height": 20, "width": 20}
mask_size = mask_size if mask_size is not None else {"longest_edge": 12}
mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 12, "width": 12}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_pad = do_pad
self.pad_size = pad_size
self.mask_size = mask_size
self.mask_pad_size = mask_pad_size
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"do_pad": self.do_pad,
"pad_size": self.pad_size,
"mask_size": self.mask_size,
"mask_pad_size": self.mask_pad_size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.pad_size["height"], self.pad_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
def prepare_semantic_single_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
example = ds[0]
return example["image"], example["map"]
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
def prepare_semantic_batch_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
| SamImageProcessingTester |
python | tensorflow__tensorflow | tensorflow/python/eager/backprop_test.py | {
"start": 54088,
"end": 62071
} | class ____(test.TestCase):
def _jacobian(self, experimental_use_pfor):
persistent = context.executing_eagerly and not experimental_use_pfor
with backprop.GradientTape(persistent=persistent) as g:
x = constant_op.constant([1., 2.])
y = constant_op.constant([3., 4.])
g.watch(x)
g.watch(y)
z = x * x * y
jacobian = g.jacobian(
z, [x, y], experimental_use_pfor=experimental_use_pfor)
answer = [array_ops.diag(2 * x * y), array_ops.diag(x * x)]
return jacobian, answer
@test_util.run_v1_only('b/120545219')
def testPfor(self):
jacobian, answer = self._jacobian(experimental_use_pfor=True)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoop(self):
jacobian, answer = self._jacobian(experimental_use_pfor=False)
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPforDefun(self):
@def_function.function
def _f():
return self._jacobian(experimental_use_pfor=True)
jacobian, answer = _f()
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testWhileLoopDefun(self):
@def_function.function
def _f():
return self._jacobian(experimental_use_pfor=False)
jacobian, answer = _f()
for j, a in zip(jacobian, answer):
self.assertAllEqual(a, j)
@test_util.run_v1_only('b/120545219')
def testPersistentTape(self):
if not context.executing_eagerly():
return
with backprop.GradientTape() as g:
x = constant_op.constant([1.0, 2.0])
g.watch(x)
y = x * x
with self.assertRaisesRegex(RuntimeError, 'persistent'):
g.jacobian(y, x, experimental_use_pfor=False)
@test_util.run_v1_only('b/120545219')
def test_parallel_iterations(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant([[1., 2], [3, 4]])
g.watch(x)
y = math_ops.matmul(x, x)
self.assertAllClose(
g.jacobian(y, x, parallel_iterations=2),
g.jacobian(y, x, parallel_iterations=3))
@test_util.run_in_graph_and_eager_modes
def test_nested_jacobian(self):
if context.executing_eagerly():
# TODO(agarwal): b/128842926
self.skipTest('Conversion of function calls not implemented yet.')
x = array_ops.ones((10, 2))
with backprop.GradientTape(persistent=False) as g:
g.watch(x)
with backprop.GradientTape(persistent=False) as gg:
gg.watch(x)
y = math_ops.reduce_sum(math_ops.square(x))
dy_x = gg.jacobian(y, x)
dy_xx = g.batch_jacobian(dy_x, x)
dy_xx_answer = [[[2., 0], [0, 2.]]] * 10
self.assertAllClose(dy_xx_answer, self.evaluate(dy_xx))
def test_nested_batch_jacobian_foldl(self):
def _grad(f):
def _grad_function(primal):
with backprop.GradientTape() as tape:
tape.watch(primal)
primal_out = f(primal)
return tape.batch_jacobian(primal_out, primal)
return _grad_function
def _func(x):
return array_ops.reshape(
functional_ops.foldl_v2(lambda a, b: math_ops.cos(a + b),
array_ops.transpose(x)),
[1, 1])
f = _func
x = constant_op.constant([[1., 2.]])
for _ in range(2):
theoretical, numerical = gradient_checker_v2.compute_gradient(f, [x])
self.assertAllClose(theoretical, numerical, rtol=1e-3)
f = _grad(f)
expected_flat = array_ops.reshape(numerical, [-1])
self.assertAllClose(expected_flat,
array_ops.reshape(f(x), [-1]),
rtol=1e-3)
self.assertAllClose(expected_flat,
array_ops.reshape(def_function.function(f)(x), [-1]),
rtol=1e-3)
def test_grad_jacobian_conv(self):
def _inner(x):
kernel = array_ops.ones([3, 3, 1, 9])
with backprop.GradientTape() as tape:
tape.watch(x)
y = nn_ops.conv2d(x, kernel, strides=(1, 1), padding='SAME',
data_format='NHWC')
reduced = math_ops.reduce_sum(y ** 2., axis=[2, 3])
return math_ops.reduce_sum(tape.batch_jacobian(reduced, x))
theoretical, numerical = gradient_checker_v2.compute_gradient(
def_function.function(_inner), [array_ops.ones([10, 4, 4, 1])])
self.assertAllClose(numerical, theoretical, rtol=1e-1)
@def_function.function
def _outer():
with backprop.GradientTape() as tape:
x = array_ops.ones([10, 4, 4, 1])
tape.watch(x)
y = _inner(x)
return tape.gradient(y, x)
self.assertAllClose(array_ops.reshape(numerical, [-1]),
array_ops.reshape(_outer(), [-1]), rtol=1e-1)
@test_util.run_in_graph_and_eager_modes
def test_indexed_slices(self):
with backprop.GradientTape(persistent=True) as g:
inp = random_ops.random_uniform([3, 2])
g.watch(inp)
output = nn.embedding_lookup(inp, [0, 2])
self.assertAllClose(
g.jacobian(output, inp, experimental_use_pfor=True),
g.jacobian(output, inp, experimental_use_pfor=False))
def test_foldl_partial_function(self):
x = array_ops.zeros([3])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
result = def_function.function(
functools.partial(functional_ops.foldl_v2, lambda a, b: a + b))(
x)
self.assertAllClose([1., 1., 1.],
tape.jacobian(result, x, experimental_use_pfor=True))
self.assertAllClose([1., 1., 1.],
tape.jacobian(result, x, experimental_use_pfor=False))
# Non-persistent tapes take a different function gradient path, but also
# work with pfor=True.
x = array_ops.zeros([3])
with backprop.GradientTape() as tape:
tape.watch(x)
result = def_function.function(
functools.partial(functional_ops.foldl_v2, lambda a, b: a + b))(
x)
self.assertAllClose([1., 1., 1.],
tape.jacobian(result, x, experimental_use_pfor=True))
def test_foldl_pure_function(self):
@def_function.function
def compute_jacobian(use_pfor):
x = array_ops.zeros([3])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
result = functools.partial(functional_ops.foldl_v2, lambda a, b: a + b)(
x)
return tape.jacobian(result, x, experimental_use_pfor=use_pfor)
self.assertAllClose(compute_jacobian(use_pfor=True),
compute_jacobian(use_pfor=False))
def test_cond_func_grad_jacobian(self):
@def_function.function
def f(x):
y = tf_cond.cond(x > 0., lambda: x**3., lambda: x**2.)
return y
with backprop.GradientTape(persistent=True) as tape:
x = constant_op.constant(1.)
tape.watch(x)
y = f(x)
grad = tape.gradient(y, x)
self.assertAllClose(3., grad)
jacobian = tape.jacobian(grad, x, experimental_use_pfor=False)
self.assertAllClose(6., jacobian)
jacobian_pfor = tape.jacobian(grad, x, experimental_use_pfor=True)
self.assertAllClose(6., jacobian_pfor)
def test_empty_tensor_consistent_jacobian(self):
variable = variables.Variable(1.0)
inputs = (
constant_op.constant(np.random.uniform(size=(0, 4))),
constant_op.constant(np.random.uniform(size=(0, 3))),
)
with backprop.GradientTape(persistent=True) as tape:
outputs = variable * math_ops.cast(
array_ops.concat(inputs, axis=-1), dtypes.float32
)
jacobians_pfor = tape.jacobian(
outputs,
variable,
experimental_use_pfor=True,
)
jacobians_loop = tape.jacobian(
outputs,
variable,
experimental_use_pfor=False,
)
self.assertAllClose(jacobians_pfor, jacobians_loop)
@test_util.run_all_in_graph_and_eager_modes
| JacobianTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.