language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 485943,
"end": 486135
} | class ____(VegaLiteSchema):
"""Interpolate schema wrapper."""
_schema = {"$ref": "#/definitions/Interpolate"}
def __init__(self, *args):
super().__init__(*args)
| Interpolate |
python | networkx__networkx | networkx/readwrite/tests/test_graph6.py | {
"start": 1548,
"end": 1880
} | class ____:
def test_read_many_graph6(self):
"""Test for reading many graphs from a file into a list."""
data = b"DF{\nD`{\nDqK\nD~{\n"
fh = BytesIO(data)
glist = nx.read_graph6(fh)
assert len(glist) == 4
for G in glist:
assert sorted(G) == list(range(5))
| TestReadGraph6 |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py | {
"start": 4166,
"end": 28571
} | class ____:
def setup_method(self):
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
def teardown_method(self):
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
@pytest.mark.parametrize(
("max_tries", "should_retry"),
[
pytest.param(0, False, id="max_retries=0"),
pytest.param(3, True, id="should_retry"),
],
)
def test_ti_run_state_to_running(
self,
client,
session,
create_task_instance,
time_machine,
max_tries,
should_retry,
):
"""
Test that the Task Instance state is updated to running when the Task Instance is in a state where it can be
marked as running.
"""
instant_str = "2024-09-30T12:00:00Z"
instant = timezone.parse(instant_str)
time_machine.move_to(instant, tick=False)
ti = create_task_instance(
task_id="test_ti_run_state_to_running",
state=State.QUEUED,
dagrun_state=DagRunState.RUNNING,
session=session,
start_date=instant,
dag_id=str(uuid4()),
)
ti.max_tries = max_tries
session.commit()
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": instant_str,
},
)
assert response.status_code == 200
assert response.json() == {
"dag_run": {
"dag_id": ti.dag_id,
"run_id": "test",
"clear_number": 0,
"logical_date": instant_str,
"data_interval_start": instant.subtract(days=1).to_iso8601_string(),
"data_interval_end": instant_str,
"run_after": instant_str,
"start_date": instant_str,
"state": "running",
"end_date": None,
"run_type": "manual",
"conf": {},
"triggering_user_name": None,
"consumed_asset_events": [],
"partition_key": None,
},
"task_reschedule_count": 0,
"upstream_map_indexes": {},
"max_tries": max_tries,
"should_retry": should_retry,
"variables": [],
"connections": [],
"xcom_keys_to_clear": [],
}
# Refresh the Task Instance from the database so that we can check the updated values
session.refresh(ti)
assert ti.state == State.RUNNING
assert ti.hostname == "random-hostname"
assert ti.unixname == "random-unixname"
assert ti.pid == 100
response1 = response.json()
# Test that if we make a second request (simulating a network glitch so the client issues a retry)
# that it is accepted and we get the same info back
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": instant_str,
},
)
assert response.status_code == 200
assert response.json() == response1
# But that for a different pid on the same host (etc) it fails
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 101,
"start_date": instant_str,
},
)
assert response.status_code == 409
def test_dynamic_task_mapping_with_parse_time_value(self, client, dag_maker):
"""
Test that the Task Instance upstream_map_indexes is correctly fetched when to running the Task Instances
"""
with dag_maker("test_dynamic_task_mapping_with_parse_time_value", serialized=True):
@task_group
def task_group_1(arg1):
@task
def group1_task_1(arg1):
return {"a": arg1}
@task
def group1_task_2(arg2):
return arg2
group1_task_2(group1_task_1(arg1))
@task
def task2():
return None
task_group_1.expand(arg1=[0, 1]) >> task2()
dr = dag_maker.create_dagrun()
for ti in dr.get_task_instances():
ti.set_state(State.QUEUED)
dag_maker.session.flush()
# key: (task_id, map_index)
# value: result upstream_map_indexes ({task_id: map_indexes})
expected_upstream_map_indexes = {
# no upstream task for task_group_1.group_task_1
("task_group_1.group1_task_1", 0): {},
("task_group_1.group1_task_1", 1): {},
# the upstream task for task_group_1.group_task_2 is task_group_1.group_task_2
# since they are in the same task group, the upstream map index should be the same as the task
("task_group_1.group1_task_2", 0): {"task_group_1.group1_task_1": 0},
("task_group_1.group1_task_2", 1): {"task_group_1.group1_task_1": 1},
# the upstream task for task2 is the last tasks of task_group_1, which is
# task_group_1.group_task_2
# since they are not in the same task group, the upstream map index should include all the
# expanded tasks
("task2", -1): {"task_group_1.group1_task_2": [0, 1]},
}
for ti in dr.get_task_instances():
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": "2024-09-30T12:00:00Z",
},
)
assert response.status_code == 200
upstream_map_indexes = response.json()["upstream_map_indexes"]
assert upstream_map_indexes == expected_upstream_map_indexes[(ti.task_id, ti.map_index)]
def test_nested_mapped_task_group_upstream_indexes(self, client, dag_maker):
"""
Test that upstream_map_indexes are correctly computed for tasks in nested mapped task groups.
"""
with dag_maker("test_nested_mapped_tg", serialized=True):
@task
def alter_input(inp: str) -> str:
return f"{inp}_Altered"
@task
def print_task(orig_input: str, altered_input: str) -> str:
return f"orig:{orig_input},altered:{altered_input}"
@task_group
def inner_task_group(orig_input: str) -> None:
altered_input = alter_input(orig_input)
print_task(orig_input, altered_input)
@task_group
def expandable_task_group(param: str) -> None:
inner_task_group(param)
expandable_task_group.expand(param=["One", "Two", "Three"])
dr = dag_maker.create_dagrun()
# Set all alter_input tasks to success so print_task can run
for ti in dr.get_task_instances():
if "alter_input" in ti.task_id and ti.map_index >= 0:
ti.state = State.SUCCESS
elif "print_task" in ti.task_id and ti.map_index >= 0:
ti.set_state(State.QUEUED)
dag_maker.session.flush()
# Expected upstream_map_indexes for each print_task instance
expected_upstream_map_indexes = {
("expandable_task_group.inner_task_group.print_task", 0): {
"expandable_task_group.inner_task_group.alter_input": 0
},
("expandable_task_group.inner_task_group.print_task", 1): {
"expandable_task_group.inner_task_group.alter_input": 1
},
("expandable_task_group.inner_task_group.print_task", 2): {
"expandable_task_group.inner_task_group.alter_input": 2
},
}
# Get only the expanded print_task instances (not the template)
print_task_tis = [
ti for ti in dr.get_task_instances() if "print_task" in ti.task_id and ti.map_index >= 0
]
# Test each print_task instance
for ti in print_task_tis:
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": "2024-09-30T12:00:00Z",
},
)
assert response.status_code == 200
upstream_map_indexes = response.json()["upstream_map_indexes"]
expected = expected_upstream_map_indexes[(ti.task_id, ti.map_index)]
assert upstream_map_indexes == expected, (
f"Task {ti.task_id}[{ti.map_index}] should have upstream_map_indexes {expected}, "
f"but got {upstream_map_indexes}"
)
def test_dynamic_task_mapping_with_xcom(self, client: Client, dag_maker: DagMaker, session: Session):
"""
Test that the Task Instance upstream_map_indexes is correctly fetched when to running the Task Instances with xcom
"""
from airflow.models.taskmap import TaskMap
with dag_maker(session=session, serialized=True):
@task
def task_1():
return [0, 1]
@task_group
def tg(x, y):
@task
def task_2():
pass
task_2()
@task
def task_3():
pass
tg.expand(x=task_1(), y=[1, 2, 3]) >> task_3()
dr = dag_maker.create_dagrun()
decision = dr.task_instance_scheduling_decisions(session=session)
# Simulate task_1 execution to produce TaskMap.
(ti_1,) = decision.schedulable_tis
ti_1.state = TaskInstanceState.SUCCESS
session.add(TaskMap.from_task_instance_xcom(ti_1, [0, 1]))
session.flush()
# Now task_2 in mapped tagk group is expanded.
decision = dr.task_instance_scheduling_decisions(session=session)
for ti in decision.schedulable_tis:
ti.state = TaskInstanceState.SUCCESS
session.flush()
decision = dr.task_instance_scheduling_decisions(session=session)
(task_3_ti,) = decision.schedulable_tis
task_3_ti.set_state(State.QUEUED)
response = client.patch(
f"/execution/task-instances/{task_3_ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": "2024-09-30T12:00:00Z",
},
)
assert response.json()["upstream_map_indexes"] == {"tg.task_2": [0, 1, 2, 3, 4, 5]}
def test_dynamic_task_mapping_with_all_success_trigger_rule(self, dag_maker: DagMaker, session: Session):
"""
Test that the Task Instance upstream_map_indexes is not populuated but
the downstream task should not be run.
"""
with dag_maker(session=session, serialized=True):
@task
def task_1():
raise AirflowSkipException()
@task_group
def tg(x):
@task
def task_2():
raise AirflowSkipException()
task_2()
@task(trigger_rule=TriggerRule.ALL_SUCCESS)
def task_3():
pass
@task
def task_4():
pass
tg.expand(x=task_1()) >> [task_3(), task_4()]
dr = dag_maker.create_dagrun()
decision = dr.task_instance_scheduling_decisions(session=session)
# Simulate task_1 skipped
(ti_1,) = decision.schedulable_tis
ti_1.state = TaskInstanceState.SKIPPED
session.flush()
# Now task_2 in mapped task group is not expanded and also skipped.
decision = dr.task_instance_scheduling_decisions(session=session)
for ti in decision.schedulable_tis:
ti.state = TaskInstanceState.SKIPPED
session.flush()
decision = dr.task_instance_scheduling_decisions(session=session)
assert decision.schedulable_tis == []
@pytest.mark.parametrize(
"trigger_rule",
[
TriggerRule.ALL_DONE,
TriggerRule.ALL_DONE_SETUP_SUCCESS,
TriggerRule.NONE_FAILED,
TriggerRule.ALL_SKIPPED,
],
)
def test_dynamic_task_mapping_with_non_all_success_trigger_rule(
self, client: Client, dag_maker: DagMaker, session: Session, trigger_rule: TriggerRule
):
"""
Test that the Task Instance upstream_map_indexes is not populuated but
the downstream task should still be run due to trigger rule.
"""
with dag_maker(session=session, serialized=True):
@task
def task_1():
raise AirflowSkipException()
@task_group
def tg(x):
@task
def task_2():
raise AirflowSkipException()
task_2()
@task(trigger_rule=trigger_rule)
def task_3():
pass
@task
def task_4():
pass
tg.expand(x=task_1()) >> [task_3(), task_4()]
dr = dag_maker.create_dagrun()
decision = dr.task_instance_scheduling_decisions(session=session)
# Simulate task_1 skipped
(ti_1,) = decision.schedulable_tis
ti_1.state = TaskInstanceState.SKIPPED
session.flush()
# Now task_2 in mapped tagk group is not expanded and also skipped..
decision = dr.task_instance_scheduling_decisions(session=session)
for ti in decision.schedulable_tis:
ti.state = TaskInstanceState.SKIPPED
session.flush()
decision = dr.task_instance_scheduling_decisions(session=session)
# only task_3 is schedulable
(task_3_ti,) = decision.schedulable_tis
assert task_3_ti.task_id == "task_3"
task_3_ti.set_state(State.QUEUED)
response = client.patch(
f"/execution/task-instances/{task_3_ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": "2024-09-30T12:00:00Z",
},
)
assert response.json()["upstream_map_indexes"] == {"tg.task_2": None}
def test_next_kwargs_still_encoded(self, client, session, create_task_instance, time_machine):
instant_str = "2024-09-30T12:00:00Z"
instant = timezone.parse(instant_str)
time_machine.move_to(instant, tick=False)
ti = create_task_instance(
task_id="test_next_kwargs_still_encoded",
state=State.QUEUED,
session=session,
start_date=instant,
dag_id=str(uuid4()),
)
ti.next_method = "execute_complete"
# ti.next_kwargs under the hood applies the serde encoding for us
ti.next_kwargs = {"moment": instant}
session.commit()
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": instant_str,
},
)
assert response.status_code == 200
assert response.json() == {
"dag_run": mock.ANY,
"task_reschedule_count": 0,
"upstream_map_indexes": {},
"max_tries": 0,
"should_retry": False,
"variables": [],
"connections": [],
"xcom_keys_to_clear": [],
"next_method": "execute_complete",
"next_kwargs": {
"__type": "dict",
"__var": {"moment": {"__type": "datetime", "__var": 1727697600.0}},
},
}
@pytest.mark.parametrize("resume", [True, False])
def test_next_kwargs_determines_start_date_update(self, client, session, create_task_instance, resume):
dag_start_time_str = "2024-09-30T12:00:00Z"
dag_start_time = timezone.parse(dag_start_time_str)
orig_task_start_time = dag_start_time.add(seconds=5)
ti = create_task_instance(
task_id="test_next_kwargs_still_encoded",
state=State.QUEUED,
session=session,
start_date=orig_task_start_time,
dag_id=str(uuid4()),
)
ti.start_date = orig_task_start_time
ti.next_method = "execute_complete"
second_start_time = orig_task_start_time.add(seconds=30)
second_start_time_str = second_start_time.isoformat()
# ti.next_kwargs under the hood applies the serde encoding for us
if resume:
ti.next_kwargs = {"moment": second_start_time}
expected_start_date = orig_task_start_time
expected_next_kwargs = {
"__type": "dict",
"__var": {"moment": {"__type": "datetime", "__var": second_start_time.timestamp()}},
}
else:
expected_start_date = second_start_time
expected_next_kwargs = None
session.commit()
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": second_start_time_str,
},
)
session.commit()
assert response.status_code == 200
assert response.json() == {
"dag_run": mock.ANY,
"task_reschedule_count": 0,
"upstream_map_indexes": {},
"max_tries": 0,
"should_retry": False,
"variables": [],
"connections": [],
"xcom_keys_to_clear": [],
"next_method": "execute_complete",
"next_kwargs": expected_next_kwargs,
}
session.expunge_all()
ti = session.get(TaskInstance, ti.id)
assert ti.start_date == expected_start_date
@pytest.mark.parametrize(
"initial_ti_state",
[s for s in TaskInstanceState if s not in (TaskInstanceState.QUEUED, TaskInstanceState.RESTARTING)],
)
def test_ti_run_state_conflict_if_not_queued(
self, client, session, create_task_instance, initial_ti_state
):
"""
Test that a 409 error is returned when the Task Instance is not in a state where it can be marked as
running. In this case, the Task Instance is first in NONE state so it cannot be marked as running.
"""
ti = create_task_instance(
task_id="test_ti_run_state_conflict_if_not_queued",
state=initial_ti_state,
)
session.commit()
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": "2024-10-31T12:00:00Z",
},
)
assert response.status_code == 409
assert response.json() == {
"detail": {
"message": "TI was not in a state where it could be marked as running",
"previous_state": initial_ti_state,
"reason": "invalid_state",
}
}
assert session.scalar(select(TaskInstance.state).where(TaskInstance.id == ti.id)) == initial_ti_state
def test_xcom_not_cleared_for_deferral(self, client, session, create_task_instance, time_machine):
"""
Test that the xcoms are not cleared when the Task Instance state is re-running after deferral.
"""
instant_str = "2024-09-30T12:00:00Z"
instant = timezone.parse(instant_str)
time_machine.move_to(instant, tick=False)
ti = create_task_instance(
task_id="test_xcom_not_cleared_for_deferral",
state=State.RUNNING,
session=session,
start_date=instant,
dag_id=str(uuid4()),
)
session.commit()
# Move this task to deferred
payload = {
"state": "deferred",
"trigger_kwargs": {"key": "value", "moment": "2024-12-18T00:00:00Z"},
"trigger_timeout": "P1D", # 1 day
"classpath": "my-classpath",
"next_method": "execute_callback",
}
response = client.patch(f"/execution/task-instances/{ti.id}/state", json=payload)
assert response.status_code == 204
assert response.text == ""
session.expire_all()
# Deferred -> Queued so that we can run it again
query = update(TaskInstance).where(TaskInstance.id == ti.id).values(state="queued")
session.execute(query)
session.commit()
# Lets stage a xcom push
ti.xcom_push(key="key", value="value")
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "random-hostname",
"unixname": "random-unixname",
"pid": 100,
"start_date": instant_str,
},
)
assert response.status_code == 200
assert ti.xcom_pull(task_ids="test_xcom_not_cleared_for_deferral", key="key") == "value"
def test_ti_run_with_triggering_user_name(
self,
client,
session,
dag_maker,
time_machine,
):
"""
Test that the triggering_user_name field is correctly returned when it has a non-None value.
"""
instant_str = "2024-09-30T12:00:00Z"
instant = timezone.parse(instant_str)
time_machine.move_to(instant, tick=False)
with dag_maker(dag_id=str(uuid4()), session=session):
EmptyOperator(task_id="test_ti_run_with_triggering_user_name")
# Create DagRun with triggering_user_name set to a specific value
dr = dag_maker.create_dagrun(
run_id="test",
logical_date=instant,
state=DagRunState.RUNNING,
start_date=instant,
triggering_user_name="test_user",
)
ti = dr.get_task_instance(task_id="test_ti_run_with_triggering_user_name")
ti.set_state(State.QUEUED)
session.commit()
response = client.patch(
f"/execution/task-instances/{ti.id}/run",
json={
"state": "running",
"hostname": "test-hostname",
"unixname": "test-unixname",
"pid": 12345,
"start_date": instant_str,
},
)
assert response.status_code == 200
json_response = response.json()
# Verify the dag_run is present
assert "dag_run" in json_response
dag_run = json_response["dag_run"]
# The triggering_user_name field should be present with the correct value
assert dag_run["triggering_user_name"] == "test_user"
# Verify other expected fields are still present
assert dag_run["dag_id"] == ti.dag_id
assert dag_run["run_id"] == "test"
assert dag_run["state"] == "running"
| TestTIRunState |
python | Textualize__textual | src/textual/containers.py | {
"start": 6499,
"end": 6672
} | class ____(Widget):
"""A container with grid layout."""
DEFAULT_CSS = """
Grid {
width: 1fr;
height: 1fr;
layout: grid;
}
"""
| Grid |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/class_as_data_structure.py | {
"start": 658,
"end": 960
} | class ____: # B903
"""This class has a docstring."""
# this next method is an init
def __init__(self,e:dict):
self.e = e
# <--- begin flake8-bugbear tests below
# (we have modified them to have type annotations,
# since our implementation only triggers in that
# stricter setting.)
| D |
python | boto__boto3 | tests/unit/docs/test_method.py | {
"start": 820,
"end": 12137
} | class ____(BaseDocsTest):
def setUp(self):
super().setUp()
self.event_emitter = HierarchicalEmitter()
self.service_model = self.client.meta.service_model
self.operation_model = self.service_model.operation_model(
'SampleOperation'
)
self.service_resource_model = self.resource.meta.resource_model
def test_default(self):
document_model_driven_resource_method(
self.doc_structure,
'foo',
self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = myservice.foo',
resource_action_model=self.service_resource_model.actions[0],
)
self.assert_contains_lines_in_order(
[
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = myservice.foo(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: dict',
' :returns: ',
' **Response Syntax**',
' ::',
' {',
" 'Foo': 'string',",
" 'Bar': 'string'",
' }',
' **Response Structure**',
' - *(dict) --* ',
' - **Foo** *(string) --* Documents Foo',
' - **Bar** *(string) --* Documents Bar',
]
)
def test_returns_resource(self):
resource_action = self.service_resource_model.actions[0]
# Override the return type of the action to be a resource
# instead of the regular dictionary.
resource_action.resource = ResponseResource(
{
'type': 'Sample',
'identifiers': [
{
'target': 'Name',
'source': 'requestParameter',
'path': 'Foo',
}
],
},
self.resource_json_model,
)
document_model_driven_resource_method(
self.doc_structure,
'foo',
self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='sample = myservice.foo',
resource_action_model=resource_action,
)
self.assert_contains_lines_in_order(
[
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' sample = myservice.foo(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: :py:class:`myservice.Sample`',
' :returns: Sample resource',
]
)
def test_returns_list_of_resource(self):
resource_action = self.service_resource_model.actions[1]
document_model_driven_resource_method(
self.doc_structure,
'foo',
self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='samples = myservice.foo',
resource_action_model=resource_action,
)
self.assert_contains_lines_in_order(
[
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' samples = myservice.foo(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resource',
]
)
def test_include_input(self):
include_params = [
DocumentedShape(
name='Biz', type_name='string', documentation='biz docs'
)
]
document_model_driven_resource_method(
self.doc_structure,
'foo',
self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = myservice.foo',
include_input=include_params,
resource_action_model=self.service_resource_model.actions[0],
)
self.assert_contains_lines_in_order(
[
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = myservice.foo(',
" Foo='string',",
" Bar='string',",
" Biz='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :type Biz: string',
' :param Biz: biz docs',
' :rtype: dict',
' :returns: ',
' **Response Syntax**',
' ::',
' {',
" 'Foo': 'string',",
" 'Bar': 'string'",
' }',
' **Response Structure**',
' - *(dict) --* ',
' - **Foo** *(string) --* Documents Foo',
' - **Bar** *(string) --* Documents Bar',
]
)
def test_include_output(self):
include_params = [
DocumentedShape(
name='Biz', type_name='string', documentation='biz docs'
)
]
document_model_driven_resource_method(
self.doc_structure,
'foo',
self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = myservice.foo',
include_output=include_params,
resource_action_model=self.service_resource_model.actions[0],
)
self.assert_contains_lines_in_order(
[
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = myservice.foo(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: dict',
' :returns: ',
' **Response Syntax**',
' ::',
' {',
" 'Foo': 'string',",
" 'Bar': 'string',",
" 'Biz': 'string'",
' }',
' **Response Structure**',
' - *(dict) --* ',
' - **Foo** *(string) --* Documents Foo',
' - **Bar** *(string) --* Documents Bar',
' - **Biz** *(string) --* biz docs',
]
)
def test_exclude_input(self):
document_model_driven_resource_method(
self.doc_structure,
'foo',
self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = myservice.foo',
exclude_input=['Bar'],
resource_action_model=self.service_resource_model.actions[0],
)
self.assert_contains_lines_in_order(
[
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = myservice.foo(',
" Foo='string',",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :rtype: dict',
' :returns: ',
' **Response Syntax**',
' ::',
' {',
" 'Foo': 'string',",
" 'Bar': 'string'",
' }',
' **Response Structure**',
' - *(dict) --* ',
' - **Foo** *(string) --* Documents Foo',
' - **Bar** *(string) --* Documents Bar',
]
)
self.assert_not_contains_lines(
[':param Bar: string', 'Bar=\'string\'']
)
def test_exclude_output(self):
document_model_driven_resource_method(
self.doc_structure,
'foo',
self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = myservice.foo',
exclude_output=['Bar'],
resource_action_model=self.service_resource_model.actions[0],
)
self.assert_contains_lines_in_order(
[
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = myservice.foo(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: dict',
' :returns: ',
' **Response Syntax**',
' ::',
' {',
" 'Foo': 'string'",
' }',
' **Response Structure**',
' - *(dict) --* ',
' - **Foo** *(string) --* Documents Foo',
]
)
self.assert_not_contains_lines(
[
'\'Bar\': \'string\'',
'- **Bar** *(string) --*',
]
)
| TestDocumentModelDrivenResourceMethod |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_version_querysets.py | {
"start": 3000,
"end": 4506
} | class ____(TestVersionQuerySetBase):
def test_public(self):
query = Version.objects.public()
versions = {
self.version_latest,
self.version,
self.another_version,
self.another_version_latest,
self.shared_version,
self.shared_version_latest,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_user(self):
query = Version.objects.public(user=self.user)
versions = self.user_versions | {
self.another_version_latest,
self.another_version,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_public_project(self):
query = self.project.versions.public(user=self.user)
versions = {
self.version,
self.version_latest,
self.version_private,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
def test_api(self):
query = Version.objects.api()
versions = {
self.version_latest,
self.version,
self.another_version,
self.another_version_latest,
self.shared_version,
self.shared_version_latest,
}
self.assertEqual(query.count(), len(versions))
self.assertEqual(set(query), versions)
| VersionQuerySetTests |
python | python-openxml__python-docx | src/docx/image/constants.py | {
"start": 2486,
"end": 3466
} | class ____:
"""Tag codes for TIFF Image File Directory (IFD) entries."""
IMAGE_WIDTH = 0x0100
IMAGE_LENGTH = 0x0101
X_RESOLUTION = 0x011A
Y_RESOLUTION = 0x011B
RESOLUTION_UNIT = 0x0128
tag_names = {
0x00FE: "NewSubfileType",
0x0100: "ImageWidth",
0x0101: "ImageLength",
0x0102: "BitsPerSample",
0x0103: "Compression",
0x0106: "PhotometricInterpretation",
0x010E: "ImageDescription",
0x010F: "Make",
0x0110: "Model",
0x0111: "StripOffsets",
0x0112: "Orientation",
0x0115: "SamplesPerPixel",
0x0117: "StripByteCounts",
0x011A: "XResolution",
0x011B: "YResolution",
0x011C: "PlanarConfiguration",
0x0128: "ResolutionUnit",
0x0131: "Software",
0x0132: "DateTime",
0x0213: "YCbCrPositioning",
0x8769: "ExifTag",
0x8825: "GPS IFD",
0xC4A5: "PrintImageMatching",
}
| TIFF_TAG |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/keyword_table/retrievers.py | {
"start": 6207,
"end": 6697
} | class ____(BaseKeywordTableRetriever):
"""
Keyword Table Index Simple Retriever.
Extracts keywords using simple regex-based keyword extractor.
Set when `retriever_mode="simple"`.
See BaseGPTKeywordTableQuery for arguments.
"""
def _get_keywords(self, query_str: str) -> List[str]:
"""Extract keywords."""
return list(
simple_extract_keywords(query_str, max_keywords=self.max_keywords_per_query)
)
| KeywordTableSimpleRetriever |
python | google__jax | benchmarks/api_benchmark.py | {
"start": 1970,
"end": 25068
} | class ____(enum.IntEnum):
A = 123
B = 456
@google_benchmark.register
def eager_unary_dispatch(state):
a = jax.device_put(1)
x = lax.neg(a)
while state:
x = lax.neg(a)
x.block_until_ready()
@google_benchmark.register
def eager_unary(state):
a = jax.device_put(1)
lax.neg(a).block_until_ready()
while state:
lax.neg(a).block_until_ready()
@google_benchmark.register
def eager_binary_dispatch(state):
a = jax.device_put(1)
b = jax.device_put(2)
lax.add(a, b)
while state:
lax.add(a, b)
@google_benchmark.register
def eager_binary(state):
a = jax.device_put(1)
b = jax.device_put(2)
x = lax.add(a, b).block_until_ready()
while state:
x = lax.add(a, b).block_until_ready()
x.block_until_ready()
@google_benchmark.register
def jit_trivial_dispatch(state):
"""Benchmarks only the duration for jitted_f to return the future."""
f = jax.jit(swap)
a, b = f(1, 2)
x = f(a, b)
while state:
x = f(a, b)
x[0].block_until_ready()
@google_benchmark.register
def jit_trivial(state):
f = jax.jit(swap)
a, b = f(1, 2)
f(a, b)
while state:
c, d = f(a, b)
c.block_until_ready()
d.block_until_ready()
@google_benchmark.register
def jit_simple_dispatch(state):
a = jax.device_put(1)
b = jax.device_put(2)
f = jax.jit(operator.add)
x = f(a, b)
while state:
x = f(a, b)
x.block_until_ready()
@google_benchmark.register
def jit_simple(state):
a = jax.device_put(1)
b = jax.device_put(2)
f = jax.jit(operator.add)
f(a, b)
while state:
f(a, b).block_until_ready()
@google_benchmark.register
def jit_simple_dispatch_array(state):
a = jax.device_put(1)
b = jax.device_put(2)
f = jax.jit(operator.add)
x = f(a, b)
while state:
x = f(a, b)
x.block_until_ready()
@google_benchmark.register
def jit_simple_array(state):
a = jax.device_put(1)
b = jax.device_put(2)
f = jax.jit(operator.add)
f(a, b)
while state:
f(a, b).block_until_ready()
@google_benchmark.register
def jit_small_matmul(state):
x = np.random.uniform(size=(2, 2)).astype(np.float32)
x = jax.device_put(x)
f = jax.jit(lambda x: jnp.dot(x, x))
f(x).block_until_ready()
while state:
f(x).block_until_ready()
@google_benchmark.register
def jit_big_matmul(state):
x = np.random.uniform(size=(100, 100)).astype(np.float32)
x = jax.device_put(x)
f = jax.jit(lambda x: jnp.dot(x, x))
f(x).block_until_ready()
while state:
f(x).block_until_ready()
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
@google_benchmark.option.args([1000])
@google_benchmark.option.args([2000])
def jit_simple_many_args_dispatch(state):
  """Benchmarks dispatch of a jitted sum over many device arguments.

  Fix: the original listed every ``option.args`` value twice, registering
  each argument count as two identical benchmark instances; deduplicated.
  """
  args = [jax.device_put(i) for i in range(state.range(0))]
  f = jax.jit(sum)
  # Warmup: compile outside the timed region.
  x = f(args)
  x.block_until_ready()
  while state:
    x = f(args)
  # Sync once after the loop: only dispatch time is measured.
  x.block_until_ready()
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
@google_benchmark.option.args([1000])
@google_benchmark.option.args([2000])
def jit_simple_many_args(state):
  """Benchmarks dispatch plus execution of a jitted sum over many arguments.

  Fix: the original listed every ``option.args`` value twice, registering
  each argument count as two identical benchmark instances; deduplicated.
  """
  args = [jax.device_put(i) for i in range(state.range(0))]
  f = jax.jit(sum)
  f(args).block_until_ready()  # warmup/compile
  while state:
    f(args).block_until_ready()
def jit_simple_pruned_args_dispatch(n, state):
  """Benchmarks dispatch when all but one of `n` jit arguments are pruned."""
  args = [jax.device_put(i) for i in range(n)]
  # Only xs[0] is used; the remaining n-1 arguments get pruned by jit.
  f = jax.jit(lambda *xs: xs[0] + 1)
  x = f(*args)
  x.block_until_ready()  # warmup/compile
  while state:
    x = f(*args)
  # Sync after the loop: only dispatch is timed.
  x.block_until_ready()
def jit_simple_pruned_args(n, state):
  """Benchmarks dispatch+execution when most jit arguments are pruned."""
  args = [jax.device_put(i) for i in range(n)]
  # Only xs[0] is used; the remaining n-1 arguments get pruned by jit.
  f = jax.jit(lambda *xs: xs[0] + 1)
  x = f(*args)
  x.block_until_ready()  # warmup/compile
  while state:
    f(*args).block_until_ready()
# Register dispatch/execution variants of the pruned-args benchmarks for a
# range of argument counts (these take an extra `n`, so they cannot use the
# plain @google_benchmark.register decorator).
benchmarks = []
for n in [10, 100, 1000, 2000]:
  benchmarks += [
      google_benchmark.register(partial(jit_simple_pruned_args_dispatch, n),
                                name=f"jit_simple_pruned_args_dispatch_{n}"),
      google_benchmark.register(partial(jit_simple_pruned_args, n),
                                name=f"jit_simple_pruned_args_{n}")
  ]
@google_benchmark.register
def jit_dispatch_without_transfer(state):
  """Benchmarks jit dispatch when the input is already on device."""
  # We pick up a realistic input. 224 is usual for classification and 128 a
  # TPU-friendly batch-size.
  imgs = np.ones((128, 224, 224), np.float32)
  imgs = jax.device_put(imgs)  # transfer happens once, outside the loop
  f = jax.jit(lambda x: x+1)
  x = f(imgs)  # warmup/compile
  while state:
    x = f(imgs)
  x.block_until_ready()
@google_benchmark.register
def jit_dispatch_with_transfer(state):
  """Benchmarks jit dispatch including the host-to-device transfer."""
  imgs = np.ones((128, 224, 224), np.float32)
  f = jax.jit(lambda x: x+1)
  x = f(imgs).block_until_ready()  # warmup/compile
  while state:
    # imgs stays a numpy array, so every call pays the transfer cost.
    x = f(imgs)
  x.block_until_ready()
@google_benchmark.register
@required_devices(2)
def pmap_trivial_2_devices(state):
  """Benchmarks dispatch+execution of a trivial pmap over 2 devices."""
  f = jax.pmap(swap)
  a, b = f(jnp.array([1, 2]), jnp.array([3, 4]))  # warmup/compile
  while state:
    c, d = f(a, b)
    c.block_until_ready()
    d.block_until_ready()
@google_benchmark.register
@required_devices(8)
def pmap_trivial_dispatch_8_devices(state):
  """Benchmarks only the dispatch of a trivial pmap over 8 devices."""
  f = jax.pmap(swap)
  a, b = f(jnp.array([1, 2, 3, 4, 5, 6, 7, 8]),
           jnp.array([2, 3, 4, 5, 6, 7, 8, 9]))  # warmup/compile
  while state:
    a, b = f(a, b)
  # Sync after the loop: only dispatch is timed.
  a.block_until_ready()
  b.block_until_ready()
@google_benchmark.register
@required_devices(8)
def pmap_trivial_8_devices(state):
  """Benchmarks dispatch+execution of a trivial pmap over 8 devices."""
  f = jax.pmap(swap)
  a, b = f(jnp.array([1, 2, 3, 4, 5, 6, 7, 8]),
           jnp.array([2, 3, 4, 5, 6, 7, 8, 9]))  # warmup/compile
  while state:
    c, d = f(a, b)
    c.block_until_ready()
    d.block_until_ready()
@google_benchmark.register
@required_devices(2)
def pmap_simple_2_devices(state):
  """Benchmarks dispatch+execution of a simple pmap over 2 devices."""
  f = jax.pmap(lambda a, b: (a + b, a - b))
  a, b = f(jnp.array([1, 2]), jnp.array([3, 4]))  # warmup/compile
  while state:
    c, d = f(a, b)
    c.block_until_ready()
    d.block_until_ready()
@google_benchmark.register
@required_devices(8)
def pmap_simple_dispatch_8_devices(state):
  """Benchmarks only the dispatch of a simple pmap over 8 devices."""
  f = jax.pmap(lambda a, b: (a + b, a - b))
  a, b = f(jnp.array([1, 2, 3, 4, 5, 6, 7, 8]),
           jnp.array([2, 3, 4, 5, 6, 7, 8, 9]))  # warmup/compile
  while state:
    a, b = f(a, b)
  # Sync after the loop: only dispatch is timed.
  a.block_until_ready()
  b.block_until_ready()
@google_benchmark.register
@required_devices(8)
def pmap_simple_8_devices(state):
  """Benchmarks dispatch+execution of a simple pmap over 8 devices."""
  f = jax.pmap(lambda a, b: (a + b, a - b))
  a, b = f(jnp.array([1, 2, 3, 4, 5, 6, 7, 8]),
           jnp.array([2, 3, 4, 5, 6, 7, 8, 9]))  # warmup/compile
  while state:
    c, d = f(a, b)
    c.block_until_ready()
    d.block_until_ready()
@google_benchmark.register
@required_devices(8)
def pmap_simple_dispatch_8_devices_100_args(state):
  """Benchmarks pmap dispatch with 100 arguments over 8 devices."""
  # Rotates args and increments the first; every output feeds the next call.
  f = jax.pmap(lambda *args: args[1:] + (args[0] + 1,))
  args = []
  for i in range(100):
    args.append(jnp.array(list(range(i, i+8))))
  args = f(*args)  # warmup/compile
  while state:
    args = f(*args)
  # Sync after the loop: only dispatch is timed.
  args[0].block_until_ready()
@google_benchmark.register
@required_devices(8)
def pmap_simple_8_devices_100_args(state):
  """Benchmarks pmap dispatch+execution with 100 arguments over 8 devices."""
  # Rotates args and increments the first; every output feeds the next call.
  f = jax.pmap(lambda *args: args[1:] + (args[0] + 1,))
  args = []
  for i in range(100):
    args.append(jnp.array(list(range(i, i+8))))
  # Warmup loop.
  out = f(*args)
  while state:
    out = f(*args)
    jax.tree_util.tree_map(lambda x: x.block_until_ready(), out)
def _run_sda_index_bench(state, num_devices):
x = jax.pmap(jnp.sin)(jnp.arange(num_devices))
jax.device_get(x)
while state:
for i in range(num_devices):
_ = x[i]
x.block_until_ready()
@google_benchmark.register
@required_devices(1)
def sda_index_1(state):
  """Benchmarks shard indexing with 1 device."""
  _run_sda_index_bench(state, 1)
@google_benchmark.register
@required_devices(2)
def sda_index_2(state):
  """Benchmarks shard indexing with 2 devices."""
  _run_sda_index_bench(state, 2)
@google_benchmark.register
@required_devices(8)
def sda_index_8(state):
  """Benchmarks shard indexing with 8 devices."""
  _run_sda_index_bench(state, 8)
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_shaped_abstractify(state):
  """Benchmarks shaped_abstractify over 1000 identical device arrays."""
  device, *_ = jax.devices()
  # 1000 references to the same replicated array; abstraction is per-call.
  args = [jax.device_put_replicated(1, [device])] * 1000
  while state:
    _ = [core.shaped_abstractify(x) for x in args]
def _run_benchmark_for_xla_abstractify(arg, state):
  """Times core.abstractify on a single pre-built argument."""
  while state:
    core.abstractify(arg)
def bench_xla_abstractify():
  """Registers one abstractify benchmark per representative argument type."""
  _abstractify_args = [
      (3, 'scalar_int'),
      (3.5, 'scalar_float'),
      (np.int32(3), 'scalar_numpy_int32'),
      (np.uint32(7), 'scalar_numpy_uint32'),
      (np.random.randn(3, 4, 5, 6), 'numpy_random'),
      (np.arange(100, dtype=np.float32), 'numpy_arange_100_float32'),
      (AnEnum.B, 'enum'),
  ]
  benchmarks = []
  for a, name in _abstractify_args:
    benchmarks.extend([
        google_benchmark.register(
            partial(_run_benchmark_for_xla_abstractify, a),
            name=f'bench_xla_abstractify_{name}'),
    ])


# Registration happens at import time, alongside the decorated benchmarks.
bench_xla_abstractify()
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMicrosecond)
def bench_are_hlo_shardings_equal(state):
  """Benchmarks equality comparison of two large identical HloShardings."""
  op1 = xc.OpSharding()
  op1.type = xc.OpSharding.Type.OTHER
  op1.tile_assignment_dimensions = [4, 192, 16]
  op1.tile_assignment_devices = list(range(12288))

  op2 = xc.OpSharding()
  op2.type = xc.OpSharding.Type.OTHER
  op2.tile_assignment_dimensions = [4, 192, 16]
  op2.tile_assignment_devices = list(range(12288))

  hs1 = xc.HloSharding.from_proto(op1)
  hs2 = xc.HloSharding.from_proto(op2)

  while state:
    op_shardings.are_hlo_shardings_equal(hs1, hs2)
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_pjit_check_aval_sharding(state):
  """Benchmarks pjit's aval/sharding compatibility check over 100 pairs."""
  mesh = create_mesh((4, 2), ('x', 'y'), state)
  if mesh is None:
    # create_mesh already called state.skip_with_error.
    return
  s = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('x', 'y'))
  aval = jax.core.ShapedArray((8, 2), np.int32)
  while state:
    pjit_check_aval_sharding([s] * 100, [aval] * 100, [''] * 100, 'benchmark', False)
@google_benchmark.register
def bench_addressable_shards_index(state):
  """Benchmarks computing .index for every addressable shard of an Array."""
  mesh = create_mesh((4, 2), ('x', 'y'), state)
  if mesh is None:
    return
  shape = (8, 2)
  inp = np.arange(math.prod(shape)).reshape(shape)
  s = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('x', 'y'))
  arr = jax.device_put(inp, s)
  while state:
    [s.index for s in arr.addressable_shards]
@google_benchmark.register
def bench_addressable_shards_replica_id(state):
  """Benchmarks computing .replica_id for each shard on a large mesh."""
  mesh = create_mesh((32, 16), ('x', 'y'), state)
  if mesh is None:
    return
  shape = (64, 32)
  inp = np.arange(math.prod(shape)).reshape(shape)
  s = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('x', 'y'))
  arr = jax.device_put(inp, s)
  while state:
    [s.replica_id for s in arr.addressable_shards]
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_remat_eager_retracing_overheads(state):
  """Benchmarks eager retracing overhead of deeply composed remat'd sin."""
  def double_compose(f):
    return lambda x: f(f(x))

  # 2**6 = 64-fold composition of sin, then checkpointed and doubled again.
  f = jnp.sin
  for _ in range(6):
    f = double_compose(f)
  f = double_compose(checkpoint(f))

  while state:
    y, _ = jax.vjp(f, 3.)
    y.block_until_ready()
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_remat_eager_retracing_overheads_static_argnums(state):
  """Like bench_remat_eager_retracing_overheads, but with static_argnums."""
  def double_compose(f):
    return lambda x, y: f(f(x, y), y)

  # The second argument is static; it only threads through the composition.
  f = lambda x, _: jnp.sin(x)
  for _ in range(6):
    f = double_compose(f)
  f = double_compose(checkpoint(f, static_argnums=(1,)))

  while state:
    y, _ = jax.vjp(f, 3., True)
    y.block_until_ready()
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_slicing_compilation(state):
  """Benchmarks compile time of scalar indexing into an array."""
  x = jnp.arange(3)
  while state:
    jax.jit(lambda x: (x[0], x[1], x[2])).lower(x).compile()
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_slicing_compilation2(state):
  """Benchmarks compile time of slice indexing into an array."""
  x = jnp.arange(3)
  while state:
    jax.jit(lambda x: (x[:1], x[1:2], x[2:3])).lower(x).compile()
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_repeated_static_indexing(state):
  """Benchmarks 500 eager scalar-index operations on one array."""
  x = jnp.arange(500)
  while state:
    jax.block_until_ready([x[i] for i in range(500)])
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_repeated_static_slicing(state):
  """Benchmarks 500 eager 2-element slices of one array."""
  x = jnp.arange(1000)
  while state:
    jax.block_until_ready([x[i:i + 2] for i in range(0, 1000, 2)])
def pjit_simple_benchmark(state, num_devices, num_args, use_aot=False):
  """Shared driver for the pjit_simple_* / pjit_aot_* benchmarks.

  Args:
    state: google_benchmark state.
    num_devices: size of the 1-D mesh to shard over.
    num_args: number of (identical) sharded array arguments.
    use_aot: if True, lower and compile ahead of time so tracing is excluded.
  """
  spec = jax.sharding.PartitionSpec('x')
  mesh = create_mesh((num_devices,), ('x',), state)
  if mesh is None:
    # create_mesh already skipped the benchmark.
    return
  s = jax.sharding.NamedSharding(mesh, spec)
  inp_data = np.arange(num_devices).astype(np.float32)
  x = array.make_array_from_callback(inp_data.shape, s, lambda idx: inp_data[idx])

  x = [x for _ in range(num_args)]

  in_axis_resources = jax.sharding.NamedSharding(mesh, spec)
  out_axis_resources = jax.sharding.NamedSharding(mesh, spec)

  f = pjit_lib.pjit(
      lambda x: jax.tree.map(lambda x: x + 1, x),
      in_shardings=in_axis_resources,
      out_shardings=out_axis_resources,
  )

  if use_aot:
    f = f.lower(x).compile()

  x = f(x)  # warmup

  while state:
    x = f(x)
  x[0].block_until_ready()
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
def pjit_simple_1_device(state):
  """Benchmarks pjit dispatch on a 1-device mesh."""
  pjit_simple_benchmark(state, num_devices=1, num_args=state.range(0))
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
def pjit_simple_4_device(state):
  """Benchmarks pjit dispatch on a 4-device mesh."""
  pjit_simple_benchmark(state, num_devices=4, num_args=state.range(0))
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
def pjit_simple_4000_device(state):
  """Benchmarks pjit dispatch on a 4000-device mesh."""
  pjit_simple_benchmark(state, num_devices=4000, num_args=state.range(0))
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
def pjit_aot_1_device(state):
  """Benchmarks AOT-compiled pjit dispatch on a 1-device mesh."""
  pjit_simple_benchmark(
      state,
      num_devices=1,
      num_args=state.range(0),
      use_aot=True)
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
def pjit_aot_4_device(state):
  """Benchmarks AOT-compiled pjit dispatch on a 4-device mesh."""
  pjit_simple_benchmark(
      state,
      num_devices=4,
      num_args=state.range(0),
      use_aot=True)
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
def pjit_aot_4000_device(state):
  """Benchmarks AOT-compiled pjit dispatch on a 4000-device mesh."""
  pjit_simple_benchmark(
      state,
      num_devices=4000,
      num_args=state.range(0),
      use_aot=True)
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def host_local_array_to_global_array(state):
  """Benchmarks assembling host-local shards into global Arrays.

  Fix: every other create_mesh caller in this file bails out when
  create_mesh returns None (not enough devices; the benchmark has already
  been skipped); this one was missing the guard and would crash instead.
  """
  global_mesh = create_mesh((4, 2), ('x', 'y'), state)
  if global_mesh is None:
    return
  input_shape = (8, 2)
  input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
  in_pspec = jax.sharding.PartitionSpec('x', 'y')

  while state:
    multihost_utils.host_local_array_to_global_array(
        (input_data, input_data), global_mesh, (in_pspec, in_pspec))
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
@google_benchmark.option.args([1000])
def device_put_from_numpy_array(state):
  """Benchmarks device_put of a list of small numpy scalars."""
  x = [np.array(1, np.int32)] * state.range(0)
  while state:
    _ = jax.block_until_ready(jax.device_put(x))
@google_benchmark.register
@google_benchmark.option.arg_names(['num_args'])
@google_benchmark.option.args([1])
@google_benchmark.option.args([10])
@google_benchmark.option.args([100])
@google_benchmark.option.args([1000])
@required_devices(2)
def device_put_from_jax_array(state):
  """Benchmarks device-to-device device_put of committed jax Arrays."""
  x = [np.array(1, np.int32)] * state.range(0)
  # Commit the inputs to device 0 first, then time transfers to device 1.
  x = jax.block_until_ready(jax.device_put(x, device=jax.devices()[0]))
  d = jax.devices()[1]
  while state:
    _ = jax.block_until_ready(jax.device_put(x, device=d))
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def device_put_big(state):
  """Benchmarks device_put of a ~4 GB float32 host array."""
  num_elements = 4000 * 10**6 // np.dtype('float32').itemsize
  host_buf = np.arange(num_elements, dtype=np.float32)
  jax.device_put(host_buf).block_until_ready()  # warmup
  while state:
    _ = jax.device_put(host_buf).block_until_ready()
@google_benchmark.register
def device_put_sharded(state):
  """Benchmarks device_put_sharded across all available devices."""
  arr_inp = [np.array(i) for i in range(jax.device_count())]
  dev = jax.devices()
  while state:
    _ = jax.device_put_sharded(arr_inp, dev).block_until_ready()
@google_benchmark.register
@required_devices(8)
def device_get_8_devices(state):
  """Benchmarks device_get of 50 sharded outputs on a 4x2 mesh."""
  mesh = jax.sharding.Mesh(
      np.array(jax.devices()[:8]).reshape((4, 2)), ('x', 'y')
  )
  sharding = jax.sharding.NamedSharding(
      mesh, jax.sharding.PartitionSpec('x', 'y')
  )
  inp = jax.device_put(np.zeros((8, 4), dtype=np.float32), sharding)

  @jax.jit
  def fn(x):
    y = x + x
    return [y for _ in range(50)]

  jax.device_get(fn(inp))  # warmup/compile
  while state:
    jax.device_get(fn(inp))
@google_benchmark.register
@required_devices(8)
def np_asarray_8_devices(state):
  """Benchmarks np.asarray conversion of 50 sharded outputs on a 4x2 mesh."""
  mesh = jax.sharding.Mesh(
      np.array(jax.devices()[:8]).reshape((4, 2)), ('x', 'y')
  )
  sharding = jax.sharding.NamedSharding(
      mesh, jax.sharding.PartitionSpec('x', 'y')
  )
  inp = jax.device_put(np.zeros((8, 4), dtype=np.float32), sharding)

  @jax.jit
  def fn(x):
    y = x + x
    return [y for _ in range(50)]

  jax.device_get(fn(inp))  # warmup/compile
  while state:
    [np.asarray(x) for x in fn(inp)]
@google_benchmark.register
@required_devices(8)
def jax_array_arrays_8_devices(state):
  """Benchmarks accessing the per-shard buffers of 200 sharded outputs."""
  mesh = jax.sharding.Mesh(
      np.array(jax.devices()[:8]).reshape((4, 2)), ('x', 'y')
  )
  sharding = jax.sharding.NamedSharding(
      mesh, jax.sharding.PartitionSpec('x', 'y')
  )
  inp = jax.device_put(np.zeros((8, 4), dtype=np.float32), sharding)

  @jax.jit
  def fn(x):
    y = x + x
    return [y for _ in range(200)]

  jax.device_get(fn(inp))  # warmup/compile
  while state:
    # Touch the private per-device buffer list of every output.
    [x._arrays for x in fn(inp)]
def batch_inplace_while(inplace_op, state):
  """Benchmarks a vmapped while_loop that fills a buffer in place.

  Args:
    inplace_op: 'scatter' (``.at[].set``) or 'dynamic_update_slice'.
    state: google_benchmark state.

  Raises:
    ValueError: if `inplace_op` is not one of the two supported modes.

  Fix: the original validated `inplace_op` with `assert False` deep inside
  the traced body, which is stripped under `python -O`; validate eagerly
  with an explicit ValueError instead.
  """
  if inplace_op not in ('scatter', 'dynamic_update_slice'):
    raise ValueError(f'unknown inplace_op: {inplace_op!r}')

  @jax.jit
  @jax.vmap
  def f(init_step, init_xs):
    def cond(carry):
      step, xs = carry
      return step < xs.size

    def body(carry):
      step, xs = carry
      if inplace_op == 'scatter':
        xs = xs.at[step].set(1)
      else:
        xs = lax.dynamic_update_index_in_dim(xs, 1., step, 0)
      return step + 1, xs

    return lax.while_loop(cond, body, (init_step, init_xs))

  size = 100_000
  args = jnp.array([0]), jnp.zeros((1, size))
  jax.block_until_ready(f(*args))  # compile
  while state:
    jax.block_until_ready(f(*args))
# Register both in-place update strategies as separately named benchmarks.
google_benchmark.register(
    partial(batch_inplace_while, 'scatter'), name='batch_inplace_while_scatter')
google_benchmark.register(
    partial(batch_inplace_while, 'dynamic_update_slice'),
    name='batch_inplace_while_dynamic_update_slice')
@google_benchmark.register
def serial_dot_products(state):
  """Benchmarks a vmapped grad of 50 serially-dependent dot products."""
  SIZE = 50

  @jax.jit
  @jax.vmap
  @jax.grad
  def f(x):
    out = 0
    # Each iteration depends on the previous `out`, forcing a serial chain.
    for i in range(SIZE):
      y = x @ jnp.array([i, i + 1], dtype=jnp.float32)
      out = out + y * x[0]
    return out

  x = jax.random.normal(jax.random.key(0), (2, 2))
  f(x).block_until_ready()  # compile
  while state:
    f(x).block_until_ready()
@google_benchmark.register
@google_benchmark.option.arg_names(['arg_lengths', 'num_args'])
@google_benchmark.option.args_product([[0, 1, 2, 5, 10, 100], [1, 2, 3]])
def safe_map(state):
  """Benchmarks jax's length-checked map utility."""
  args = tuple(list(range(state.range(0))) for _ in range(state.range(1)))
  def f(*args): return tuple(args)
  while state:
    jax._src.util.safe_map(f, *args)
@google_benchmark.register
@google_benchmark.option.arg_names(['arg_lengths', 'num_args'])
@google_benchmark.option.args_product([[0, 1, 2, 5, 10, 100], [1, 2, 3]])
def safe_zip(state):
  """Benchmarks jax's length-checked zip utility."""
  args = tuple(list(range(state.range(0))) for _ in range(state.range(1)))
  while state:
    jax._src.util.safe_zip(*args)
@google_benchmark.register
def bench_make_array_from_callback_fully_replicated_sharding(state):
  """Benchmarks make_array_from_callback with a fully replicated sharding."""
  mesh = create_mesh((4, 2), ('x', 'y'), state)
  if mesh is None:
    return
  input_shape = (8, 2)
  np_arr = np.arange(math.prod(input_shape)).reshape(input_shape)
  # Empty PartitionSpec => every device holds the full array.
  s = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
  while state:
    jax.make_array_from_callback(input_shape, s, np_arr.__getitem__)
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_make_array_from_callback_partially_replicated_sharding(state):
  """Benchmarks make_array_from_callback with a partially replicated sharding.

  Consistency fix: use `jax.sharding.NamedSharding`, matching the sibling
  fully-replicated benchmark, instead of the `jax.NamedSharding` alias.
  """
  mesh = create_mesh((4, 2), ('x', 'y'), state)
  if mesh is None:
    return
  input_shape = (8, 2)
  np_arr = np.arange(math.prod(input_shape)).reshape(input_shape)
  # Replicated over 'x', sharded over 'y'.
  s = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec(None, 'y'))
  while state:
    jax.make_array_from_callback(input_shape, s, np_arr.__getitem__)
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def bench_make_array_from_callback_fully_sharded_sharding(state):
  """Benchmarks make_array_from_callback with a fully sharded sharding.

  Consistency fix: use `jax.sharding.NamedSharding`, matching the sibling
  fully-replicated benchmark, instead of the `jax.NamedSharding` alias.
  """
  mesh = create_mesh((4, 2), ('x', 'y'), state)
  if mesh is None:
    return
  input_shape = (8, 2)
  np_arr = np.arange(math.prod(input_shape)).reshape(input_shape)
  s = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('x', 'y'))
  while state:
    jax.make_array_from_callback(input_shape, s, np_arr.__getitem__)
@google_benchmark.register
@google_benchmark.option.unit(google_benchmark.kMillisecond)
def benchmark_lorentz63_cache_hits(state):
  """Benchmarks jaxpr construction of an unrolled Lorenz-63 training step.

  Fix: the inner step function's first parameter was named `state`,
  shadowing the benchmark state object; renamed to `carry` for clarity
  (pure rename of a local parameter, no behavior change).
  """
  @jax.jit
  def lorentz63(carry, dt=0.01, sigma=10, beta=8/3, rho=28):
    x, y, z = carry
    x_t = sigma * (y - x)
    y_t = (rho - z) * x - y
    z_t = x * y - beta * z
    return jnp.array([x + x_t * dt, y + y_t * dt, z + z_t * dt])

  def training_step(initial_conditions, steps=1, unroll=False):
    def forward_sim(x0):
      if unroll:
        # Python-level unrolling: each step re-hits the jit cache.
        x = x0
        for _ in range(steps):
          x = lorentz63(x)
        return x
      else:
        return jax.lax.fori_loop(0, steps, lambda _, x: lorentz63(x), x0)

    def loss(x0):
      out = jax.vmap(jax.remat(forward_sim))(x0)
      return jnp.square(out).sum()

    return jax.value_and_grad(loss)(initial_conditions)

  x = jnp.ones((8, 3))
  while state:
    jax.make_jaxpr(lambda x: training_step(x, 100, unroll=True))(x)
@google_benchmark.register
def jit_add_chain(state):
  """Benchmarks retracing a chain of 100 nested jitted adds."""
  SIZE = 100

  @jax.jit
  def g(x, y):
    return lax.add(x, y)

  x = jax.random.normal(jax.random.key(0), (2, 2))
  while state:
    # f is deliberately redefined every iteration so each call retraces.
    @jax.jit
    def f(x):
      for i in range(SIZE):
        x = g(x, x)
      return x
    f(x).block_until_ready()
# Entry point: run all benchmarks registered above.
if __name__ == "__main__":
  google_benchmark.main()
| AnEnum |
python | coleifer__peewee | tests/schema.py | {
"start": 31659,
"end": 31759
} | class ____(TestModel):
key = CharField()
value = IntegerField()
extra = IntegerField()
| TMKV |
python | plotly__plotly.py | plotly/graph_objs/mesh3d/colorbar/_tickfont.py | {
"start": 233,
"end": 9913
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "mesh3d.colorbar"
_path_str = "mesh3d.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.mesh3d.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | sqlalchemy__sqlalchemy | test/base/test_events.py | {
"start": 35063,
"end": 48985
} | class ____(TearDownLocalEventsFixture, fixtures.TestBase):
def _fixture(self):
class TargetEvents(event.Events):
def event_one(self, x, y):
pass
def event_two(self, x):
pass
def event_three(self, x):
pass
class Target:
dispatch = event.dispatcher(TargetEvents)
return Target
def _wrapped_fixture(self):
class TargetEvents(event.Events):
@classmethod
def _listen(cls, event_key):
fn = event_key._listen_fn
def adapt(value):
fn("adapted " + value)
event_key = event_key.with_wrapper(adapt)
event_key.base_listen()
def event_one(self, x):
pass
class Target:
dispatch = event.dispatcher(TargetEvents)
return Target
def test_two_subclasses_one_event(self):
"""test #12216"""
Target = self._fixture()
class TargetSubclassOne(Target):
pass
class TargetSubclassTwo(Target):
pass
m1 = Mock()
def my_event_one(x, y):
m1.my_event_one(x, y)
event.listen(TargetSubclassOne, "event_one", my_event_one)
event.listen(TargetSubclassTwo, "event_one", my_event_one)
t1 = TargetSubclassOne()
t2 = TargetSubclassTwo()
t1.dispatch.event_one("x1a", "y1a")
t2.dispatch.event_one("x2a", "y2a")
eq_(
m1.mock_calls,
[call.my_event_one("x1a", "y1a"), call.my_event_one("x2a", "y2a")],
)
event.remove(TargetSubclassOne, "event_one", my_event_one)
t1.dispatch.event_one("x1b", "y1b")
t2.dispatch.event_one("x2b", "y2b")
eq_(
m1.mock_calls,
[
call.my_event_one("x1a", "y1a"),
call.my_event_one("x2a", "y2a"),
call.my_event_one("x2b", "y2b"),
],
)
event.remove(TargetSubclassTwo, "event_one", my_event_one)
t1.dispatch.event_one("x1c", "y1c")
t2.dispatch.event_one("x2c", "y2c")
eq_(
m1.mock_calls,
[
call.my_event_one("x1a", "y1a"),
call.my_event_one("x2a", "y2a"),
call.my_event_one("x2b", "y2b"),
],
)
def test_two_subclasses_one_event_reg_cleanup(self):
"""test #12216"""
from sqlalchemy.event import registry
Target = self._fixture()
class TargetSubclassOne(Target):
pass
class TargetSubclassTwo(Target):
pass
m1 = Mock()
def my_event_one(x, y):
m1.my_event_one(x, y)
event.listen(TargetSubclassOne, "event_one", my_event_one)
event.listen(TargetSubclassTwo, "event_one", my_event_one)
key1 = (id(TargetSubclassOne), "event_one", id(my_event_one))
key2 = (id(TargetSubclassTwo), "event_one", id(my_event_one))
assert key1 in registry._key_to_collection
assert key2 in registry._key_to_collection
del TargetSubclassOne
gc_collect()
# the key remains because the gc routine would be based on deleting
# Target (I think)
assert key1 in registry._key_to_collection
assert key2 in registry._key_to_collection
del TargetSubclassTwo
gc_collect()
assert key1 in registry._key_to_collection
assert key2 in registry._key_to_collection
# event.remove(TargetSubclassTwo, "event_one", my_event_one)
def test_clslevel(self):
Target = self._fixture()
m1 = Mock()
event.listen(Target, "event_two", m1)
t1 = Target()
t1.dispatch.event_two("x")
event.remove(Target, "event_two", m1)
t1.dispatch.event_two("y")
eq_(m1.mock_calls, [call("x")])
def test_clslevel_subclass(self):
Target = self._fixture()
class SubTarget(Target):
pass
m1 = Mock()
event.listen(Target, "event_two", m1)
t1 = SubTarget()
t1.dispatch.event_two("x")
event.remove(Target, "event_two", m1)
t1.dispatch.event_two("y")
eq_(m1.mock_calls, [call("x")])
def test_instance(self):
Target = self._fixture()
class Foo:
def __init__(self):
self.mock = Mock()
def evt(self, arg):
self.mock(arg)
f1 = Foo()
f2 = Foo()
event.listen(Target, "event_one", f1.evt)
event.listen(Target, "event_one", f2.evt)
t1 = Target()
t1.dispatch.event_one("x")
event.remove(Target, "event_one", f1.evt)
t1.dispatch.event_one("y")
eq_(f1.mock.mock_calls, [call("x")])
eq_(f2.mock.mock_calls, [call("x"), call("y")])
def test_once(self):
Target = self._fixture()
m1 = Mock()
m2 = Mock()
m3 = Mock()
m4 = Mock()
event.listen(Target, "event_one", m1)
event.listen(Target, "event_one", m2, once=True)
event.listen(Target, "event_one", m3, once=True)
t1 = Target()
t1.dispatch.event_one("x")
t1.dispatch.event_one("y")
event.listen(Target, "event_one", m4, once=True)
t1.dispatch.event_one("z")
t1.dispatch.event_one("q")
eq_(m1.mock_calls, [call("x"), call("y"), call("z"), call("q")])
eq_(m2.mock_calls, [call("x")])
eq_(m3.mock_calls, [call("x")])
eq_(m4.mock_calls, [call("z")])
def test_once_unless_exception(self):
Target = self._fixture()
m1 = Mock()
m2 = Mock()
m3 = Mock()
m4 = Mock()
m1.side_effect = ValueError
m2.side_effect = ValueError
m3.side_effect = ValueError
event.listen(Target, "event_one", m1)
event.listen(Target, "event_one", m2, _once_unless_exception=True)
event.listen(Target, "event_one", m3, _once_unless_exception=True)
t1 = Target()
# only m1 is called, raises
assert_raises(ValueError, t1.dispatch.event_one, "x")
# now m1 and m2 can be called but not m3
m1.side_effect = None
assert_raises(ValueError, t1.dispatch.event_one, "y")
# now m3 can be called
m2.side_effect = None
event.listen(Target, "event_one", m4, _once_unless_exception=True)
assert_raises(ValueError, t1.dispatch.event_one, "z")
assert_raises(ValueError, t1.dispatch.event_one, "q")
eq_(m1.mock_calls, [call("x"), call("y"), call("z"), call("q")])
eq_(m2.mock_calls, [call("y"), call("z")])
eq_(m3.mock_calls, [call("z"), call("q")])
eq_(m4.mock_calls, []) # m4 never got called because m3 blocked it
# now m4 can be called
m3.side_effect = None
t1.dispatch.event_one("p")
eq_(
m1.mock_calls,
[call("x"), call("y"), call("z"), call("q"), call("p")],
)
# m2 already got called, so no "p"
eq_(m2.mock_calls, [call("y"), call("z")])
eq_(m3.mock_calls, [call("z"), call("q"), call("p")])
eq_(m4.mock_calls, [call("p")])
t1.dispatch.event_one("j")
eq_(
m1.mock_calls,
[call("x"), call("y"), call("z"), call("q"), call("p"), call("j")],
)
# nobody got "j" because they've all been successful
eq_(m2.mock_calls, [call("y"), call("z")])
eq_(m3.mock_calls, [call("z"), call("q"), call("p")])
eq_(m4.mock_calls, [call("p")])
def test_once_doesnt_dereference_listener(self):
    """``once=True`` must keep a strong reference to the listener, so an
    anonymous, otherwise-unreferenced closure is still invoked later.
    """
    # test for [ticket:4794]
    Target = self._fixture()
    canary = Mock()

    def go(target, given_id):
        # 'anonymous' has no external reference once go() returns; the
        # event machinery must be what keeps it alive
        def anonymous(run_id):
            canary(run_id, given_id)

        event.listen(target, "event_one", anonymous, once=True)

    t1 = Target()

    assert_calls = []
    given_ids = []
    for given_id in range(100):
        given_ids.append(given_id)
        go(t1, given_id)
        if given_id % 10 == 0:
            # each dispatch fires every once-listener registered since the
            # previous dispatch, exactly once each
            t1.dispatch.event_one(given_id)
            assert_calls.extend(call(given_id, i) for i in given_ids)
            given_ids[:] = []

    eq_(canary.mock_calls, assert_calls)
def test_propagate(self):
    """A ``propagate=True`` listener is carried over by ``dispatch._update()``
    onto another target; a ``propagate=False`` listener is not.
    """
    Target = self._fixture()
    canary = Mock()

    parent = Target()
    child = Target()

    event.listen(parent, "event_one", canary, propagate=True)
    event.listen(parent, "event_two", canary, propagate=False)

    # copy parent's instance-level listeners onto child; only the
    # propagate=True hookup transfers
    child.dispatch._update(parent.dispatch)

    for phase in ("x", "y"):
        parent.dispatch.event_one("t1e1" + phase)
        parent.dispatch.event_two("t1e2" + phase)
        child.dispatch.event_one("t2e1" + phase)
        child.dispatch.event_two("t2e2" + phase)
        if phase == "x":
            # removal between rounds; the "y" round reaches nobody
            event.remove(parent, "event_one", canary)
            event.remove(parent, "event_two", canary)

    # only the "x" round before removal was observed, and the child only
    # ever had the propagated event_one hookup
    eq_(canary.mock_calls, [call("t1e1x"), call("t1e2x"), call("t2e1x")])
@testing.requires.predictable_gc
def test_listener_collection_removed_cleanup(self):
    """Garbage-collecting an instance-level target purges its entries from
    the event registry's internal bookkeeping maps."""
    from sqlalchemy.event import registry

    Target = self._fixture()

    m1 = Mock()

    t1 = Target()

    event.listen(t1, "event_one", m1)

    # registry keys on (target id, event name, listener id)
    key = (id(t1), "event_one", id(m1))

    assert key in registry._key_to_collection
    collection_ref = list(registry._key_to_collection[key])[0]
    assert collection_ref in registry._collection_to_key

    t1.dispatch.event_one("t1")

    del t1
    gc_collect()

    # weakref finalizers removed both directions of the mapping
    assert key not in registry._key_to_collection
    assert collection_ref not in registry._collection_to_key
@testing.requires.predictable_gc
def test_listener_collection_removed_cleanup_clslevel(self):
    """test related to #12216"""
    from sqlalchemy.event import registry

    Target = self._fixture()

    m1 = Mock()

    # class-level (not instance-level) listener registration
    event.listen(Target, "event_one", m1)

    key = (id(Target), "event_one", id(m1))

    assert key in registry._key_to_collection
    collection_ref = list(registry._key_to_collection[key])[0]
    assert collection_ref in registry._collection_to_key

    t1 = Target()
    t1.dispatch.event_one("t1")

    del t1
    del Target
    gc_collect()

    # gc of a target class does not currently cause these collections
    # to be cleaned up
    assert key in registry._key_to_collection
    assert collection_ref in registry._collection_to_key
def test_remove_not_listened(self):
    """Removing a listener from an event it was never attached to raises
    ``InvalidRequestError``; attached events remove cleanly."""
    Target = self._fixture()
    listener = Mock()
    target = Target()

    event.listen(target, "event_one", listener, propagate=True)
    event.listen(target, "event_three", listener)

    # attached events can be removed without error
    event.remove(target, "event_one", listener)

    # event_two was never hooked up for this listener
    assert_raises_message(
        exc.InvalidRequestError,
        r"No listeners found for event <.*Target.*> / "
        r"'event_two' / <Mock.*> ",
        event.remove,
        target,
        "event_two",
        listener,
    )

    event.remove(target, "event_three", listener)
def test_no_remove_in_event(self):
    """A listener may not remove itself while its own event is firing."""
    Target = self._fixture()
    target = Target()

    def self_removing():
        # mutating the listener collection mid-dispatch is rejected
        event.remove(target, "event_one", self_removing)

    event.listen(target, "event_one", self_removing)

    assert_raises_message(
        Exception, "deque mutated during iteration", target.dispatch.event_one
    )
def test_no_add_in_event(self):
    """A listener may not register new listeners while its event fires."""
    Target = self._fixture()
    target = Target()
    canary = Mock()

    def adding_listener():
        # appending to the listener collection mid-dispatch is rejected
        event.listen(target, "event_one", canary)

    event.listen(target, "event_one", adding_listener)

    assert_raises_message(
        Exception, "deque mutated during iteration", target.dispatch.event_one
    )
def test_remove_plain_named(self):
    """A ``named=True`` listener receives kwargs and can be removed."""
    Target = self._fixture()
    handler = Mock()
    target = Target()
    event.listen(target, "event_one", handler, named=True)

    target.dispatch.event_one("t1")
    # positional arg delivered as keyword 'x'
    eq_(handler.mock_calls, [call(x="t1")])

    event.remove(target, "event_one", handler)
    target.dispatch.event_one("t2")
    # no new call recorded after removal
    eq_(handler.mock_calls, [call(x="t1")])
def test_remove_wrapped_named(self):
    """Same as the plain case, but the fixture wraps/adapts the argument."""
    Target = self._wrapped_fixture()
    handler = Mock()
    target = Target()
    event.listen(target, "event_one", handler, named=True)

    target.dispatch.event_one("t1")
    # the wrapped fixture adapts the argument before delivery
    eq_(handler.mock_calls, [call(x="adapted t1")])

    event.remove(target, "event_one", handler)
    target.dispatch.event_one("t2")
    # no new call recorded after removal
    eq_(handler.mock_calls, [call(x="adapted t1")])
def test_double_event_nonwrapped(self):
    """Registering the same listener twice collapses to a single entry."""
    Target = self._fixture()
    handler = Mock()
    target = Target()
    event.listen(target, "event_one", handler)
    event.listen(target, "event_one", handler)

    target.dispatch.event_one("t1")
    # doubles are eliminated
    eq_(handler.mock_calls, [call("t1")])

    # only one remove needed
    event.remove(target, "event_one", handler)
    target.dispatch.event_one("t2")
    eq_(handler.mock_calls, [call("t1")])
def test_double_event_wrapped(self):
    """Double registration also collapses when the fixture wraps args."""
    # this is issue #3199
    Target = self._wrapped_fixture()
    handler = Mock()
    target = Target()

    event.listen(target, "event_one", handler)
    event.listen(target, "event_one", handler)

    target.dispatch.event_one("t1")
    # doubles are eliminated
    eq_(handler.mock_calls, [call("adapted t1")])

    # only one remove needed
    event.remove(target, "event_one", handler)
    target.dispatch.event_one("t2")
    eq_(handler.mock_calls, [call("adapted t1")])
| RemovalTest |
python | jupyterlab__jupyterlab | jupyterlab/utils.py | {
"start": 303,
"end": 2345
class ____:  # noqa
    """Decorator to mark deprecated functions with warning.

    Adapted from `scikit-image/skimage/_shared/utils.py`.

    Parameters
    ----------
    alt_func : str
        If given, tell user what function to use instead.
    behavior : {'warn', 'raise'}
        Behavior during call to deprecated function: 'warn' = warn user that
        function is deprecated; 'raise' = raise error.
    removed_version : str
        The package version in which the deprecated function will be removed.
    """

    def __init__(self, alt_func=None, behavior="warn", removed_version=None):
        self.alt_func = alt_func
        self.behavior = behavior
        self.removed_version = removed_version

    def __call__(self, func):
        # Build the message pieces up-front so they are captured by the
        # wrapper's closure.
        alt_msg = "" if self.alt_func is None else f" Use ``{self.alt_func}`` instead."
        rmv_msg = (
            ""
            if self.removed_version is None
            else f" and will be removed in version {self.removed_version}"
        )
        function_description = func.__name__ + rmv_msg + "." + alt_msg
        msg = f"Function ``{function_description}`` is deprecated"

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if self.behavior == "warn":
                func_code = func.__code__
                # point the warning at the decorated function's definition
                warnings.simplefilter("always", jupyterlab_deprecation)
                warnings.warn_explicit(
                    msg,
                    category=jupyterlab_deprecation,
                    filename=func_code.co_filename,
                    lineno=func_code.co_firstlineno + 1,
                )
            elif self.behavior == "raise":
                raise jupyterlab_deprecation(msg)
            return func(*args, **kwargs)

        # modify doc string to display deprecation warning
        doc = "**Deprecated function**." + alt_msg
        if wrapped.__doc__ is None:
            wrapped.__doc__ = doc
        else:
            wrapped.__doc__ = doc + "\n\n " + wrapped.__doc__

        return wrapped
| deprecated |
python | openai__openai-python | src/openai/resources/realtime/realtime.py | {
"start": 32694,
"end": 33456
class ____(BaseRealtimeConnectionResource):
    def clear(self, *, event_id: str | Omit = omit) -> None:
        """**WebRTC Only:** Emit to cut off the current audio response.

        This will trigger the server to stop generating audio and emit a
        `output_audio_buffer.cleared` event. This event should be preceded by a
        `response.cancel` client event to stop the generation of the current
        response.
        [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
        """
        # assemble the raw event, drop omitted fields, then send it
        payload = {"type": "output_audio_buffer.clear", "event_id": event_id}
        self._connection.send(cast(RealtimeClientEventParam, strip_not_given(payload)))
| RealtimeOutputAudioBufferResource |
python | Lightning-AI__lightning | src/lightning/pytorch/core/datamodule.py | {
"start": 1359,
"end": 14554
class ____(DataHooks, HyperparametersMixin):
    """A DataModule standardizes the training, val, test splits, data preparation and transforms. The main advantage
    is consistent data splits, data preparation and transforms across models.

    Example::

        import lightning as L
        import torch.utils.data as data
        from lightning.pytorch.demos.boring_classes import RandomDataset

        class MyDataModule(L.LightningDataModule):
            def prepare_data(self):
                # download, IO, etc. Useful with shared filesystems
                # only called on 1 GPU/TPU in distributed
                ...

            def setup(self, stage):
                # make assignments here (val/train/test split)
                # called on every process in DDP
                dataset = RandomDataset(1, 100)
                self.train, self.val, self.test = data.random_split(
                    dataset, [80, 10, 10], generator=torch.Generator().manual_seed(42)
                )

            def train_dataloader(self):
                return data.DataLoader(self.train)

            def val_dataloader(self):
                return data.DataLoader(self.val)

            def test_dataloader(self):
                return data.DataLoader(self.test)

            def on_exception(self, exception):
                # clean up state after the trainer faced an exception
                ...

            def teardown(self):
                # clean up state after the trainer stops, delete files...
                # called on every process in DDP
                ...

    """

    # Optional human-readable name for the datamodule.
    name: Optional[str] = None
    # Keys under which datamodule hyperparameters are stored in a checkpoint.
    CHECKPOINT_HYPER_PARAMS_KEY = "datamodule_hyper_parameters"
    CHECKPOINT_HYPER_PARAMS_NAME = "datamodule_hparams_name"
    CHECKPOINT_HYPER_PARAMS_TYPE = "datamodule_hparams_type"

    def __init__(self) -> None:
        super().__init__()
        # Pointer to the trainer object
        self.trainer: Optional[pl.Trainer] = None

    @classmethod
    def from_datasets(
        cls,
        train_dataset: Optional[Union[Dataset, Iterable[Dataset]]] = None,
        val_dataset: Optional[Union[Dataset, Iterable[Dataset]]] = None,
        test_dataset: Optional[Union[Dataset, Iterable[Dataset]]] = None,
        predict_dataset: Optional[Union[Dataset, Iterable[Dataset]]] = None,
        batch_size: int = 1,
        num_workers: int = 0,
        **datamodule_kwargs: Any,
    ) -> "LightningDataModule":
        r"""Create an instance from torch.utils.data.Dataset.

        Args:
            train_dataset: Optional dataset or iterable of datasets to be used for train_dataloader()
            val_dataset: Optional dataset or iterable of datasets to be used for val_dataloader()
            test_dataset: Optional dataset or iterable of datasets to be used for test_dataloader()
            predict_dataset: Optional dataset or iterable of datasets to be used for predict_dataloader()
            batch_size: Batch size to use for each dataloader. Default is 1. This parameter gets forwarded to the
                ``__init__`` if the datamodule has such a name defined in its signature.
            num_workers: Number of subprocesses to use for data loading. 0 means that the
                data will be loaded in the main process. Number of CPUs available. This parameter gets forwarded to
                the ``__init__`` if the datamodule has such a name defined in its signature.
            **datamodule_kwargs: Additional parameters that get passed down to the datamodule's ``__init__``.

        """

        def dataloader(ds: Dataset, shuffle: bool = False) -> DataLoader:
            # shuffling is not supported for iterable-style datasets
            shuffle &= not isinstance(ds, IterableDataset)
            return DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True)

        def train_dataloader() -> TRAIN_DATALOADERS:
            return apply_to_collection(train_dataset, Dataset, dataloader, shuffle=True)

        def val_dataloader() -> EVAL_DATALOADERS:
            return apply_to_collection(val_dataset, Dataset, dataloader)

        def test_dataloader() -> EVAL_DATALOADERS:
            return apply_to_collection(test_dataset, Dataset, dataloader)

        def predict_dataloader() -> EVAL_DATALOADERS:
            return apply_to_collection(predict_dataset, Dataset, dataloader)

        # forward batch_size/num_workers to __init__ only if its signature
        # accepts them (either explicitly or via **kwargs)
        candidate_kwargs = {"batch_size": batch_size, "num_workers": num_workers}
        accepted_params = inspect.signature(cls.__init__).parameters
        accepts_kwargs = any(param.kind == param.VAR_KEYWORD for param in accepted_params.values())
        if accepts_kwargs:
            special_kwargs = candidate_kwargs
        else:
            accepted_param_names = set(accepted_params)
            accepted_param_names.discard("self")
            special_kwargs = {k: v for k, v in candidate_kwargs.items() if k in accepted_param_names}

        datamodule = cls(**datamodule_kwargs, **special_kwargs)
        # only override hooks for datasets that were actually provided
        if train_dataset is not None:
            datamodule.train_dataloader = train_dataloader  # type: ignore[method-assign]
        if val_dataset is not None:
            datamodule.val_dataloader = val_dataloader  # type: ignore[method-assign]
        if test_dataset is not None:
            datamodule.test_dataloader = test_dataloader  # type: ignore[method-assign]
        if predict_dataset is not None:
            datamodule.predict_dataloader = predict_dataloader  # type: ignore[method-assign]
        return datamodule

    def state_dict(self) -> dict[str, Any]:
        """Called when saving a checkpoint, implement to generate and save datamodule state.

        Returns:
            A dictionary containing datamodule state.

        """
        return {}

    def load_state_dict(self, state_dict: dict[str, Any]) -> None:
        """Called when loading a checkpoint, implement to reload datamodule state given datamodule state_dict.

        Args:
            state_dict: the datamodule state returned by ``state_dict``.

        """
        pass

    def on_exception(self, exception: BaseException) -> None:
        """Called when the trainer execution is interrupted by an exception."""
        pass

    @_restricted_classmethod
    def load_from_checkpoint(
        cls,
        checkpoint_path: Union[_PATH, IO],
        map_location: _MAP_LOCATION_TYPE = None,
        hparams_file: Optional[_PATH] = None,
        weights_only: Optional[bool] = None,
        **kwargs: Any,
    ) -> Self:
        r"""Primary way of loading a datamodule from a checkpoint. When Lightning saves a checkpoint it stores the
        arguments passed to ``__init__`` in the checkpoint under ``"datamodule_hyper_parameters"``.

        Any arguments specified through \*\*kwargs will override args stored in ``"datamodule_hyper_parameters"``.

        Args:
            checkpoint_path: Path to checkpoint. This can also be a URL, or file-like object
            map_location:
                If your checkpoint saved a GPU model and you now load on CPUs
                or a different number of GPUs, use this to map to the new setup.
                The behaviour is the same as in :func:`torch.load`.
            hparams_file: Optional path to a ``.yaml`` or ``.csv`` file with hierarchical structure
                as in this example::

                    dataloader:
                        batch_size: 32

                You most likely won't need this since Lightning will always save the hyperparameters
                to the checkpoint.
                However, if your checkpoint weights don't have the hyperparameters saved,
                use this method to pass in a ``.yaml`` file with the hparams you'd like to use.
                These will be converted into a :class:`~dict` and passed into your
                :class:`LightningDataModule` for use.

                If your datamodule's ``hparams`` argument is :class:`~argparse.Namespace`
                and ``.yaml`` file has hierarchical structure, you need to refactor your datamodule to treat
                ``hparams`` as :class:`~dict`.
            weights_only: If ``True``, restricts loading to ``state_dicts`` of plain ``torch.Tensor`` and other
                primitive types. If loading a checkpoint from a trusted source that contains an ``nn.Module``, use
                ``weights_only=False``. If loading checkpoint from an untrusted source, we recommend using
                ``weights_only=True``. For more information, please refer to the
                `PyTorch Developer Notes on Serialization Semantics <https://docs.pytorch.org/docs/main/notes/serialization.html#id3>`_.
            \**kwargs: Any extra keyword args needed to init the datamodule. Can also be used to override saved
                hyperparameter values.

        Return:
            :class:`LightningDataModule` instance with loaded weights and hyperparameters (if available).

        Note:
            ``load_from_checkpoint`` is a **class** method. You must use your :class:`LightningDataModule`
            **class** to call it instead of the :class:`LightningDataModule` instance, or a
            ``TypeError`` will be raised.

        Example::

            # load weights without mapping ...
            datamodule = MyLightningDataModule.load_from_checkpoint('path/to/checkpoint.ckpt')

            # or load weights and hyperparameters from separate files.
            datamodule = MyLightningDataModule.load_from_checkpoint(
                'path/to/checkpoint.ckpt',
                hparams_file='/path/to/hparams_file.yaml'
            )

            # override some of the params with new values
            datamodule = MyLightningDataModule.load_from_checkpoint(
                PATH,
                batch_size=32,
                num_workers=10,
            )

        """
        loaded = _load_from_checkpoint(
            cls,
            checkpoint_path,
            map_location=map_location,
            hparams_file=hparams_file,
            strict=None,
            weights_only=weights_only,
            **kwargs,
        )
        return cast(Self, loaded)

    def __str__(self) -> str:
        """Return a string representation of the datasets that are set up.

        Returns:
            A string representation of the datasets that are setup.

        """

        # lightweight record: whether a dataloader hook exists/succeeded, and
        # the (stringified) dataset length
        class dataset_info:
            def __init__(self, available: bool, length: str) -> None:
                self.available = available
                self.length = length

        def retrieve_dataset_info(loader: DataLoader) -> dataset_info:
            """Helper function to compute dataset information."""
            dataset = loader.dataset
            # iterable-style datasets have no len(); report "NA"
            size: str = str(len(dataset)) if isinstance(dataset, Sized) else "NA"
            return dataset_info(True, size)

        def loader_info(
            loader: Union[DataLoader, Iterable[DataLoader]],
        ) -> Union[dataset_info, Iterable[dataset_info]]:
            """Helper function to compute dataset information."""
            return apply_to_collection(loader, DataLoader, retrieve_dataset_info)

        def extract_loader_info(methods: list[tuple[str, str]]) -> dict:
            """Helper function to extract information for each dataloader method."""
            info: dict[str, Union[dataset_info, Iterable[dataset_info]]] = {}
            for loader_name, func_name in methods:
                loader_method = getattr(self, func_name, None)
                try:
                    loader = loader_method()  # type: ignore
                    info[loader_name] = loader_info(loader)
                except Exception:
                    # hook not implemented or raised: report as unavailable
                    info[loader_name] = dataset_info(False, "")
            return info

        def format_loader_info(info: dict[str, Union[dataset_info, Iterable[dataset_info]]]) -> str:
            """Helper function to format loader information."""
            output = []
            # NOTE(review): the loop variable ``loader_info`` shadows the helper
            # function of the same name above; harmless here since the helper
            # is not called inside this function.
            for loader_name, loader_info in info.items():
                # Single dataset
                if isinstance(loader_info, dataset_info):
                    loader_info_formatted = "None" if not loader_info.available else f"size={loader_info.length}"
                # Iterable of datasets
                else:
                    loader_info_formatted = " ; ".join(
                        "None" if not loader_info_i.available else f"{i}. size={loader_info_i.length}"
                        for i, loader_info_i in enumerate(loader_info, start=1)
                    )
                output.append(f"{{{loader_name}: {loader_info_formatted}}}")
            return os.linesep.join(output)

        # Available dataloader methods
        datamodule_loader_methods: list[tuple[str, str]] = [
            ("Train dataloader", "train_dataloader"),
            ("Validation dataloader", "val_dataloader"),
            ("Test dataloader", "test_dataloader"),
            ("Predict dataloader", "predict_dataloader"),
        ]

        # Retrieve information for each dataloader method
        dataloader_info = extract_loader_info(datamodule_loader_methods)

        # Format the information
        dataloader_str = format_loader_info(dataloader_info)

        return dataloader_str
| LightningDataModule |
python | Netflix__metaflow | test/core/metaflow_test/formatter.py | {
"start": 63,
"end": 7886
class ____(object):
    """Renders a test specification against a graph specification, producing
    two generated Python modules: a Metaflow flow (``flow_code``) and a
    results-checker script (``check_code``)."""

    def __init__(self, graphspec, test):
        """Bind *test* to *graphspec*, match a step implementation to every
        graph node, and pre-render the flow/checker source if the test/graph
        combination is valid."""
        self.graphspec = graphspec
        self.test = test
        self.should_resume = getattr(test, "RESUME", False)
        self.resume_step = getattr(test, "RESUME_STEP", None)
        self.should_fail = getattr(test, "SHOULD_FAIL", False)
        self.flow_name = "%sFlow" % self.test.__class__.__name__
        self.used = set()
        self._code_cache = {}
        self.steps = self._index_steps(test)
        self.copy_files = getattr(test, "REQUIRED_FILES", [])
        self.skip_graphs = getattr(test, "SKIP_GRAPHS", [])
        self.only_graphs = getattr(test, "ONLY_GRAPHS", [])

        self.valid = True
        graph_name = self.graphspec.get("name", "")
        # a test can opt out of specific graphs, or restrict itself to a list
        if graph_name in self.skip_graphs:
            self.valid = False
        elif self.only_graphs and graph_name not in self.only_graphs:
            self.valid = False

        if self.valid:
            # rendering also populates self.used with matched steps
            self.flow_code = self._pretty_print(self._flow_lines())
            self.check_code = self._pretty_print(self._check_lines())
            # every step the test marks as required must have been matched
            for step in self.steps:
                if step.required and step not in self.used:
                    self.valid = False

    def _format_method(self, step):
        """Return (and cache) the dedented source lines of *step*'s body,
        starting at the line after its ``def``."""

        def lines():
            lines, lineno = inspect.getsourcelines(step)
            lines_iter = iter(lines)
            is_next_line = False
            # skip decorators etc.; stop at the first line after 'def'
            for line in lines_iter:
                head = line.lstrip()
                if is_next_line:
                    first_line = line
                    break
                if head.startswith("def "):
                    is_next_line = True
            # NOTE(review): assumes a 'def ' line exists in the source,
            # otherwise first_line is unbound — fine for test-owned steps
            indent = len(first_line) - len(first_line.lstrip())
            yield first_line[indent:].rstrip()
            for line in lines_iter:
                yield line[indent:].rstrip()

        code = self._code_cache.get(step)
        if code is None:
            code = self._code_cache[step] = list(lines())
        return code

    def _index_steps(self, test):
        """Collect all step callables defined on *test*, ordered by priority."""
        steps = []
        for attr in dir(test):
            obj = getattr(test, attr)
            if isinstance(obj, StepMutator):
                steps.append(obj._my_step)
            if hasattr(obj, "is_step"):
                steps.append(obj)
        return list(sorted(steps, key=lambda x: x.prio))

    def _node_quals(self, name, node):
        """Return the set of qualifier labels describing a graph node."""
        quals = {"all"}
        quals.update(node.get("quals", []))
        if name in ("start", "end"):
            quals.add(name)
        if "join" in node:
            quals.add("join")
        if "parallel_step" in node:
            quals.add("parallel-step")
        if "linear" in node:
            quals.add("linear")
        if "switch" in node:
            quals.add("switch-step")
        # NOTE(review): duplicates the quals.update() above; harmless
        for qual in node.get("quals", []):
            quals.add(qual)
        return quals

    def _choose_step(self, name, node):
        """Pick the highest-priority test step whose qualifiers intersect the
        node's qualifiers; raise if no step matches."""
        node_quals = self._node_quals(name, node)
        for step in self.steps:
            if step.quals & node_quals:
                return step
        raise Exception(
            "Test %s doesn't have a match for step %s in graph %s"
            % (self.test, name, self.graphspec["name"])
        )

    def _flow_lines(self):
        """Yield (indent_level, text) pairs for the generated flow module."""
        tags = []
        for step in self.steps:
            # tag specs may carry arguments, e.g. "retry(times=2)"
            tags.extend(tag.split("(")[0] for tag in step.tags)

        yield 0, "# -*- coding: utf-8 -*-"
        yield 0, (
            "from metaflow import Config, config_expr, FlowSpec, step, Parameter, "
            "project, IncludeFile, JSONType, current, parallel, FlowMutator, "
            "StepMutator, UserStepDecorator, user_step_decorator"
        )
        yield 0, (
            "from metaflow_test import assert_equals, assert_equals_metadata, "
            "assert_exception, ExpectationFailed, is_resumed, ResumeFromHere, "
            "TestRetry, try_to_get_card"
        )
        if tags:
            yield 0, "from metaflow import %s" % ",".join(tags)
        yield 0, self.test.HEADER
        yield 0, "class %s(FlowSpec):" % self.flow_name

        for var, val in self.test.CLASS_VARS.items():
            yield 1, "%s = %s" % (var, val)

        for var, parameter in self.test.PARAMETERS.items():
            kwargs = ["%s=%s" % (k, v) for k, v in parameter.items()]
            yield 1, '%s = Parameter("%s", %s)' % (var, var, ",".join(kwargs))

        for var, include in self.test.INCLUDE_FILES.items():
            kwargs = ["%s=%s" % (k, v) for k, v in include.items()]
            yield 1, '%s = IncludeFile("%s", %s)' % (var, var, ",".join(kwargs))

        for var, include in self.test.CONFIGS.items():
            kwargs = ["%s=%s" % (k, v) for k, v in include.items()]
            yield 1, '%s = Config("%s", %s)' % (var, var, ",".join(kwargs))

        for name, node in self.graphspec["graph"].items():
            step = self._choose_step(name, node)
            self.used.add(step)

            for tagspec in step.tags:
                yield 1, "@%s" % tagspec
            if "parallel_step" in node:
                yield 1, "@parallel"
            yield 1, "@step"

            if "join" in node:
                yield 1, "def %s(self, inputs):" % name
            else:
                yield 1, "def %s(self):" % name

            if "foreach" in node:
                # seed the foreach variable with its default before the body
                yield 2, "self.%s = %s" % (
                    node["foreach_var"],
                    node["foreach_var_default"],
                )
            for line in self._format_method(step):
                yield 2, line

            # emit the transition appropriate for the node type
            if "linear" in node:
                yield 2, "self.next(self.%s)" % node["linear"]
            elif "branch" in node:
                branches = ",".join("self.%s" % x for x in node["branch"])
                yield 2, "self.next(%s)" % branches
            elif "switch" in node:
                # Handle switch nodes - generate the switch dictionary and condition
                switch_dict = node["switch"]
                condition = node["condition"]
                switch_branches = (
                    "{"
                    + ", ".join(
                        '"%s": self.%s' % (key, branch)
                        for key, branch in switch_dict.items()
                    )
                    + "}"
                )
                yield 2, "self.next(%s, condition='%s')" % (switch_branches, condition)
            elif "foreach" in node:
                yield 2, 'self.next(self.%s, foreach="%s")' % (
                    node["foreach"],
                    node["foreach_var"],
                )
            elif "num_parallel" in node:
                yield 2, "self.next(self.%s, num_parallel=%d)" % (
                    node["parallel"],
                    node["num_parallel"],
                )

        yield 0, "if __name__ == '__main__':"
        yield 1, "%s()" % self.flow_name

    def _check_lines(self):
        """Yield (indent_level, text) pairs for the generated checker module."""
        yield 0, "# -*- coding: utf-8 -*-"
        yield 0, "import sys"
        yield 0, "from metaflow_test import assert_equals, assert_equals_metadata, assert_exception, new_checker"
        yield 0, "def check_results(flow, checker):"
        for line in self._format_method(self.test.check_results):
            yield 1, line
        yield 0, "if __name__ == '__main__':"
        yield 1, "from test_flow import %s" % self.flow_name
        yield 1, "flow = %s(use_cli=False)" % self.flow_name
        yield 1, "check = new_checker(flow)"
        yield 1, "check_results(flow, check)"

    def _pretty_print(self, lines):
        """Join (indent_level, text) pairs into source text using INDENT
        spaces per level."""

        def _lines():
            for indent, line in lines:
                yield "".join((" " * (indent * INDENT), line))

        return "\n".join(_lines())

    def __str__(self):
        return "test '%s' graph '%s'" % (
            self.test.__class__.__name__,
            self.graphspec["name"],
        )
| FlowFormatter |
python | pydata__xarray | xarray/tests/test_indexes.py | {
"start": 3279,
"end": 11845
class ____:
    """Unit tests for ``PandasIndex``, xarray's wrapper around a 1-D
    ``pandas.Index`` attached to a single dimension."""

    def test_constructor(self) -> None:
        pd_idx = pd.Index([1, 2, 3])
        index = PandasIndex(pd_idx, "x")
        assert index.index.equals(pd_idx)
        # makes a shallow copy
        assert index.index is not pd_idx
        assert index.dim == "x"

        # test no name set for pd.Index
        pd_idx.name = None
        index = PandasIndex(pd_idx, "x")
        assert index.index.name == "x"

    def test_from_variables(self) -> None:
        # pandas has only Float64Index but variable dtype should be preserved
        data = np.array([1.1, 2.2, 3.3], dtype=np.float32)
        var = xr.Variable(
            "x", data, attrs={"unit": "m"}, encoding={"dtype": np.float64}
        )

        index = PandasIndex.from_variables({"x": var}, options={})
        assert index.dim == "x"
        assert index.index.equals(pd.Index(data))
        assert index.coord_dtype == data.dtype

        var2 = xr.Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        with pytest.raises(ValueError, match=r".*only accepts one variable.*"):
            PandasIndex.from_variables({"x": var, "foo": var2}, options={})

        with pytest.raises(
            ValueError, match=r".*cannot set a PandasIndex.*scalar variable.*"
        ):
            PandasIndex.from_variables({"foo": xr.Variable((), 1)}, options={})

        with pytest.raises(
            ValueError, match=r".*only accepts a 1-dimensional variable.*"
        ):
            PandasIndex.from_variables({"foo": var2}, options={})

    def test_from_variables_index_adapter(self) -> None:
        # test index type is preserved when variable wraps a pd.Index
        data = pd.Series(["foo", "bar"], dtype="category")
        pd_idx = pd.Index(data)
        var = xr.Variable("x", pd_idx)

        index = PandasIndex.from_variables({"x": var}, options={})
        assert isinstance(index.index, pd.CategoricalIndex)

    def test_concat_periods(self):
        # concat must preserve the PeriodIndex subclass
        periods = pd.period_range("2000-01-01", periods=10)
        indexes = [PandasIndex(periods[:5], "t"), PandasIndex(periods[5:], "t")]
        expected = PandasIndex(periods, "t")
        actual = PandasIndex.concat(indexes, dim="t")
        assert actual.equals(expected)
        assert isinstance(actual.index, pd.PeriodIndex)

        positions = [list(range(5)), list(range(5, 10))]
        actual = PandasIndex.concat(indexes, dim="t", positions=positions)
        assert actual.equals(expected)
        assert isinstance(actual.index, pd.PeriodIndex)

    @pytest.mark.parametrize("dtype", [str, bytes])
    def test_concat_str_dtype(self, dtype) -> None:
        a = PandasIndex(np.array(["a"], dtype=dtype), "x", coord_dtype=dtype)
        b = PandasIndex(np.array(["b"], dtype=dtype), "x", coord_dtype=dtype)
        expected = PandasIndex(
            np.array(["a", "b"], dtype=dtype), "x", coord_dtype=dtype
        )

        actual = PandasIndex.concat([a, b], "x")
        assert actual.equals(expected)
        assert np.issubdtype(actual.coord_dtype, dtype)

    def test_concat_empty(self) -> None:
        # no inputs: falls back to object dtype
        idx = PandasIndex.concat([], "x")
        assert idx.coord_dtype is np.dtype("O")

    def test_concat_dim_error(self) -> None:
        indexes = [PandasIndex([0, 1], "x"), PandasIndex([2, 3], "y")]

        with pytest.raises(ValueError, match=r"Cannot concatenate.*dimensions.*"):
            PandasIndex.concat(indexes, "x")

    def test_create_variables(self) -> None:
        # pandas has only Float64Index but variable dtype should be preserved
        data = np.array([1.1, 2.2, 3.3], dtype=np.float32)
        pd_idx = pd.Index(data, name="foo")
        index = PandasIndex(pd_idx, "x", coord_dtype=data.dtype)
        index_vars = {
            "foo": IndexVariable(
                "x", data, attrs={"unit": "m"}, encoding={"fill_value": 0.0}
            )
        }

        actual = index.create_variables(index_vars)
        assert_identical(actual["foo"], index_vars["foo"])
        assert actual["foo"].dtype == index_vars["foo"].dtype
        assert actual["foo"].dtype == index.coord_dtype

    def test_to_pandas_index(self) -> None:
        pd_idx = pd.Index([1, 2, 3], name="foo")
        index = PandasIndex(pd_idx, "x")
        assert index.to_pandas_index() is index.index

    def test_sel(self) -> None:
        # TODO: add tests that aren't just for edge cases
        index = PandasIndex(pd.Index([1, 2, 3]), "x")

        with pytest.raises(KeyError, match=r"not all values found"):
            index.sel({"x": [0]})
        with pytest.raises(KeyError):
            index.sel({"x": 0})
        with pytest.raises(ValueError, match=r"does not have a MultiIndex"):
            index.sel({"x": {"one": 0}})

    def test_sel_boolean(self) -> None:
        # index should be ignored and indexer dtype should not be coerced
        # see https://github.com/pydata/xarray/issues/5727
        index = PandasIndex(pd.Index([0.0, 2.0, 1.0, 3.0]), "x")
        actual = index.sel({"x": [False, True, False, True]})
        expected_dim_indexers = {"x": [False, True, False, True]}
        np.testing.assert_array_equal(
            actual.dim_indexers["x"], expected_dim_indexers["x"]
        )

    def test_sel_datetime(self) -> None:
        index = PandasIndex(
            pd.to_datetime(["2000-01-01", "2001-01-01", "2002-01-01"]), "x"
        )
        actual = index.sel({"x": "2001-01-01"})
        expected_dim_indexers = {"x": 1}
        assert actual.dim_indexers == expected_dim_indexers

        actual = index.sel({"x": index.to_pandas_index().to_numpy()[1]})
        assert actual.dim_indexers == expected_dim_indexers

    def test_sel_unsorted_datetime_index_raises(self) -> None:
        index = PandasIndex(pd.to_datetime(["2001", "2000", "2002"]), "x")
        with pytest.raises(KeyError):
            # pandas will try to convert this into an array indexer. We should
            # raise instead, so we can be sure the result of indexing with a
            # slice is always a view.
            index.sel({"x": slice("2001", "2002")})

    def test_equals(self) -> None:
        index1 = PandasIndex([1, 2, 3], "x")
        index2 = PandasIndex([1, 2, 3], "x")
        assert index1.equals(index2) is True

    def test_join(self) -> None:
        index1 = PandasIndex(["a", "aa", "aaa"], "x", coord_dtype="<U3")
        index2 = PandasIndex(["aa", "aaa", "aaaa"], "x", coord_dtype="<U4")

        # default join is the intersection ("inner")
        expected = PandasIndex(["aa", "aaa"], "x")
        actual = index1.join(index2)
        print(actual.index)  # NOTE(review): leftover debug print
        assert actual.equals(expected)
        assert actual.coord_dtype == "=U4"

        expected = PandasIndex(["a", "aa", "aaa", "aaaa"], "x")
        actual = index1.join(index2, how="outer")
        print(actual.index)  # NOTE(review): leftover debug print
        assert actual.equals(expected)
        assert actual.coord_dtype == "=U4"

    def test_reindex_like(self) -> None:
        index1 = PandasIndex([0, 1, 2], "x")
        index2 = PandasIndex([1, 2, 3, 4], "x")

        # missing labels map to -1
        expected = {"x": [1, 2, -1, -1]}
        actual = index1.reindex_like(index2)
        assert actual.keys() == expected.keys()
        np.testing.assert_array_equal(actual["x"], expected["x"])

        index3 = PandasIndex([1, 1, 2], "x")
        with pytest.raises(ValueError, match=r".*index has duplicate values"):
            index3.reindex_like(index2)

    def test_rename(self) -> None:
        index = PandasIndex(pd.Index([1, 2, 3], name="a"), "x", coord_dtype=np.int32)

        # shortcut
        new_index = index.rename({}, {})
        assert new_index is index

        new_index = index.rename({"a": "b"}, {})
        assert new_index.index.name == "b"
        assert new_index.dim == "x"
        assert new_index.coord_dtype == np.int32

        new_index = index.rename({}, {"x": "y"})
        assert new_index.index.name == "a"
        assert new_index.dim == "y"
        assert new_index.coord_dtype == np.int32

    def test_copy(self) -> None:
        expected = PandasIndex([1, 2, 3], "x", coord_dtype=np.int32)
        actual = expected.copy()

        assert actual.index.equals(expected.index)
        assert actual.index is not expected.index
        assert actual.dim == expected.dim
        assert actual.coord_dtype == expected.coord_dtype

    def test_getitem(self) -> None:
        pd_idx = pd.Index([1, 2, 3])
        expected = PandasIndex(pd_idx, "x", coord_dtype=np.int32)
        actual = expected[1:]

        assert actual.index.equals(pd_idx[1:])
        assert actual.dim == expected.dim
        assert actual.coord_dtype == expected.coord_dtype
| TestPandasIndex |
python | walkccc__LeetCode | solutions/2643. Row With Maximum Ones/2643.py | {
"start": 0,
"end": 241
class ____:
    def rowAndMaximumOnes(self, mat: list[list[int]]) -> list[int]:
        """Return ``[row_index, ones_count]`` for the first row of `mat`
        containing the most 1s (ties broken by lowest index)."""
        best_row, best_count = 0, 0
        for index, row in enumerate(mat):
            count = row.count(1)
            # strict '>' keeps the earliest row on ties
            if count > best_count:
                best_row, best_count = index, count
        return [best_row, best_count]
| Solution |
python | python-poetry__poetry | src/poetry/utils/isolated_build.py | {
"start": 2577,
"end": 3104
} | class ____(IsolatedBuildBaseError):
def __init__(self, requirements: Collection[str], output: str, error: str) -> None:
message = "\n\n".join(
(
f"Failed to install {', '.join(requirements)}.",
f"Output:\n{output}",
f"Error:\n{error}",
)
)
super().__init__(message)
self._requirements = requirements
@property
def requirements(self) -> Collection[str]:
return self._requirements
| IsolatedBuildInstallError |
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 8432,
"end": 9454
class ____:
    """Mixin for name objects backed by a single value (``self._value``,
    supplied by the composing class)."""

    def infer(self):
        # a value-backed name infers to exactly its value
        return ValueSet([self._value])

    def py__doc__(self):
        doc = self._value.py__doc__()
        if doc or not self._value.is_stub():
            return doc
        # stub with no docstring: try the non-stub counterparts
        from jedi.inference.gradual.conversion import convert_names
        names = convert_names([self], prefer_stub_to_compiled=False)
        if self in names:
            # conversion produced nothing new
            return doc
        return _merge_name_docs(names)

    def _get_qualified_names(self):
        return self._value.get_qualified_names()

    def get_root_context(self):
        if self.parent_context is None:  # A module
            return self._value.as_context()
        return super().get_root_context()

    def get_defining_qualified_value(self):
        context = self.parent_context
        if context is None or not (context.is_module() or context.is_class()):
            return None
        return self.parent_context.get_value()  # Might be None

    @property
    def api_type(self):
        return self._value.api_type
| ValueNameMixin |
python | pytorch__pytorch | torch/__init__.py | {
"start": 73007,
"end": 73233
} | class ____(_LegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal(stacklevel=3)
return self._dtype
@classproperty
def _dtype(self):
return torch.quint8
| QUInt8Storage |
python | huggingface__transformers | src/transformers/models/nougat/image_processing_nougat.py | {
"start": 2090,
"end": 24618
} | class ____(BaseImageProcessor):
r"""
Constructs a Nougat image processor.
Args:
do_crop_margin (`bool`, *optional*, defaults to `True`):
Whether to crop the image margins.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 896, "width": 672}`):
Size of the image after resizing. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_thumbnail (`bool`, *optional*, defaults to `True`):
Whether to resize the image using thumbnail method.
do_align_long_axis (`bool`, *optional*, defaults to `False`):
Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the images to the largest image size in the batch.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
Image standard deviation.
"""
model_input_names = ["pixel_values"]
valid_kwargs = NougatImageProcessorKwargs
def __init__(
self,
do_crop_margin: bool = True,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_thumbnail: bool = True,
do_align_long_axis: bool = False,
do_pad: bool = True,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 896, "width": 672}
size = get_size_dict(size)
self.do_crop_margin = do_crop_margin
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_thumbnail = do_thumbnail
self.do_align_long_axis = do_align_long_axis
self.do_pad = do_pad
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def python_find_non_zero(self, image: np.ndarray):
"""This is a reimplementation of a findNonZero function equivalent to cv2."""
non_zero_indices = np.column_stack(np.nonzero(image))
idxvec = non_zero_indices[:, [1, 0]]
idxvec = idxvec.reshape(-1, 1, 2)
return idxvec
def python_bounding_rect(self, coordinates):
"""This is a reimplementation of a BoundingRect function equivalent to cv2."""
min_values = np.min(coordinates, axis=(0, 1)).astype(int)
max_values = np.max(coordinates, axis=(0, 1)).astype(int)
x_min, y_min = min_values[0], min_values[1]
width = max_values[0] - x_min + 1
height = max_values[1] - y_min + 1
return x_min, y_min, width, height
def crop_margin(
self,
image: np.ndarray,
gray_threshold: int = 200,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the
threshold).
Args:
image (`np.ndarray`):
The image to be cropped.
gray_threshold (`int`, *optional*, defaults to `200`)
Value below which pixels are considered to be gray.
data_format (`ChannelDimension`, *optional*):
The channel dimension format of the output image. If unset, will use the inferred format from the
input.
input_data_format (`ChannelDimension`, *optional*):
The channel dimension format of the input image. If unset, will use the inferred format from the input.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = to_pil_image(image, input_data_format=input_data_format)
data = np.array(image.convert("L")).astype(np.uint8)
max_val = data.max()
min_val = data.min()
if max_val == min_val:
image = np.array(image)
image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)
image = (
to_channel_dimension_format(image, data_format, input_data_format)
if data_format is not None
else image
)
return image
data = (data - min_val) / (max_val - min_val) * 255
gray = data < gray_threshold
coords = self.python_find_non_zero(gray)
x_min, y_min, width, height = self.python_bounding_rect(coords)
image = image.crop((x_min, y_min, x_min + width, y_min + height))
image = np.array(image).astype(np.uint8)
image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)
image = (
to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
)
return image
# Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.align_long_axis
def align_long_axis(
self,
image: np.ndarray,
size: dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Align the long axis of the image to the longest axis of the specified size.
Args:
image (`np.ndarray`):
The image to be aligned.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to align the long axis to.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The aligned image.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = size["height"], size["width"]
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(image)
if input_data_format == ChannelDimension.LAST:
rot_axes = (0, 1)
elif input_data_format == ChannelDimension.FIRST:
rot_axes = (1, 2)
else:
raise ValueError(f"Unsupported data format: {input_data_format}")
if (output_width < output_height and input_width > input_height) or (
output_width > output_height and input_width < input_height
):
image = np.rot90(image, 3, axes=rot_axes)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def pad_image(
self,
image: np.ndarray,
size: dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Pad the image to the specified size at the top, bottom, left and right.
Args:
image (`np.ndarray`):
The image to be padded.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to pad the image to.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
output_height, output_width = size["height"], size["width"]
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
delta_width = output_width - input_width
delta_height = output_height - input_height
pad_top = delta_height // 2
pad_left = delta_width // 2
pad_bottom = delta_height - pad_top
pad_right = delta_width - pad_left
padding = ((pad_top, pad_bottom), (pad_left, pad_right))
return pad(image, padding, data_format=data_format, input_data_format=input_data_format)
# Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.thumbnail
def thumbnail(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
corresponding dimension of the specified size.
Args:
image (`np.ndarray`):
The image to be resized.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to resize the image to.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
The resampling filter to use.
data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = size["height"], size["width"]
# We always resize to the smallest of either the input or output size.
height = min(input_height, output_height)
width = min(input_width, output_width)
if height == input_height and width == input_width:
return image
if input_height > input_width:
width = int(input_width * height / input_height)
elif input_width > input_height:
height = int(input_height * width / input_width)
return resize(
image,
size=(height, width),
resample=resample,
reducing_gap=2.0,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
# Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resizes `image` to `(height, width)` specified by `size` using the PIL library.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size)
shortest_edge = min(size["height"], size["width"])
output_size = get_resize_output_image_size(
image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
)
resized_image = resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
return resized_image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_crop_margin: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_thumbnail: Optional[bool] = None,
do_align_long_axis: Optional[bool] = None,
do_pad: Optional[bool] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[Union[int, float]] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
do_crop_margin (`bool`, *optional*, defaults to `self.do_crop_margin`):
Whether to crop the image margins.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
size["width"]) with the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
Whether to resize the image using thumbnail method.
do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the images to the largest image size in the batch.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: defaults to the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_crop_margin = do_crop_margin if do_crop_margin is not None else self.do_crop_margin
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
do_pad = do_pad if do_pad is not None else self.do_pad
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_crop_margin:
images = [self.crop_margin(image, input_data_format=input_data_format) for image in images]
if do_align_long_axis:
images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]
if do_resize:
images = [
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_thumbnail:
images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]
if do_pad:
images = [self.pad_image(image=image, size=size, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["NougatImageProcessor"]
| NougatImageProcessor |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_unshard_params.py | {
"start": 27040,
"end": 29426
} | class ____(TestUnshardParamsBase):
@property
def world_size(self) -> int:
return 2
@skip_if_lt_x_gpu(2)
def test_unshard_params_from_forward_raises(self):
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = nn.Parameter(torch.zeros(5))
def forward(self, fsdp_module):
with fsdp_module.summon_full_params(fsdp_module):
pass
model = FSDP(MyModule()).to(device_type.type)
with self.assertRaisesRegex(
AssertionError, "Cannot manually unshard parameters during forward/backward"
):
model(model)
@skip_if_lt_x_gpu(2)
def test_unshard_params_from_backward_raises(self):
model = FSDP(nn.Linear(2, 1, device=device_type.type))
output = model(torch.ones(2, device=device_type.type))
def invalid_backward_hook(*args, **kwargs):
with FSDP.summon_full_params(model):
pass
self.assertTrue(output.requires_grad)
output.register_hook(invalid_backward_hook)
with self.assertRaisesRegex(
AssertionError, "Cannot manually unshard parameters during forward/backward"
):
output.backward()
@skip_if_lt_x_gpu(2)
def test_rank0_only_with_writeback_raises(self):
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
)
with self.assertRaisesRegex(NotImplementedError, "is not supported"):
with FSDP.summon_full_params(
nested_wrapped_module, rank0_only=True, writeback=True
):
pass
@skip_if_lt_x_gpu(2)
def test_offload_to_cpu_no_shard_raises(self):
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
{"sharding_strategy": ShardingStrategy.NO_SHARD},
)
with self.assertRaisesRegex(NotImplementedError, "is not supported"):
with FSDP.summon_full_params(
nested_wrapped_module, rank0_only=True, writeback=True
):
pass
if __name__ == "__main__":
run_tests()
| TestUnshardParamsErrors |
python | keon__algorithms | tests/test_array.py | {
"start": 12937,
"end": 13144
} | class ____(unittest.TestCase):
def test_top_1(self):
self.assertListEqual(top_1([1, 1, 2, 2, 3]), [1, 2])
self.assertListEqual(top_1([1, 2, 3, 324, 234, 23, 23, 1, 23, 23]), [23])
| TestTop1 |
python | pallets__click | src/click/types.py | {
"start": 19122,
"end": 19264
} | class ____(_NumberParamTypeBase):
name = "integer"
_number_class = int
def __repr__(self) -> str:
return "INT"
| IntParamType |
python | crytic__slither | slither/slithir/operations/operation.py | {
"start": 398,
"end": 781
} | class ____(abc.ABC):
@property
@abc.abstractmethod
def read(self):
"""
Return the list of variables READ
"""
pass # pylint: disable=unnecessary-pass
@property
@abc.abstractmethod
def used(self):
"""
Return the list of variables used
"""
pass # pylint: disable=unnecessary-pass
| AbstractOperation |
python | pytorch__pytorch | test/test_datapipe.py | {
"start": 78322,
"end": 90311
} | class ____(TestCase):
def _serialization_test_helper(self, datapipe, use_dill):
if use_dill:
serialized_dp = dill.dumps(datapipe)
deserialized_dp = dill.loads(serialized_dp)
else:
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
try:
self.assertEqual(list(datapipe), list(deserialized_dp))
except AssertionError as e:
print(f"{datapipe} is failing.")
raise e
def _serialization_test_for_single_dp(self, dp, use_dill=False):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp, use_dill)
# 2. Testing for serialization after DataPipe is partially read
it = iter(dp)
_ = next(it)
self._serialization_test_helper(dp, use_dill)
# 3. Testing for serialization after DataPipe is fully read
_ = list(dp)
self._serialization_test_helper(dp, use_dill)
def test_serializable(self):
picklable_datapipes: list = [
(dp.map.Batcher, None, (2,), {}),
(dp.map.Concater, None, (dp.map.SequenceWrapper(range(10)),), {}),
(dp.map.Mapper, None, (), {}),
(dp.map.Mapper, None, (_fake_fn,), {}),
(dp.map.Mapper, None, (partial(_fake_add, 1),), {}),
(dp.map.SequenceWrapper, range(10), (), {}),
(dp.map.Shuffler, dp.map.SequenceWrapper([0] * 5), (), {}),
(dp.map.Zipper, None, (dp.map.SequenceWrapper(range(10)),), {}),
]
for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
if custom_input is None:
custom_input = dp.map.SequenceWrapper(range(10))
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_single_dp(datapipe)
def test_serializable_with_dill(self):
"""Only for DataPipes that take in a function as argument"""
input_dp = dp.map.SequenceWrapper(range(10))
datapipes_with_lambda_fn: list[
tuple[type[MapDataPipe], tuple, dict[str, Any]]
] = [
(dp.map.Mapper, (lambda_fn1,), {}),
]
def _local_fns():
def _fn1(x):
return x
return _fn1
fn1 = _local_fns()
datapipes_with_local_fn: list[
tuple[type[MapDataPipe], tuple, dict[str, Any]]
] = [
(dp.map.Mapper, (fn1,), {}),
]
if HAS_DILL:
for dpipe, dp_args, dp_kwargs in (
datapipes_with_lambda_fn + datapipes_with_local_fn
):
_ = dill.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
else:
msgs = (
r"^Lambda function is not supported by pickle",
r"^Local function is not supported by pickle",
)
for dps, msg in zip(
(datapipes_with_lambda_fn, datapipes_with_local_fn), msgs
):
for dpipe, dp_args, dp_kwargs in dps:
with self.assertWarnsRegex(UserWarning, msg):
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
with self.assertRaises((pickle.PicklingError, AttributeError)):
pickle.dumps(datapipe)
def test_docstring(self):
"""
Ensure functional form of MapDataPipe has the correct docstring from
the class form.
Regression test for https://github.com/pytorch/data/issues/792.
"""
input_dp = dp.map.SequenceWrapper(range(10))
for dp_funcname in [
"batch",
"concat",
"map",
"shuffle",
"zip",
]:
docstring = pydoc.render_doc(
thing=getattr(input_dp, dp_funcname), forceload=True
)
assert f"(functional name: ``{dp_funcname}``)" in docstring
assert "Args:" in docstring
assert "Example:" in docstring or "Examples:" in docstring
def test_sequence_wrapper_datapipe(self):
seq = list(range(10))
input_dp = dp.map.SequenceWrapper(seq)
# Functional Test: all elements are equal in the same order
self.assertEqual(seq, list(input_dp))
# Functional Test: confirm deepcopy works by default
seq.append(11)
self.assertEqual(list(range(10)), list(input_dp)) # input_dp shouldn't have 11
# Functional Test: non-deepcopy version is working
seq2 = [1, 2, 3]
input_dp_non_deep = dp.map.SequenceWrapper(seq2, deepcopy=False)
seq2.append(4)
self.assertEqual(list(seq2), list(input_dp_non_deep)) # should have 4
# Reset Test: reset the DataPipe
seq = list(range(10))
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(
input_dp, n_elements_before_reset
)
self.assertEqual(list(range(5)), res_before_reset)
self.assertEqual(seq, res_after_reset)
# __len__ Test: inherits length from sequence
self.assertEqual(len(seq), len(input_dp))
def test_concat_mapdatapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.map.Concater()
with self.assertRaisesRegex(
TypeError, r"Expected all inputs to be `MapDataPipe`"
):
dp.map.Concater(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
for index in range(15):
self.assertEqual(
concat_dp[index], (list(range(10)) + list(range(5)))[index]
)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
def test_zip_mapdatapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper(range(5))
input_dp3 = dp.map.SequenceWrapper(range(15))
# Functional Test: requires at least one input DataPipe
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.map.Zipper()
# Functional Test: all inputs must be MapDataPipes
with self.assertRaisesRegex(
TypeError, r"Expected all inputs to be `MapDataPipe`"
):
dp.map.Zipper(input_dp1, ()) # type: ignore[arg-type]
# Functional Test: Zip the elements up as a tuples
zip_dp = input_dp1.zip(input_dp2, input_dp3)
self.assertEqual([(i, i, i) for i in range(5)], [zip_dp[i] for i in range(5)])
# Functional Test: Raise IndexError when index equal or exceed the length of the shortest DataPipe
with self.assertRaisesRegex(IndexError, r"out of range"):
input_dp1.zip(input_dp2, input_dp3)[5]
# Functional Test: Ensure `zip` can combine `Batcher` with others
dp1 = dp.map.SequenceWrapper(range(10))
shuffle_dp1 = dp1.batch(2)
dp2 = dp.map.SequenceWrapper(range(10))
shuffle_dp2 = dp2.batch(3)
zip_dp1 = shuffle_dp1.zip(shuffle_dp2)
self.assertEqual(4, len(list(zip_dp1)))
zip_dp2 = shuffle_dp1.zip(dp2)
self.assertEqual(5, len(list(zip_dp2)))
# __len__ Test: returns the length of the shortest DataPipe
zip_dp = input_dp1.zip(input_dp2, input_dp3)
self.assertEqual(5, len(zip_dp))
def test_shuffler_mapdatapipe(self):
input_dp1 = dp.map.SequenceWrapper(range(10))
input_dp2 = dp.map.SequenceWrapper({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})
# Functional Test: Assumes 0-index when indices is not given
shuffler_dp = input_dp1.shuffle()
self.assertEqual(set(range(10)), set(shuffler_dp))
# Functional Test: Custom indices are working
shuffler_dp = input_dp2.shuffle(indices=["a", "b", "c", "d", "e"])
self.assertEqual(set(range(1, 6)), set(shuffler_dp))
# Functional Test: With global seed
torch.manual_seed(123)
shuffler_dp = input_dp1.shuffle()
res = list(shuffler_dp)
torch.manual_seed(123)
self.assertEqual(list(shuffler_dp), res)
# Functional Test: Set seed
shuffler_dp = input_dp1.shuffle().set_seed(123)
res = list(shuffler_dp)
shuffler_dp.set_seed(123)
self.assertEqual(list(shuffler_dp), res)
# Functional Test: deactivate shuffling via set_shuffle
unshuffled_dp = input_dp1.shuffle().set_shuffle(False)
self.assertEqual(list(unshuffled_dp), list(input_dp1))
# Reset Test:
shuffler_dp = input_dp1.shuffle()
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(
shuffler_dp, n_elements_before_reset
)
self.assertEqual(5, len(res_before_reset))
for x in res_before_reset:
self.assertTrue(x in set(range(10)))
self.assertEqual(set(range(10)), set(res_after_reset))
# __len__ Test: returns the length of the input DataPipe
shuffler_dp = input_dp1.shuffle()
self.assertEqual(10, len(shuffler_dp))
# Serialization Test
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
shuffler_dp = input_dp1.shuffle()
it = iter(shuffler_dp)
for _ in range(2):
next(it)
shuffler_dp_copy = pickle.loads(pickle.dumps(shuffler_dp))
exp = list(it)
shuffler_dp_copy._snapshot_state = _SnapshotState.Restored
self.assertEqual(exp, list(shuffler_dp_copy))
def test_map_mapdatapipe(self):
arr = range(10)
input_dp = dp.map.SequenceWrapper(arr)
def fn(item, dtype=torch.float, *, sum=False):
data = torch.tensor(item, dtype=dtype)
return data if not sum else data.sum()
map_dp = input_dp.map(fn)
self.assertEqual(len(input_dp), len(map_dp))
for index in arr:
self.assertEqual(
map_dp[index], torch.tensor(input_dp[index], dtype=torch.float)
)
map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
self.assertEqual(len(input_dp), len(map_dp))
for index in arr:
self.assertEqual(
map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
)
def test_batch_mapdatapipe(self):
arr = list(range(13))
input_dp = dp.map.SequenceWrapper(arr)
# Functional Test: batches top level by default
batch_dp = dp.map.Batcher(input_dp, batch_size=2)
self.assertEqual(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12]], list(batch_dp)
)
# Functional Test: drop_last on command
batch_dp = dp.map.Batcher(input_dp, batch_size=2, drop_last=True)
self.assertEqual(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], list(batch_dp)
)
# Functional Test: nested batching
batch_dp_2 = batch_dp.batch(batch_size=3)
self.assertEqual(
[[[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [10, 11]]], list(batch_dp_2)
)
# Reset Test:
n_elements_before_reset = 3
res_before_reset, res_after_reset = reset_after_n_next_calls(
batch_dp, n_elements_before_reset
)
self.assertEqual([[0, 1], [2, 3], [4, 5]], res_before_reset)
self.assertEqual(
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]], res_after_reset
)
# __len__ Test:
self.assertEqual(6, len(batch_dp))
self.assertEqual(2, len(batch_dp_2))
| TestFunctionalMapDataPipe |
python | imageio__imageio | imageio/plugins/_bsdf.py | {
"start": 3490,
"end": 17263
class ____(object):
    """Instances of this class represent a BSDF encoder/decoder.

    It acts as a placeholder for a set of extensions and encoding/decoding
    options. Use this to predefine extensions and options for high
    performance encoding/decoding. For general use, see the functions
    `save()`, `encode()`, `load()`, and `decode()`.

    This implementation of BSDF supports streaming lists (keep adding
    to a list after writing the main file), lazy loading of blobs, and
    in-place editing of blobs (for streams opened with a+).

    Options for encoding:

    * compression (int or str): ``0`` or "no" for no compression (default),
      ``1`` or "zlib" for Zlib compression (same as zip files and PNG), and
      ``2`` or "bz2" for Bz2 compression (more compact but slower writing).
      Note that some BSDF implementations (e.g. JavaScript) may not support
      compression.
    * use_checksum (bool): whether to include a checksum with binary blobs.
    * float64 (bool): Whether to write floats as 64 bit (default) or 32 bit.

    Options for decoding:

    * load_streaming (bool): if True, and the final object in the structure was
      a stream, will make it available as a stream in the decoded object.
    * lazy_blob (bool): if True, bytes are represented as Blob objects that can
      be used to lazily access the data, and also overwrite the data if the
      file is open in a+ mode.
    """
    def __init__(self, extensions=None, **options):
        """Initialize with an iterable of Extension classes and codec options.

        When *extensions* is None, the module-level ``standard_extensions``
        are registered. Unknown keyword options raise via ``_parse_options``.
        """
        self._extensions = {}  # name -> extension
        self._extensions_by_cls = {}  # cls -> (name, extension.encode)
        if extensions is None:
            extensions = standard_extensions
        for extension in extensions:
            self.add_extension(extension)
        self._parse_options(**options)
    def _parse_options(
        self,
        compression=0,
        use_checksum=False,
        float64=True,
        load_streaming=False,
        lazy_blob=False,
    ):
        """Validate and store the encoding/decoding options (see class docstring)."""
        # Validate compression: accept the string aliases "no"/"zlib"/"bz2"
        # as well as the integer codes 0/1/2.
        if isinstance(compression, string_types):
            m = {"no": 0, "zlib": 1, "bz2": 2}
            compression = m.get(compression.lower(), compression)
        if compression not in (0, 1, 2):
            raise TypeError("Compression must be 0, 1, 2, " '"no", "zlib", or "bz2"')
        self._compression = compression
        # Other encoding args
        self._use_checksum = bool(use_checksum)
        self._float64 = bool(float64)
        # Decoding args
        self._load_streaming = bool(load_streaming)
        self._lazy_blob = bool(lazy_blob)
    def add_extension(self, extension_class):
        """Add an extension to this serializer instance, which must be
        a subclass of Extension. Can be used as a decorator.
        """
        # Check class
        if not (
            isinstance(extension_class, type) and issubclass(extension_class, Extension)
        ):
            raise TypeError("add_extension() expects a Extension class.")
        extension = extension_class()
        # Get name; the name is written into the file, so its length must fit
        # in the single-byte length prefix used by encode_type_id (max 250).
        name = extension.name
        if not isinstance(name, str):
            raise TypeError("Extension name must be str.")
        if len(name) == 0 or len(name) > 250:
            raise NameError(
                "Extension names must be nonempty and shorter " "than 251 chars."
            )
        if name in self._extensions:
            logger.warning(
                'BSDF warning: overwriting extension "%s", '
                "consider removing first" % name
            )
        # Normalize extension.cls to a list of classes (it may be empty,
        # a single class, or a tuple/list of classes).
        cls = extension.cls
        if not cls:
            clss = []
        elif isinstance(cls, (tuple, list)):
            clss = cls
        else:
            clss = [cls]
        for cls in clss:
            if not isinstance(cls, classtypes):
                raise TypeError("Extension classes must be types.")
        # Store: exact-class lookup table plus the by-name registry.
        for cls in clss:
            self._extensions_by_cls[cls] = name, extension.encode
        self._extensions[name] = extension
        return extension_class
    def remove_extension(self, name):
        """Remove a previously added extension by its unique name."""
        if not isinstance(name, str):
            raise TypeError("Extension name must be str.")
        if name in self._extensions:
            self._extensions.pop(name)
        # Also drop every class mapping that points at this extension name.
        for cls in list(self._extensions_by_cls.keys()):
            if self._extensions_by_cls[cls][0] == name:
                self._extensions_by_cls.pop(cls)
    def _encode(self, f, value, streams, ext_id):
        """Main encoder function.

        Writes *value* to file-like *f*. Each value starts with a one-byte
        type id (lowercase for plain values; encode_type_id uppercases it and
        appends the extension name when ext_id is not None). *streams*
        collects at most one active stream object for save() to verify.
        """
        x = encode_type_id
        if value is None:
            f.write(x(b"v", ext_id))  # V for void
        elif value is True:
            f.write(x(b"y", ext_id))  # Y for yes
        elif value is False:
            f.write(x(b"n", ext_id))  # N for no
        elif isinstance(value, integer_types):
            if -32768 <= value <= 32767:
                # NOTE(review): "h" has no "<" prefix, so this int16 is packed
                # in *native* byte order while _decode reads "<h"
                # (little-endian) — confirm this is safe on big-endian hosts.
                f.write(x(b"h", ext_id) + spack("h", value))  # H for short int
            else:
                f.write(x(b"i", ext_id) + spack("<q", value))  # I for int
        elif isinstance(value, float):
            if self._float64:
                f.write(x(b"d", ext_id) + spack("<d", value))  # D for double
            else:
                f.write(x(b"f", ext_id) + spack("<f", value))  # f for float
        elif isinstance(value, unicode_types):
            bb = value.encode("UTF-8")
            f.write(x(b"s", ext_id) + lencode(len(bb)))  # S for str
            f.write(bb)
        elif isinstance(value, (list, tuple)):
            f.write(x(b"l", ext_id) + lencode(len(value)))  # L for list
            for v in value:
                self._encode(f, v, streams, None)
        elif isinstance(value, dict):
            f.write(x(b"m", ext_id) + lencode(len(value)))  # M for mapping
            for key, v in value.items():
                # Keys must be identifier-like strings; they are written as
                # length-prefixed UTF-8 names before each value.
                if PY3:
                    assert key.isidentifier()  # faster
                else:  # pragma: no cover
                    assert _isidentifier(key)
                name_b = key.encode("UTF-8")
                f.write(lencode(len(name_b)))
                f.write(name_b)
                self._encode(f, v, streams, None)
        elif isinstance(value, bytes):
            f.write(x(b"b", ext_id))  # B for blob
            blob = Blob(
                value, compression=self._compression, use_checksum=self._use_checksum
            )
            blob._to_file(f)  # noqa
        elif isinstance(value, Blob):
            f.write(x(b"b", ext_id))  # B for blob
            value._to_file(f)  # noqa
        elif isinstance(value, BaseStream):
            # Initialize the stream: only write-mode ListStream is supported.
            # The 255 marker signals "unclosed streaming list" to the decoder.
            if value.mode != "w":
                raise ValueError("Cannot serialize a read-mode stream.")
            elif isinstance(value, ListStream):
                f.write(x(b"l", ext_id) + spack("<BQ", 255, 0))  # L for list
            else:
                raise TypeError("Only ListStream is supported")
            # Mark this as *the* stream, and activate the stream.
            # The save() function verifies this is the last written object.
            if len(streams) > 0:
                raise ValueError("Can only have one stream per file.")
            streams.append(value)
            value._activate(f, self._encode, self._decode)  # noqa
        else:
            if ext_id is not None:
                # NOTE(review): "wronfully" typo lives in the emitted message;
                # fixing it would change the runtime error text.
                raise ValueError(
                    "Extension %s wronfully encodes object to another "
                    "extension object (though it may encode to a list/dict "
                    "that contains other extension objects)." % ext_id
                )
            # Fast path: exact class match in the registration table.
            ex = self._extensions_by_cls.get(value.__class__, None)
            # Slow path: ask each extension whether it matches (covers
            # subclasses and duck typing).
            if ex is None:
                for name, c in self._extensions.items():
                    if c.match(self, value):
                        ex = name, c.encode
                        break
                else:
                    ex = None
            # Success or fail
            if ex is not None:
                ext_id2, extension_encode = ex
                # Re-encode the extension's representation, tagged with the
                # extension name so the decoder can convert it back.
                self._encode(f, extension_encode(self, value), streams, ext_id2)
            else:
                t = (
                    "Class %r is not a valid base BSDF type, nor is it "
                    "handled by an extension."
                )
                raise TypeError(t % value.__class__.__name__)
    def _decode(self, f):
        """Main decoder function.

        Reads one value from file-like *f* and returns it. An uppercase type
        id means the value was produced by an extension whose name follows
        as a length-prefixed string.
        """
        # Get value
        char = f.read(1)
        c = char.lower()
        # Conversion (uppercase value identifiers signify converted values)
        if not char:
            raise EOFError()
        elif char != c:
            n = strunpack("<B", f.read(1))[0]
            # if n == 253: n = strunpack('<Q', f.read(8))[0] # noqa - noneed
            ext_id = f.read(n).decode("UTF-8")
        else:
            ext_id = None
        if c == b"v":
            value = None
        elif c == b"y":
            value = True
        elif c == b"n":
            value = False
        elif c == b"h":
            value = strunpack("<h", f.read(2))[0]
        elif c == b"i":
            value = strunpack("<q", f.read(8))[0]
        elif c == b"f":
            value = strunpack("<f", f.read(4))[0]
        elif c == b"d":
            value = strunpack("<d", f.read(8))[0]
        elif c == b"s":
            # Length prefix: one byte, or 253 followed by a uint64 length.
            n_s = strunpack("<B", f.read(1))[0]
            if n_s == 253:
                n_s = strunpack("<Q", f.read(8))[0]  # noqa
            value = f.read(n_s).decode("UTF-8")
        elif c == b"l":
            n = strunpack("<B", f.read(1))[0]
            if n >= 254:
                # Streaming list: 254 = closed (count follows), 255 = open.
                closed = n == 254
                n = strunpack("<Q", f.read(8))[0]
                if self._load_streaming:
                    value = ListStream(n if closed else "r")
                    value._activate(f, self._encode, self._decode)  # noqa
                elif closed:
                    value = [self._decode(f) for i in range(n)]
                else:
                    # Open-ended stream: keep decoding items until EOF.
                    value = []
                    try:
                        while True:
                            value.append(self._decode(f))
                    except EOFError:
                        pass
            else:
                # Normal
                if n == 253:
                    n = strunpack("<Q", f.read(8))[0]  # noqa
                value = [self._decode(f) for i in range(n)]
        elif c == b"m":
            value = dict()
            n = strunpack("<B", f.read(1))[0]
            if n == 253:
                n = strunpack("<Q", f.read(8))[0]  # noqa
            for i in range(n):
                n_name = strunpack("<B", f.read(1))[0]
                if n_name == 253:
                    n_name = strunpack("<Q", f.read(8))[0]  # noqa
                assert n_name > 0
                name = f.read(n_name).decode("UTF-8")
                value[name] = self._decode(f)
        elif c == b"b":
            if self._lazy_blob:
                # Defer reading the bytes; the Blob remembers the file offset.
                value = Blob((f, True))
            else:
                blob = Blob((f, False))
                value = blob.get_bytes()
        else:
            raise RuntimeError("Parse error %r" % char)
        # Convert value if we have an extension for it; unknown extensions
        # fall through to the raw representation with a warning.
        if ext_id is not None:
            extension = self._extensions.get(ext_id, None)
            if extension is not None:
                value = extension.decode(self, value)
            else:
                logger.warning("BSDF warning: no extension found for %r" % ext_id)
        return value
    def encode(self, ob):
        """Save the given object to bytes."""
        f = BytesIO()
        self.save(f, ob)
        return f.getvalue()
    def save(self, f, ob):
        """Write the given object to the given file object."""
        # File header: magic string plus two one-byte version numbers.
        f.write(b"BSDF")
        f.write(struct.pack("<B", VERSION[0]))
        f.write(struct.pack("<B", VERSION[1]))
        # Prepare streaming, this list will have 0 or 1 item at the end
        streams = []
        self._encode(f, ob, streams, None)
        # Verify that stream object was at the end, and add initial elements
        if len(streams) > 0:
            stream = streams[0]
            if stream._start_pos != f.tell():
                raise ValueError(
                    "The stream object must be " "the last object to be encoded."
                )
    def decode(self, bb):
        """Load the data structure that is BSDF-encoded in the given bytes."""
        f = BytesIO(bb)
        return self.load(f)
    def load(self, f):
        """Load a BSDF-encoded object from the given file object."""
        # Check magic string
        f4 = f.read(4)
        if f4 != b"BSDF":
            raise RuntimeError("This does not look like a BSDF file: %r" % f4)
        # Check version: a different major version is a hard error, a newer
        # minor version only warns (format is forward-compatible in minors).
        major_version = strunpack("<B", f.read(1))[0]
        minor_version = strunpack("<B", f.read(1))[0]
        file_version = "%i.%i" % (major_version, minor_version)
        if major_version != VERSION[0]:  # major version should be 2
            t = (
                "Reading file with different major version (%s) "
                "from the implementation (%s)."
            )
            raise RuntimeError(t % (__version__, file_version))
        if minor_version > VERSION[1]:  # minor should be < ours
            t = (
                "BSDF warning: reading file with higher minor version (%s) "
                "than the implementation (%s)."
            )
            logger.warning(t % (__version__, file_version))
        return self._decode(f)
# %% Streaming and blob-files
| BsdfSerializer |
python | simonw__datasette | datasette/views/special.py | {
"start": 6415,
"end": 12329
class ____(BaseView):
    # URL name under which this debug view is registered.
    name = "allowed"
    # This view handles its own JSON rendering; no automatic .json variant.
    has_json_alternate = False
    async def get(self, request):
        """Serve the HTML debug form, or (with a format suffix) the JSON payload
        listing the resources the current actor may perform *action* on."""
        await self.ds.refresh_schemas()
        # Check if user has permissions-debug (to show sensitive fields)
        has_debug_permission = await self.ds.allowed(
            action="permissions-debug", actor=request.actor
        )
        # Check if this is a request for JSON (has .json extension)
        as_format = request.url_vars.get("format")
        if not as_format:
            # Render the HTML form (even if query parameters are present)
            # Put most common/interesting actions first
            priority_actions = [
                "view-instance",
                "view-database",
                "view-table",
                "view-query",
                "execute-sql",
                "insert-row",
                "update-row",
                "delete-row",
            ]
            actions = list(self.ds.actions.keys())
            # Priority actions first (in order), then remaining alphabetically
            sorted_actions = [a for a in priority_actions if a in actions]
            sorted_actions.extend(
                sorted(a for a in actions if a not in priority_actions)
            )
            return await self.render(
                ["debug_allowed.html"],
                request,
                {
                    "supported_actions": sorted_actions,
                    "has_debug_permission": has_debug_permission,
                },
            )
        payload, status = await self._allowed_payload(request, has_debug_permission)
        headers = {}
        if self.ds.cors:
            add_cors_headers(headers)
        return Response.json(payload, status=status, headers=headers)
    async def _allowed_payload(self, request, has_debug_permission):
        """Build the (payload_dict, http_status) tuple for the JSON endpoint.

        Validates query parameters (action, parent/child filters, pagination),
        gathers the allowed resources, then paginates in memory.
        """
        action = request.args.get("action")
        if not action:
            return {"error": "action parameter is required"}, 400
        if action not in self.ds.actions:
            return {"error": f"Unknown action: {action}"}, 404
        # Only dict-shaped actors are meaningful here; anything else is
        # treated as anonymous.
        actor = request.actor if isinstance(request.actor, dict) else None
        actor_id = actor.get("id") if actor else None
        parent_filter = request.args.get("parent")
        child_filter = request.args.get("child")
        if child_filter and not parent_filter:
            return {"error": "parent must be provided when child is specified"}, 400
        try:
            page = int(request.args.get("page", "1"))
            page_size = int(request.args.get("page_size", "50"))
        except ValueError:
            return {"error": "page and page_size must be integers"}, 400
        if page < 1:
            return {"error": "page must be >= 1"}, 400
        if page_size < 1:
            return {"error": "page_size must be >= 1"}, 400
        # Over-large page sizes are silently clamped rather than rejected.
        max_page_size = 200
        if page_size > max_page_size:
            page_size = max_page_size
        offset = (page - 1) * page_size
        # Use the simplified allowed_resources method
        # Collect all resources with optional reasons for debugging
        try:
            allowed_rows = []
            result = await self.ds.allowed_resources(
                action=action,
                actor=actor,
                parent=parent_filter,
                include_reasons=has_debug_permission,
            )
            async for resource in result.all():
                parent_val = resource.parent
                child_val = resource.child
                # Build resource path: "/", "/<parent>" or "/<parent>/<child>"
                if parent_val is None:
                    resource_path = "/"
                elif child_val is None:
                    resource_path = f"/{parent_val}"
                else:
                    resource_path = f"/{parent_val}/{child_val}"
                row = {
                    "parent": parent_val,
                    "child": child_val,
                    "resource": resource_path,
                }
                # Add reason if we have it (from include_reasons=True)
                if has_debug_permission and hasattr(resource, "reasons"):
                    row["reason"] = resource.reasons
                allowed_rows.append(row)
        except Exception:
            # If catalog tables don't exist yet, return empty results
            # NOTE(review): this broad except also masks unrelated failures
            # inside allowed_resources — confirm that is intentional.
            return (
                {
                    "action": action,
                    "actor_id": actor_id,
                    "page": page,
                    "page_size": page_size,
                    "total": 0,
                    "items": [],
                },
                200,
            )
        # Apply child filter if specified
        if child_filter is not None:
            allowed_rows = [row for row in allowed_rows if row["child"] == child_filter]
        # Pagination (in memory, after filtering)
        total = len(allowed_rows)
        paged_rows = allowed_rows[offset : offset + page_size]
        # Items are already in the right format
        items = paged_rows
        def build_page_url(page_number):
            # Re-build the query string, preserving all parameters except the
            # pagination ones, which are replaced.
            pairs = []
            for key in request.args:
                if key in {"page", "page_size"}:
                    continue
                for value in request.args.getlist(key):
                    pairs.append((key, value))
            pairs.append(("page", str(page_number)))
            pairs.append(("page_size", str(page_size)))
            query = urllib.parse.urlencode(pairs)
            return f"{request.path}?{query}"
        response = {
            "action": action,
            "actor_id": actor_id,
            "page": page,
            "page_size": page_size,
            "total": total,
            "items": items,
        }
        # next_url / previous_url only appear when the neighbor page exists.
        if total > offset + page_size:
            response["next_url"] = build_page_url(page + 1)
        if page > 1:
            response["previous_url"] = build_page_url(page - 1)
        return response, 200
| AllowedResourcesView |
python | Netflix__metaflow | test/unit/inheritance/flows/mutator_with_derived_config_base.py | {
"start": 2164,
"end": 2453
class ____(FlowSpec):
    """
    Base class with mutator that will use config from derived class.

    The mutator looks for 'runtime_config', which will be defined in BaseC
    (a derived class) — i.e. this base intentionally references configuration
    it does not define itself.
    """
    # Plain Metaflow parameter declared on the base; inherited by derived flows.
    base_param = Parameter("base_param", help="Base parameter", default="base_value")
| BaseA |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_gcs.py | {
"start": 13158,
"end": 14236
class ____:
    """Unit tests for ``GCSTimeSpanFileTransformOperator.interpolate_prefix``."""

    def test_execute(self):
        """Prefixes are strftime-interpolated against the given datetime; ``None`` passes through."""
        interp_dt = datetime(2015, 2, 1, 15, 16, 17, 345, tzinfo=timezone.utc)
        # A missing prefix must stay missing rather than become a string.
        assert GCSTimeSpanFileTransformOperator.interpolate_prefix(None, interp_dt) is None
        cases = [
            ("prefix_without_date", "prefix_without_date"),
            ("prefix_with_year_%Y", "prefix_with_year_2015"),
            (
                "prefix_with_year_month_day/%Y/%m/%d/",
                "prefix_with_year_month_day/2015/02/01/",
            ),
            # "%%" is the strftime escape for a literal percent sign.
            (
                "prefix_with_year_month_day_and_percent_%%/%Y/%m/%d/",
                "prefix_with_year_month_day_and_percent_%/2015/02/01/",
            ),
        ]
        for template, expected in cases:
            assert (
                GCSTimeSpanFileTransformOperator.interpolate_prefix(template, interp_dt)
                == expected
            )
| TestGCSTimeSpanFileTransformOperatorDateInterpolation |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_opaque_device_configuration.py | {
"start": 383,
"end": 5952
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'driver': 'str',
        'parameters': 'object'
    }

    attribute_map = {
        'driver': 'driver',
        'parameters': 'parameters'
    }

    def __init__(self, driver=None, parameters=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1OpaqueDeviceConfiguration - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to the default client configuration when none is supplied.
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration()
        )
        self._driver = None
        self._parameters = None
        self.discriminator = None
        # Assign through the property setters so validation runs.
        self.driver = driver
        self.parameters = parameters

    @property
    def driver(self):
        """Gets the driver of this V1beta1OpaqueDeviceConfiguration.

        Driver is used to determine which kubelet plugin needs to be passed
        these configuration parameters. Must be a DNS subdomain and should end
        with a DNS domain owned by the vendor of the driver.

        :return: The driver of this V1beta1OpaqueDeviceConfiguration.
        :rtype: str
        """
        return self._driver

    @driver.setter
    def driver(self, driver):
        """Sets the driver of this V1beta1OpaqueDeviceConfiguration.

        Required field: rejects ``None`` when client-side validation is on.

        :param driver: The driver of this V1beta1OpaqueDeviceConfiguration.
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and driver is None:  # noqa: E501
            raise ValueError("Invalid value for `driver`, must not be `None`")  # noqa: E501
        self._driver = driver

    @property
    def parameters(self):
        """Gets the parameters of this V1beta1OpaqueDeviceConfiguration.

        Parameters can contain arbitrary data; the driver developer is
        responsible for validation and versioning. The raw data must be at
        most 10 Ki.

        :return: The parameters of this V1beta1OpaqueDeviceConfiguration.
        :rtype: object
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this V1beta1OpaqueDeviceConfiguration.

        Required field: rejects ``None`` when client-side validation is on.

        :param parameters: The parameters of this V1beta1OpaqueDeviceConfiguration.
        :type: object
        """
        if self.local_vars_configuration.client_side_validation and parameters is None:  # noqa: E501
            raise ValueError("Invalid value for `parameters`, must not be `None`")  # noqa: E501
        self._parameters = parameters

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, V1beta1OpaqueDeviceConfiguration):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, V1beta1OpaqueDeviceConfiguration):
            return self.to_dict() != other.to_dict()
        return True
| V1beta1OpaqueDeviceConfiguration |
python | google__pytype | pytype/errors/errors.py | {
"start": 14480,
"end": 19577
class ____:
    """A stream of errors."""
    def __init__(self, src: str):
        # src is the source text of the file being analyzed; it is attached
        # to every Error created through this log.
        self._errors = []
        # An error filter (initially None)
        self._filter = None
        self._src = src
    def __len__(self):
        return len(self._errors)
    def __iter__(self):
        return iter(self._errors)
    def __getitem__(self, index):
        return self._errors[index]
    def copy_from(self, errors, stack):
        """Re-record the given errors into this log against a new stack."""
        for e in errors:
            # Bind the original error name so the re-created error keeps it.
            with _CURRENT_ERROR_NAME.bind(e.name):
                self.error(
                    stack,
                    e._message,
                    e.details,
                    e.keyword,
                    e.bad_call,  # pylint: disable=protected-access
                    e.keyword_context,
                )
    def is_valid_error_name(self, name):
        """Return True iff name was defined in an @error_name() decorator."""
        return name in _ERROR_NAMES
    def set_error_filter(self, filt):
        """Set the error filter.
        Args:
          filt: A function or callable object that accepts a single argument of type
            Error and returns True if that error should be included in the log. A
            filter of None will add all errors.
            NOTE: The filter may adjust some properties of the error.
        """
        self._filter = filt
    def has_error(self):
        """Return true iff an Error with SEVERITY_ERROR is present."""
        # pylint: disable=protected-access
        return any(e._severity == SEVERITY_ERROR for e in self._errors)
    def _add(self, error):
        # Apply the filter (if any) before recording the error.
        if self._filter is None or self._filter(error):
            _log.info("Added error to log: %s\n%s", error.name, error)
            # The stack trace is only computed when debug logging is on.
            if _log.isEnabledFor(logging.DEBUG):
                _log.debug(debug.stack_trace(limit=1).rstrip())
            self._errors.append(error)
    def warn(self, stack, message, *args):
        """Record a warning-severity message (printf-style args)."""
        self._add(
            Error.with_stack(stack, SEVERITY_WARNING, message % args, src=self._src)
        )
    def error(
        self,
        stack,
        message,
        details=None,
        keyword=None,
        bad_call=None,
        keyword_context=None,
        line=None,
    ):
        """Record an error-severity message; *line* overrides the stack's line."""
        err = Error.with_stack(
            stack,
            SEVERITY_ERROR,
            message,
            details=details,
            keyword=keyword,
            bad_call=bad_call,
            keyword_context=keyword_context,
            src=self._src,
        )
        if line:
            err.set_line(line)
        self._add(err)
    @contextlib.contextmanager
    def checkpoint(self):
        """Record errors without adding them to the errorlog."""
        _log.info("Checkpointing errorlog at %d errors", len(self._errors))
        checkpoint = CheckPoint(self._errors)
        try:
            yield checkpoint
        finally:
            # Errors added inside the with-block are removed again here.
            checkpoint.revert()
            _log.info(
                "Restored errorlog to checkpoint: %d errors reverted",
                len(checkpoint.errors),
            )
    def print_to_csv_file(self, fi: IO[str]):
        """Print the errorlog to a csv file."""
        csv_file = csv.writer(fi, delimiter=",", lineterminator="\n")
        for error in self.unique_sorted_errors():
            # pylint: disable=protected-access
            # Merge details and traceback into the single CSV details column.
            if error._details and error._traceback:
                details = error._details + "\n\n" + error._traceback
            elif error._traceback:
                details = error._traceback
            else:
                details = error._details
            csv_file.writerow(
                [error._filename, error._line, error._name, error._message, details]
            )
    def print_to_file(self, fi: IO[str], *, color: bool = False):
        """Write one line per unique error to the given text file object."""
        for error in self.unique_sorted_errors():
            print(error.as_string(color=color), file=fi)
    def unique_sorted_errors(self):
        """Gets the unique errors in this log, sorted on filename and line."""
        unique_errors = {}
        for error in self._sorted_errors():
            error_without_traceback = error.get_unique_representation()
            if error_without_traceback not in unique_errors:
                unique_errors[error_without_traceback] = [error]
                continue
            errors = unique_errors[error_without_traceback]
            for previous_error in list(errors):  # make a copy, since we modify errors
                traceback_cmp = _compare_traceback_strings(
                    error.traceback, previous_error.traceback
                )
                if traceback_cmp is None:
                    # We have multiple bad call sites, e.g.,
                    #   def f(x): x + 42
                    #   f("hello")  # error
                    #   f("world")  # same error, different backtrace
                    # so we'll report this error multiple times with different backtraces.
                    continue
                elif traceback_cmp < 0:
                    # If the current traceback is shorter, use the current error instead
                    # of the previous one.
                    errors.remove(previous_error)
                else:
                    # One of the previous errors has a shorter traceback than the current
                    # one, so the latter can be discarded.
                    break
            else:
                # for/else: only reached when no previous error superseded
                # this one (no break) — keep it, up to MAX_TRACEBACKS copies.
                if len(errors) < MAX_TRACEBACKS:
                    errors.append(error)
        return sum(unique_errors.values(), [])
    def _sorted_errors(self):
        # Sort by (filename, line); errors without a filename sort first.
        return sorted(self._errors, key=lambda x: (x.filename or "", x.line))
    def print_to_stderr(self, *, color=True):
        """Convenience wrapper: print the log to sys.stderr."""
        self.print_to_file(sys.stderr, color=color)
    def __str__(self):
        f = io.StringIO()
        self.print_to_file(f)
        return f.getvalue()
| ErrorLog |
python | ray-project__ray | python/ray/serve/_private/proxy_response_generator.py | {
"start": 2637,
"end": 6491
class ____(_ProxyResponseGeneratorBase):
    """Wraps a unary DeploymentResponse or streaming DeploymentResponseGenerator.
    In the case of a unary DeploymentResponse, __anext__ will only ever return one
    result.
    """
    def __init__(
        self,
        response: Union[DeploymentResponse, DeploymentResponseGenerator],
        *,
        timeout_s: Optional[float] = None,
        disconnected_task: Optional[asyncio.Task] = None,
        result_callback: Optional[Callable[[Any], Any]] = None,
    ):
        # timeout_s: overall deadline for the whole response (None = no limit).
        # disconnected_task: completes when the client disconnects; used to
        #   cancel the underlying response early.
        # result_callback: applied to every result before it is yielded.
        super().__init__(
            timeout_s=timeout_s,
            disconnected_task=disconnected_task,
            result_callback=result_callback,
        )
        self._done = False
        self._response = response
    def cancelled(self) -> bool:
        # Delegates to the wrapped response's cancellation state.
        return self._response.cancelled()
    async def __anext__(self):
        """Return the next result; raises StopAsyncIteration once exhausted.

        Any failure (cancellation, timeout, application error) marks the
        generator as done so subsequent calls stop iterating.
        """
        if self._done:
            raise StopAsyncIteration
        try:
            if isinstance(self._response, DeploymentResponseGenerator):
                result = await self._get_next_streaming_result()
            else:
                # Unary responses yield exactly one result.
                result = await self._get_unary_result()
                self._done = True
            if self._result_callback is not None:
                result = self._result_callback(result)
        except asyncio.CancelledError as e:
            # This is specifically for gRPC. The cancellation can happen from client
            # dropped connection before the request is completed. If self._response is
            # not already cancelled, we want to explicitly cancel the task, so it
            # doesn't waste cluster resource in this case and can be terminated
            # gracefully.
            if not self._response.cancelled():
                self._response.cancel()
            self._done = True
            raise e from None
        except Exception as e:
            self._done = True
            raise e from None
        return result
    async def _await_response_anext(self) -> Any:
        # Separate coroutine so it can be wrapped in an asyncio.Task.
        return await self._response.__anext__()
    async def _get_next_streaming_result(self) -> Any:
        """Await the next streamed item, racing it against disconnect/timeout.

        Raises asyncio.CancelledError on client disconnect and TimeoutError
        when the remaining time budget is exhausted; both cancel the response.
        """
        next_result_task = asyncio.create_task(self._await_response_anext())
        tasks = [next_result_task]
        if self._disconnected_task is not None:
            tasks.append(self._disconnected_task)
        done, _ = await asyncio.wait(
            tasks,
            return_when=FIRST_COMPLETED,
            # The deadline applies to the whole stream, so recompute what is
            # left of it for each item.
            timeout=calculate_remaining_timeout(
                timeout_s=self._timeout_s,
                start_time_s=self._start_time_s,
                curr_time_s=time.time(),
            ),
        )
        if next_result_task in done:
            return next_result_task.result()
        elif self._disconnected_task in done:
            # Client went away: stop awaiting the item and cancel upstream.
            next_result_task.cancel()
            next_result_task.add_done_callback(swallow_cancelled)
            self._response.cancel()
            raise asyncio.CancelledError()
        else:
            # asyncio.wait timed out (returned with nothing done).
            next_result_task.cancel()
            next_result_task.add_done_callback(swallow_cancelled)
            self._response.cancel()
            raise TimeoutError()
    async def _await_response(self) -> Any:
        # Separate coroutine so it can be wrapped in an asyncio.Task.
        return await self._response
    async def _get_unary_result(self) -> Any:
        """Await the single unary result, racing it against disconnect/timeout."""
        result_task = asyncio.create_task(self._await_response())
        tasks = [result_task]
        if self._disconnected_task is not None:
            tasks.append(self._disconnected_task)
        done, _ = await asyncio.wait(
            tasks, return_when=FIRST_COMPLETED, timeout=self._timeout_s
        )
        if result_task in done:
            return result_task.result()
        elif self._disconnected_task in done:
            self._response.cancel()
            raise asyncio.CancelledError()
        else:
            self._response.cancel()
            raise TimeoutError()
| ProxyResponseGenerator |
python | walkccc__LeetCode | solutions/1021. Remove Outermost Parentheses/1021.py | {
"start": 0,
"end": 314
} | class ____:
def removeOuterParentheses(self, s: str) -> str:
ans = []
opened = 0
for c in s:
if c == '(':
opened += 1
if opened > 1:
ans.append(c)
else: # c == ')'
opened -= 1
if opened > 0:
ans.append(c)
return ''.join(ans)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/collective_all_reduce_strategy.py | {
"start": 11538,
"end": 12856
class ____(distribute_lib.StrategyV1):
  # Reuse the V2 strategy's docstring verbatim.
  __doc__ = CollectiveAllReduceStrategy.__doc__
  # The starting number for collective keys. This should only be set in tests.
  _collective_key_base = 0
  def __init__(
      self,
      communication=collective_util.CommunicationImplementation.AUTO,
      cluster_resolver=None,
  ):
    """Initializes the object.

    Args:
      communication: the CommunicationImplementation used for collectives
        (AUTO lets the runtime pick).
      cluster_resolver: optional cluster resolver describing the multi-worker
        cluster; None falls back to the extended strategy's default.
    """
    communication_options = collective_util.Options(
        implementation=communication
    )
    super(CollectiveAllReduceStrategyV1, self).__init__(
        CollectiveAllReduceExtended(
            self,
            cluster_resolver=cluster_resolver,
            communication_options=communication_options,
        )
    )
    # Record usage metrics: strategy name plus cluster shape.
    distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
        "MultiWorkerMirroredStrategy"
    )
    # pylint: disable=protected-access
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_workers"
    ).set(self.extended._num_workers)
    # The GPU-count gauge reports 0 when the local devices are not GPUs.
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_gpu_per_worker"
    ).set(
        self.extended._num_devices_per_worker
        if self.extended._local_device_type == "GPU"
        else 0
    )
def _is_gpu_device(device):
  """Return True iff the given device string names a GPU device."""
  spec = tf_device.DeviceSpec.from_string(device)
  return spec.device_type == "GPU"
| CollectiveAllReduceStrategyV1 |
python | getsentry__sentry | src/sentry/db/models/utils.py | {
"start": 3709,
"end": 4524
class ____(Generic[FieldSetType, FieldGetType]):
    """
    A descriptor that invokes `to_python` when attributes are set.

    This provides backwards compatibility for fields that used to use
    SubfieldBase which will be removed in Django1.10
    """

    def __init__(self, field: Field[FieldSetType, FieldGetType]) -> None:
        self.field = field

    @overload
    def __get__(self, inst: Model, owner: type[Any]) -> Any: ...

    @overload
    def __get__(self, inst: None, owner: type[Any]) -> Self: ...

    def __get__(self, inst: Model | None, owner: type[Any]) -> Self | Any:
        # Class-level access returns the descriptor itself.
        if inst is not None:
            return inst.__dict__[self.field.name]
        return self

    def __set__(self, obj: Model, value: Any) -> None:
        # Normalize through the field before storing on the instance.
        coerced = self.field.to_python(value)
        obj.__dict__[self.field.name] = coerced
| Creator |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_connections.py | {
"start": 1022,
"end": 1993
class ____:
    """Tests for the ``/connections/hook_meta`` UI endpoint."""

    @skip_if_force_lowest_dependencies_marker
    def test_hook_meta_data(self, test_client):
        # Hook metadata comes from provider info, so serving it must not
        # issue any database queries.
        with assert_queries_count(0):
            response = test_client.get("/connections/hook_meta")
        hooks = response.json()
        connection_types = {entry["connection_type"] for entry in hooks}
        assert "generic" in connection_types
        assert "fs" in connection_types
        for entry in hooks:
            if entry["connection_type"] == "fs":
                assert entry["hook_name"] == "File (path)"

    def test_should_respond_401(self, unauthenticated_test_client):
        response = unauthenticated_test_client.get("/connections/hook_meta")
        assert response.status_code == 401

    def test_should_respond_403(self, unauthorized_test_client):
        response = unauthorized_test_client.get("/connections/hook_meta")
        assert response.status_code == 403
| TestHookMetaData |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/instance/methods/storage_methods.py | {
"start": 721,
"end": 4288
class ____:
    """Mixin class providing storage capabilities for DagsterInstance.

    Collects the non-public storage-related methods formerly on StorageDomain;
    public storage methods remain directly on DagsterInstance.
    """

    # These attributes are provided by DagsterInstance
    _event_storage: "EventLogStorage"
    _run_storage: "RunStorage"
    _schedule_storage: Optional["ScheduleStorage"]
    _local_artifact_storage: "LocalArtifactStorage"
    _defs_state_storage: Optional["DefsStateStorage"]

    @traced
    def get_latest_storage_id_by_partition(
        self,
        asset_key: "AssetKey",
        event_type: "DagsterEventType",
        partitions: Optional[set[str]] = None,
    ) -> Mapping[str, int]:
        """Return a mapping of partition -> latest materialization storage id
        for the given asset key, optionally restricted to *partitions*."""
        event_storage = self._event_storage
        return event_storage.get_latest_storage_id_by_partition(
            asset_key, event_type, partitions
        )

    @traced
    def get_paginated_dynamic_partitions(
        self,
        partitions_def_name: str,
        limit: int,
        ascending: bool,
        cursor: Optional[str] = None,
    ) -> PaginatedResults[str]:
        """Return a page of partition keys for a DynamicPartitionsDefinition.

        Args:
            partitions_def_name (str): Name of the DynamicPartitionsDefinition.
            limit (int): Maximum number of partition keys in the page.
            ascending (bool): Ordering of the returned partitions.
            cursor (Optional[str]): Pagination cursor; None starts from the top.
        """
        return self._event_storage.get_paginated_dynamic_partitions(
            partitions_def_name=partitions_def_name,
            limit=limit,
            ascending=ascending,
            cursor=cursor,
        )

    def optimize_for_webserver(
        self,
        statement_timeout: int,
        pool_recycle: int,
        max_overflow: int,
    ) -> None:
        """Apply webserver-oriented connection-pool settings to every storage."""
        pool_kwargs = {
            "statement_timeout": statement_timeout,
            "pool_recycle": pool_recycle,
            "max_overflow": max_overflow,
        }
        if self._schedule_storage:
            self._schedule_storage.optimize_for_webserver(**pool_kwargs)
        self._run_storage.optimize_for_webserver(**pool_kwargs)
        self._event_storage.optimize_for_webserver(**pool_kwargs)

    def reindex(self, print_fn: PrintFn = lambda _: None) -> None:
        """Run any pending reindex/optimize migrations, reporting via print_fn."""
        print_fn("Checking for reindexing...")
        for migrate_step in (
            self._event_storage.reindex_events,
            self._event_storage.reindex_assets,
            self._run_storage.optimize,
        ):
            migrate_step(print_fn)
        if self._schedule_storage:
            self._schedule_storage.optimize(print_fn)
        print_fn("Done.")

    def dispose(self) -> None:
        """Release resources held by the artifact, run, and event storages."""
        for storage in (
            self._local_artifact_storage,
            self._run_storage,
            self._event_storage,
        ):
            storage.dispose()

    def file_manager_directory(self, run_id: str) -> str:
        """Local file-manager directory for the given run."""
        return self._local_artifact_storage.file_manager_dir(run_id)

    def storage_directory(self) -> str:
        """Root directory of the local artifact storage."""
        return self._local_artifact_storage.storage_dir

    def schedules_directory(self) -> str:
        """Directory used for schedule-related local artifacts."""
        return self._local_artifact_storage.schedules_dir
| StorageMethods |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 792425,
"end": 792977
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "label", "labelable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
label = sgqlc.types.Field(sgqlc.types.non_null(Label), graphql_name="label")
labelable = sgqlc.types.Field(
sgqlc.types.non_null(Labelable), graphql_name="labelable"
)
| LabeledEvent |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared_tests/test_record.py | {
"start": 8654,
"end": 11256
} | class ____(IHaveNew):
name: str
secrets: list[str]
def __new__(cls, name: str, **kwargs):
return super().__new__(
cls,
name=name,
secrets=kwargs.get("secrets", []),
)
def test_pickle():
p = Person(name="Lyra", age=2)
assert p == pickle.loads(pickle.dumps(p))
a = Agent(name="smith", secrets=["many"])
assert a == pickle.loads(pickle.dumps(a))
a2 = Agent(name="mr. clean")
assert a2 == pickle.loads(pickle.dumps(a2))
def test_base_class_conflicts() -> None:
class ConflictPropBase(ABC):
@property
def prop(self): ...
with pytest.raises(check.CheckError, match="Conflicting non-abstract @property"):
@record
class X(ConflictPropBase):
prop: Any
class AbsPropBase(ABC):
@property
@abstractmethod
def abstract_prop(self): ...
@property
@abstractmethod
def abstract_prop_with_default(self) -> int: ...
class DidntImpl(AbsPropBase): ...
with pytest.raises(
TypeError,
match="Can't instantiate abstract class DidntImpl",
):
DidntImpl() # type: ignore # good job type checker
@record
class A(AbsPropBase):
abstract_prop: Any
abstract_prop_with_default: int = 0 # pyright: ignore[reportIncompatibleMethodOverride]
assert A(abstract_prop=4).abstract_prop == 4
assert A(abstract_prop=4).abstract_prop_with_default == 0
class ConflictFnBase:
def some_method(self): ...
with pytest.raises(check.CheckError, match="Conflicting function"):
@record
class _(ConflictFnBase):
some_method: Any
with pytest.raises(check.CheckError, match="will have to override __new__"):
def _some_func():
return 4
@record
class _:
thing: Any = _some_func
def test_lazy_import():
@record
class BadModel:
foos: list["TestType"]
with pytest.raises(check.CheckError, match="Unable to resolve"):
BadModel(foos=[])
@record
class AnnotatedModel:
foos: list[Annotated["TestType", ImportFrom("dagster_shared.utils.test")]]
assert AnnotatedModel(foos=[])
with pytest.raises(
check.CheckError, match="Expected <class 'dagster_shared.utils.test.TestType'>"
):
AnnotatedModel(foos=[1, 2, 3]) # pyright: ignore[reportArgumentType]
def _out_of_scope():
from dagster_shared.utils.test import TestType
return AnnotatedModel(foos=[TestType()])
assert _out_of_scope()
| Agent |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/ecr/resources.py | {
"start": 269,
"end": 1130
} | class ____:
def __init__(
self,
region_name: Optional[str] = None,
endpoint_url: Optional[str] = None,
use_ssl: bool = True,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
verify: Optional[bool] = None,
):
self.client = boto3.client(
"ecr-public",
region_name=region_name,
use_ssl=use_ssl,
verify=verify,
endpoint_url=endpoint_url,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
)
def get_login_password(self):
return self.client.get_authorization_token()["authorizationData"]["authorizationToken"]
| ECRPublicClient |
python | mlflow__mlflow | mlflow/entities/lifecycle_stage.py | {
"start": 95,
"end": 1202
} | class ____:
ACTIVE = "active"
DELETED = "deleted"
_VALID_STAGES = {ACTIVE, DELETED}
@classmethod
def view_type_to_stages(cls, view_type=ViewType.ALL):
stages = []
if view_type in (ViewType.ACTIVE_ONLY, ViewType.ALL):
stages.append(cls.ACTIVE)
if view_type in (ViewType.DELETED_ONLY, ViewType.ALL):
stages.append(cls.DELETED)
return stages
@classmethod
def is_valid(cls, lifecycle_stage):
return lifecycle_stage in cls._VALID_STAGES
@classmethod
def matches_view_type(cls, view_type, lifecycle_stage):
if not cls.is_valid(lifecycle_stage):
raise MlflowException(f"Invalid lifecycle stage '{lifecycle_stage}'")
if view_type == ViewType.ALL:
return True
elif view_type == ViewType.ACTIVE_ONLY:
return lifecycle_stage == LifecycleStage.ACTIVE
elif view_type == ViewType.DELETED_ONLY:
return lifecycle_stage == LifecycleStage.DELETED
else:
raise MlflowException(f"Invalid view type '{view_type}'")
| LifecycleStage |
python | ethereum__web3.py | web3/contract/async_contract.py | {
"start": 7194,
"end": 11965
} | class ____(BaseContractFunction):
# mypy types
w3: "AsyncWeb3[Any]"
async def call(
self,
transaction: TxParams | None = None,
block_identifier: BlockIdentifier = None,
state_override: StateOverride | None = None,
ccip_read_enabled: bool | None = None,
) -> Any:
"""
Execute a contract function call using the `eth_call` interface.
This method prepares a ``Caller`` object that exposes the contract
functions and public variables as callable Python functions.
Reading a public ``owner`` address variable example:
.. code-block:: python
ContractFactory = w3.eth.contract(
abi=wallet_contract_definition["abi"]
)
# Not a real contract address
contract = ContractFactory("0x2f70d3d26829e412A602E83FE8EeBF80255AEeA5")
# Read "owner" public variable
addr = contract.functions.owner().call()
:param transaction: Dictionary of transaction info for web3 interface
:param block_identifier TODO
:param state_override TODO
:param ccip_read_enabled TODO
:return: ``Caller`` object that has contract public functions
and variables exposed as Python methods
"""
call_transaction = self._get_call_txparams(transaction)
block_id = await async_parse_block_identifier(self.w3, block_identifier)
abi_element_identifier = abi_to_signature(self.abi)
return await async_call_contract_function(
self.w3,
self.address,
self._return_data_normalizers,
abi_element_identifier,
call_transaction,
block_id,
self.contract_abi,
self.abi,
state_override,
ccip_read_enabled,
self.decode_tuples,
*self.args or (),
**self.kwargs or {},
)
async def transact(self, transaction: TxParams | None = None) -> HexBytes:
setup_transaction = self._transact(transaction)
abi_element_identifier = abi_to_signature(self.abi)
return await async_transact_with_contract_function(
self.address,
self.w3,
abi_element_identifier,
setup_transaction,
self.contract_abi,
self.abi,
*self.args or (),
**self.kwargs or {},
)
async def estimate_gas(
self,
transaction: TxParams | None = None,
block_identifier: BlockIdentifier | None = None,
state_override: StateOverride | None = None,
) -> int:
setup_transaction = self._estimate_gas(transaction)
abi_element_identifier = abi_to_signature(self.abi)
return await async_estimate_gas_for_function(
self.address,
self.w3,
abi_element_identifier,
setup_transaction,
self.contract_abi,
self.abi,
block_identifier,
state_override,
*self.args or (),
**self.kwargs or {},
)
async def build_transaction(self, transaction: TxParams | None = None) -> TxParams:
built_transaction = self._build_transaction(transaction)
abi_element_identifier = abi_to_signature(self.abi)
return await async_build_transaction_for_function(
self.address,
self.w3,
abi_element_identifier,
built_transaction,
self.contract_abi,
self.abi,
*self.args or (),
**self.kwargs or {},
)
@staticmethod
def get_fallback_function(
abi: ABI,
async_w3: "AsyncWeb3[Any]",
address: ChecksumAddress | None = None,
) -> "AsyncContractFunction":
if abi and fallback_func_abi_exists(abi):
return AsyncContractFunction.factory(
"fallback",
w3=async_w3,
contract_abi=abi,
address=address,
abi_element_identifier=FallbackFn,
)()
return cast(AsyncContractFunction, NonExistentFallbackFunction())
@staticmethod
def get_receive_function(
abi: ABI,
async_w3: "AsyncWeb3[Any]",
address: ChecksumAddress | None = None,
) -> "AsyncContractFunction":
if abi and receive_func_abi_exists(abi):
return AsyncContractFunction.factory(
"receive",
w3=async_w3,
contract_abi=abi,
address=address,
abi_element_identifier=ReceiveFn,
)()
return cast(AsyncContractFunction, NonExistentReceiveFunction())
| AsyncContractFunction |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/schemas.py | {
"start": 53498,
"end": 53788
} | class ____(Protocol):
handle: JointFnHandle
def __call__(
self, primals: list[FxValue], tangents: list[FxValue]
) -> tuple[
tuple[list[FxValue], list[Optional[Tensor]]],
tuple[list[AOTOutput], list[Optional[AOTOutput]]],
]: ...
@dataclass
| JointTraceFn |
python | imageio__imageio | imageio/plugins/grab.py | {
"start": 1935,
"end": 2776
} | class ____(BaseGrabFormat):
"""The ClipboardGrabFormat provided a means to grab image data from
the clipboard, using the uri "<clipboard>"
This functionality is provided via Pillow. Note that "<clipboard>" is
only supported on Windows.
Parameters for reading
----------------------
No parameters.
"""
def _can_read(self, request):
if request.filename != "<clipboard>":
return False
return bool(self._init_pillow())
def _get_data(self, index):
ImageGrab = self._init_pillow()
assert ImageGrab
pil_im = ImageGrab.grabclipboard()
if pil_im is None:
raise RuntimeError(
"There seems to be no image data on the " "clipboard now."
)
im = np.asarray(pil_im)
return im, {}
| ClipboardGrabFormat |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/splice_a/package.py | {
"start": 217,
"end": 974
} | class ____(Package):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/splice-a-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789efghij")
variant("foo", default=False, description="nope")
variant("bar", default=False, description="nope")
variant("baz", default=False, description="nope")
depends_on("splice-z")
depends_on("splice-z+foo", when="+foo")
provides("something")
provides("somethingelse")
def install(self, spec, prefix):
with open(prefix.join("splice-a"), "w", encoding="utf-8") as f:
f.write("splice-a: {0}".format(prefix))
f.write("splice-z: {0}".format(spec["splice-z"].prefix))
| SpliceA |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/call_graph.py | {
"start": 394,
"end": 517
} | class ____:
def __init__(self) -> None:
pass
def method(self) -> str:
return _test_source()
| IsSource |
python | huggingface__transformers | src/transformers/models/t5/tokenization_t5.py | {
"start": 999,
"end": 6505
} | class ____(TokenizersBackend):
"""
Construct a T5 tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 100):
Add a number of extra ids added to the vocabulary for use as sentinels. These tokens are accessible as
"<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. These tokens can be retrieved by
calling get_sentinel_tokens method and token ids can be by calling get_sentinel_token_ids method
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
vocab (`dict`, *optional*):
Custom vocabulary dict. If not provided, a minimal vocabulary is created using the special tokens.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=100,
additional_special_tokens=None,
vocab=None,
vocab_file=None,
**kwargs,
):
self.vocab_file = vocab_file
self._extra_ids = extra_ids
# Handle extra_ids and additional_special_tokens
if additional_special_tokens is not None:
extra_tokens = [x for x in additional_special_tokens if "<extra_id_" in str(x)]
if len(extra_tokens) < 1:
additional_special_tokens += [f"<extra_id_{i}>" for i in range(extra_ids)]
elif extra_ids > 0 and extra_ids != len(extra_tokens):
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens"
)
else:
extra_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
additional_special_tokens = extra_tokens
# T5 vocab structure: <pad>=0, </s>=1, <unk>=2, then regular vocab, then extra_ids in reverse
if vocab is not None:
self._vocab_scores = vocab
else:
self._vocab_scores = [
(str(pad_token), 0.0),
(str(eos_token), 0.0),
(str(unk_token), 0.0),
("▁", -2.0), # Space token
]
for i in range(extra_ids - 1, -1, -1):
self._vocab_scores.append((f"<extra_id_{i}>", 0.0))
self._tokenizer = Tokenizer(
Unigram(
self._vocab_scores,
unk_id=2,
byte_fallback=False,
)
)
self._tokenizer.normalizer = None
self._tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.WhitespaceSplit(),
pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always", split=True),
]
)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme="always", split=True)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
extra_ids=extra_ids,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=["$A", "</s>"],
pair=["$A", "</s>", "$B", "</s>"],
special_tokens=[
("</s>", self.eos_token_id),
],
)
def get_sentinel_tokens(self):
"""Get the list of sentinel tokens (extra_id tokens) from additional_special_tokens."""
return list(
set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
)
def get_sentinel_token_ids(self):
"""Get the token IDs for sentinel tokens."""
return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
__all__ = ["T5Tokenizer"]
| T5Tokenizer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 3127,
"end": 3337
} | class ____(graphene.InputObjectType):
stepKey = graphene.NonNull(graphene.String)
outputName = graphene.NonNull(graphene.String)
class Meta:
name = "StepOutputHandle"
| GrapheneStepOutputHandle |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/base.py | {
"start": 102462,
"end": 130204
} | class ____(RunnableSerializable[Input, Output]):
"""Sequence of `Runnable` objects, where the output of one is the input of the next.
**`RunnableSequence`** is the most important composition operator in LangChain
as it is used in virtually every chain.
A `RunnableSequence` can be instantiated directly or more commonly by using the
`|` operator where either the left or right operands (or both) must be a
`Runnable`.
Any `RunnableSequence` automatically supports sync, async, batch.
The default implementations of `batch` and `abatch` utilize threadpools and
asyncio gather and will be faster than naive invocation of `invoke` or `ainvoke`
for IO bound `Runnable`s.
Batching is implemented by invoking the batch method on each component of the
`RunnableSequence` in order.
A `RunnableSequence` preserves the streaming properties of its components, so if
all components of the sequence implement a `transform` method -- which
is the method that implements the logic to map a streaming input to a streaming
output -- then the sequence will be able to stream input to output!
If any component of the sequence does not implement transform then the
streaming will only begin after this component is run. If there are
multiple blocking components, streaming begins after the last one.
!!! note
`RunnableLambdas` do not support `transform` by default! So if you need to
use a `RunnableLambdas` be careful about where you place them in a
`RunnableSequence` (if you need to use the `stream`/`astream` methods).
If you need arbitrary logic and need streaming, you can subclass
Runnable, and implement `transform` for whatever logic you need.
Here is a simple example that uses simple functions to illustrate the use of
`RunnableSequence`:
```python
from langchain_core.runnables import RunnableLambda
def add_one(x: int) -> int:
return x + 1
def mul_two(x: int) -> int:
return x * 2
runnable_1 = RunnableLambda(add_one)
runnable_2 = RunnableLambda(mul_two)
sequence = runnable_1 | runnable_2
# Or equivalently:
# sequence = RunnableSequence(first=runnable_1, last=runnable_2)
sequence.invoke(1)
await sequence.ainvoke(1)
sequence.batch([1, 2, 3])
await sequence.abatch([1, 2, 3])
```
Here's an example that uses streams JSON output generated by an LLM:
```python
from langchain_core.output_parsers.json import SimpleJsonOutputParser
from langchain_openai import ChatOpenAI
prompt = PromptTemplate.from_template(
"In JSON format, give me a list of {topic} and their "
"corresponding names in French, Spanish and in a "
"Cat Language."
)
model = ChatOpenAI()
chain = prompt | model | SimpleJsonOutputParser()
async for chunk in chain.astream({"topic": "colors"}):
print("-") # noqa: T201
print(chunk, sep="", flush=True) # noqa: T201
```
"""
# The steps are broken into first, middle and last, solely for type checking
# purposes. It allows specifying the `Input` on the first type, the `Output` of
# the last type.
first: Runnable[Input, Any]
"""The first `Runnable` in the sequence."""
middle: list[Runnable[Any, Any]] = Field(default_factory=list)
"""The middle `Runnable` in the sequence."""
last: Runnable[Any, Output]
"""The last `Runnable` in the sequence."""
def __init__(
self,
*steps: RunnableLike,
name: str | None = None,
first: Runnable[Any, Any] | None = None,
middle: list[Runnable[Any, Any]] | None = None,
last: Runnable[Any, Any] | None = None,
) -> None:
"""Create a new `RunnableSequence`.
Args:
steps: The steps to include in the sequence.
name: The name of the `Runnable`.
first: The first `Runnable` in the sequence.
middle: The middle `Runnable` objects in the sequence.
last: The last `Runnable` in the sequence.
Raises:
ValueError: If the sequence has less than 2 steps.
"""
steps_flat: list[Runnable] = []
if not steps and first is not None and last is not None:
steps_flat = [first] + (middle or []) + [last]
for step in steps:
if isinstance(step, RunnableSequence):
steps_flat.extend(step.steps)
else:
steps_flat.append(coerce_to_runnable(step))
if len(steps_flat) < _RUNNABLE_SEQUENCE_MIN_STEPS:
msg = (
f"RunnableSequence must have at least {_RUNNABLE_SEQUENCE_MIN_STEPS} "
f"steps, got {len(steps_flat)}"
)
raise ValueError(msg)
super().__init__(
first=steps_flat[0],
middle=list(steps_flat[1:-1]),
last=steps_flat[-1],
name=name,
)
@classmethod
@override
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "runnable"]`
"""
return ["langchain", "schema", "runnable"]
@property
def steps(self) -> list[Runnable[Any, Any]]:
"""All the `Runnable`s that make up the sequence in order.
Returns:
A list of `Runnable`s.
"""
return [self.first, *self.middle, self.last]
@classmethod
@override
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@property
@override
def InputType(self) -> type[Input]:
"""The type of the input to the `Runnable`."""
return self.first.InputType
@property
@override
def OutputType(self) -> type[Output]:
"""The type of the output of the `Runnable`."""
return self.last.OutputType
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
"""Get the input schema of the `Runnable`.
Args:
config: The config to use.
Returns:
The input schema of the `Runnable`.
"""
return _seq_input_schema(self.steps, config)
@override
def get_output_schema(
self, config: RunnableConfig | None = None
) -> type[BaseModel]:
"""Get the output schema of the `Runnable`.
Args:
config: The config to use.
Returns:
The output schema of the `Runnable`.
"""
return _seq_output_schema(self.steps, config)
@property
@override
def config_specs(self) -> list[ConfigurableFieldSpec]:
"""Get the config specs of the `Runnable`.
Returns:
The config specs of the `Runnable`.
"""
# Import locally to prevent circular import
return get_unique_config_specs(
[spec for step in self.steps for spec in step.config_specs]
)
@override
def get_graph(self, config: RunnableConfig | None = None) -> Graph:
"""Get the graph representation of the `Runnable`.
Args:
config: The config to use.
Returns:
The graph representation of the `Runnable`.
Raises:
ValueError: If a `Runnable` has no first or last node.
"""
# Import locally to prevent circular import
from langchain_core.runnables.graph import Graph # noqa: PLC0415
graph = Graph()
for step in self.steps:
current_last_node = graph.last_node()
step_graph = step.get_graph(config)
if step is not self.first:
step_graph.trim_first_node()
if step is not self.last:
step_graph.trim_last_node()
step_first_node, _ = graph.extend(step_graph)
if not step_first_node:
msg = f"Runnable {step} has no first node"
raise ValueError(msg)
if current_last_node:
graph.add_edge(current_last_node, step_first_node)
return graph
@override
def __repr__(self) -> str:
return "\n| ".join(
repr(s) if i == 0 else indent_lines_after_first(repr(s), "| ")
for i, s in enumerate(self.steps)
)
@override
def __or__(
self,
other: Runnable[Any, Other]
| Callable[[Iterator[Any]], Iterator[Other]]
| Callable[[AsyncIterator[Any]], AsyncIterator[Other]]
| Callable[[Any], Other]
| Mapping[str, Runnable[Any, Other] | Callable[[Any], Other] | Any],
) -> RunnableSerializable[Input, Other]:
if isinstance(other, RunnableSequence):
return RunnableSequence(
self.first,
*self.middle,
self.last,
other.first,
*other.middle,
other.last,
name=self.name or other.name,
)
return RunnableSequence(
self.first,
*self.middle,
self.last,
coerce_to_runnable(other),
name=self.name,
)
@override
def __ror__(
self,
other: Runnable[Other, Any]
| Callable[[Iterator[Other]], Iterator[Any]]
| Callable[[AsyncIterator[Other]], AsyncIterator[Any]]
| Callable[[Other], Any]
| Mapping[str, Runnable[Other, Any] | Callable[[Other], Any] | Any],
) -> RunnableSerializable[Other, Output]:
if isinstance(other, RunnableSequence):
return RunnableSequence(
other.first,
*other.middle,
other.last,
self.first,
*self.middle,
self.last,
name=other.name or self.name,
)
return RunnableSequence(
coerce_to_runnable(other),
self.first,
*self.middle,
self.last,
name=self.name,
)
@override
def invoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
# setup callbacks and context
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
input_ = input
# invoke all steps in sequence
try:
for i, step in enumerate(self.steps):
# mark each step as a child run
config = patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
)
with set_config_context(config) as context:
if i == 0:
input_ = context.run(step.invoke, input_, config, **kwargs)
else:
input_ = context.run(step.invoke, input_, config)
# finish the root run
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(input_)
return cast("Output", input_)
@override
async def ainvoke(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Output:
# setup callbacks and context
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
input_ = input
# invoke all steps in sequence
try:
for i, step in enumerate(self.steps):
# mark each step as a child run
config = patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
)
with set_config_context(config) as context:
if i == 0:
part = functools.partial(step.ainvoke, input_, config, **kwargs)
else:
part = functools.partial(step.ainvoke, input_, config)
input_ = await coro_with_context(part(), context, create_task=True)
# finish the root run
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(input_)
return cast("Output", input_)
@override
def batch(
self,
inputs: list[Input],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Output]:
if not inputs:
return []
# setup callbacks and context
configs = get_config_list(config, len(inputs))
callback_managers = [
CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
None,
input_,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
for cm, input_, config in zip(
callback_managers, inputs, configs, strict=False
)
]
# invoke
try:
if return_exceptions:
# Track which inputs (by index) failed so far
# If an input has failed it will be present in this map,
# and the value will be the exception that was raised.
failed_inputs_map: dict[int, Exception] = {}
for stepidx, step in enumerate(self.steps):
# Assemble the original indexes of the remaining inputs
# (i.e. the ones that haven't failed yet)
remaining_idxs = [
i for i in range(len(configs)) if i not in failed_inputs_map
]
# Invoke the step on the remaining inputs
inputs = step.batch(
[
inp
for i, inp in zip(remaining_idxs, inputs, strict=False)
if i not in failed_inputs_map
],
[
# each step a child run of the corresponding root run
patch_config(
config,
callbacks=rm.get_child(f"seq:step:{stepidx + 1}"),
)
for i, (rm, config) in enumerate(
zip(run_managers, configs, strict=False)
)
if i not in failed_inputs_map
],
return_exceptions=return_exceptions,
**(kwargs if stepidx == 0 else {}),
)
# If an input failed, add it to the map
failed_inputs_map.update(
{
i: inp
for i, inp in zip(remaining_idxs, inputs, strict=False)
if isinstance(inp, Exception)
}
)
inputs = [inp for inp in inputs if not isinstance(inp, Exception)]
# If all inputs have failed, stop processing
if len(failed_inputs_map) == len(configs):
break
# Reassemble the outputs, inserting Exceptions for failed inputs
inputs_copy = inputs.copy()
inputs = []
for i in range(len(configs)):
if i in failed_inputs_map:
inputs.append(cast("Input", failed_inputs_map[i]))
else:
inputs.append(inputs_copy.pop(0))
else:
for i, step in enumerate(self.steps):
inputs = step.batch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{i + 1}")
)
for rm, config in zip(run_managers, configs, strict=False)
],
return_exceptions=return_exceptions,
**(kwargs if i == 0 else {}),
)
# finish the root runs
except BaseException as e:
for rm in run_managers:
rm.on_chain_error(e)
if return_exceptions:
return cast("list[Output]", [e for _ in inputs])
raise
else:
first_exception: Exception | None = None
for run_manager, out in zip(run_managers, inputs, strict=False):
if isinstance(out, Exception):
first_exception = first_exception or out
run_manager.on_chain_error(out)
else:
run_manager.on_chain_end(out)
if return_exceptions or first_exception is None:
return cast("list[Output]", inputs)
raise first_exception
@override
async def abatch(
self,
inputs: list[Input],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Output]:
if not inputs:
return []
# setup callbacks and context
configs = get_config_list(config, len(inputs))
callback_managers = [
AsyncCallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers: list[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
None,
input_,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
for cm, input_, config in zip(
callback_managers, inputs, configs, strict=False
)
)
)
# invoke .batch() on each step
# this uses batching optimizations in Runnable subclasses, like LLM
try:
if return_exceptions:
# Track which inputs (by index) failed so far
# If an input has failed it will be present in this map,
# and the value will be the exception that was raised.
failed_inputs_map: dict[int, Exception] = {}
for stepidx, step in enumerate(self.steps):
# Assemble the original indexes of the remaining inputs
# (i.e. the ones that haven't failed yet)
remaining_idxs = [
i for i in range(len(configs)) if i not in failed_inputs_map
]
# Invoke the step on the remaining inputs
inputs = await step.abatch(
[
inp
for i, inp in zip(remaining_idxs, inputs, strict=False)
if i not in failed_inputs_map
],
[
# each step a child run of the corresponding root run
patch_config(
config,
callbacks=rm.get_child(f"seq:step:{stepidx + 1}"),
)
for i, (rm, config) in enumerate(
zip(run_managers, configs, strict=False)
)
if i not in failed_inputs_map
],
return_exceptions=return_exceptions,
**(kwargs if stepidx == 0 else {}),
)
# If an input failed, add it to the map
failed_inputs_map.update(
{
i: inp
for i, inp in zip(remaining_idxs, inputs, strict=False)
if isinstance(inp, Exception)
}
)
inputs = [inp for inp in inputs if not isinstance(inp, Exception)]
# If all inputs have failed, stop processing
if len(failed_inputs_map) == len(configs):
break
# Reassemble the outputs, inserting Exceptions for failed inputs
inputs_copy = inputs.copy()
inputs = []
for i in range(len(configs)):
if i in failed_inputs_map:
inputs.append(cast("Input", failed_inputs_map[i]))
else:
inputs.append(inputs_copy.pop(0))
else:
for i, step in enumerate(self.steps):
inputs = await step.abatch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{i + 1}")
)
for rm, config in zip(run_managers, configs, strict=False)
],
return_exceptions=return_exceptions,
**(kwargs if i == 0 else {}),
)
# finish the root runs
except BaseException as e:
await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers))
if return_exceptions:
return cast("list[Output]", [e for _ in inputs])
raise
else:
first_exception: Exception | None = None
coros: list[Awaitable[None]] = []
for run_manager, out in zip(run_managers, inputs, strict=False):
if isinstance(out, Exception):
first_exception = first_exception or out
coros.append(run_manager.on_chain_error(out))
else:
coros.append(run_manager.on_chain_end(out))
await asyncio.gather(*coros)
if return_exceptions or first_exception is None:
return cast("list[Output]", inputs)
raise first_exception
def _transform(
self,
inputs: Iterator[Input],
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> Iterator[Output]:
steps = [self.first, *self.middle, self.last]
# transform the input stream of each step with the next
# steps that don't natively support transforming an input stream will
# buffer input in memory until all available, and then start emitting output
final_pipeline = cast("Iterator[Output]", inputs)
for idx, step in enumerate(steps):
config = patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{idx + 1}")
)
if idx == 0:
final_pipeline = step.transform(final_pipeline, config, **kwargs)
else:
final_pipeline = step.transform(final_pipeline, config)
yield from final_pipeline
async def _atransform(
self,
inputs: AsyncIterator[Input],
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> AsyncIterator[Output]:
steps = [self.first, *self.middle, self.last]
# stream the last steps
# transform the input stream of each step with the next
# steps that don't natively support transforming an input stream will
# buffer input in memory until all available, and then start emitting output
final_pipeline = cast("AsyncIterator[Output]", inputs)
for idx, step in enumerate(steps):
config = patch_config(
config,
callbacks=run_manager.get_child(f"seq:step:{idx + 1}"),
)
if idx == 0:
final_pipeline = step.atransform(final_pipeline, config, **kwargs)
else:
final_pipeline = step.atransform(final_pipeline, config)
async for output in final_pipeline:
yield output
@override
def transform(
self,
input: Iterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Output]:
yield from self._transform_stream_with_config(
input,
self._transform,
patch_config(config, run_name=(config or {}).get("run_name") or self.name),
**kwargs,
)
@override
def stream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Output]:
yield from self.transform(iter([input]), config, **kwargs)
@override
async def atransform(
self,
input: AsyncIterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Output]:
async for chunk in self._atransform_stream_with_config(
input,
self._atransform,
patch_config(config, run_name=(config or {}).get("run_name") or self.name),
**kwargs,
):
yield chunk
@override
async def astream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Output]:
async def input_aiter() -> AsyncIterator[Input]:
yield input
async for chunk in self.atransform(input_aiter(), config, **kwargs):
yield chunk
| RunnableSequence |
python | pytorch__pytorch | torch/_inductor/runtime/caching/context.py | {
"start": 7335,
"end": 10315
} | class ____(TypedDict):
"""Schema for specifying which context forms to include in cache isolation.
Attributes:
runtime_context: Either True (include all runtime context), False (exclude all),
or a SelectedRuntimeContext dict specifying which forms to include.
compile_context: Either True (include all compile context), False (exclude all),
or a SelectedCompileContext dict specifying which forms to include.
"""
runtime_context: SelectedRuntimeContext | bool
compile_context: SelectedCompileContext | bool
_DEFAULT_ISOLATION_SCHEMA: IsolationSchema = IsolationSchema(
runtime_context=True, compile_context=True
)
def _isolation_context(
ischema: IsolationSchema = _DEFAULT_ISOLATION_SCHEMA,
) -> dict[str, Any]:
"""Generate context data based on the isolation schema.
Args:
ischema: Schema specifying which context forms to include.
Defaults to including all runtime and compile context.
Returns:
A dictionary containing the selected context data with keys
"runtime_context" and "compile_context", where each value is
either None (if excluded) or a dict of context form data.
"""
isolation_context: dict[str, Any] = {}
for context_name, context_cls in (
("runtime_context", _RuntimeContext),
("compile_context", _CompileContext),
):
selected_context: dict[str, Any] | None = None
if ischema[context_name] is True: # type: ignore[literal-required]
selected_context = {
form_of_context: getattr(context_cls, form_of_context)()
for form_of_context in context_cls.forms_of_context()
}
elif ischema[context_name] is False: # type: ignore[literal-required]
selected_context = None
else:
selected_context = {}
for form_of_context in ischema[context_name]: # type: ignore[literal-required]
selected = ischema[context_name][form_of_context] # type: ignore[literal-required]
if selected:
selected_context[form_of_context] = getattr(
context_cls, form_of_context
)()
selected_context = selected_context or None
isolation_context[context_name] = selected_context
return isolation_context
def _isolation_key(ischema: IsolationSchema = _DEFAULT_ISOLATION_SCHEMA) -> str:
"""Generate a unique key for the given isolation schema.
Args:
ischema: Schema specifying which context forms to include.
Defaults to including all runtime and compile context.
Returns:
A 32-character hexadecimal string that uniquely identifies
the context specified by the isolation schema.
"""
return sha256(
json.dumps(_isolation_context(ischema), sort_keys=True).encode()
).hexdigest()[:32]
| IsolationSchema |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/utils/kubernetes.py | {
"start": 4997,
"end": 5168
} | class ____(BaseModel):
model_config = {
"extra": "allow",
"json_schema_extra": {"$ref": create_definition_ref("io.k8s.api.core.v1.EnvVar")},
}
| EnvVar |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 24496,
"end": 25226
} | class ____(ChainedSource):
index: Any
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.load_import_from(utils.__name__, "dict_keys_getitem")
)
codegen(self.base)
codegen.append_output(codegen.create_load_const(self.index))
codegen.extend_output(create_call_function(2, False))
def name(self) -> str:
# The list creation will be CSE'd by PyExprCSEPass
return f"list(dict.keys({self.base.name()}))[{self.index!r}]"
def is_dict_key(self) -> bool:
return True
@dataclasses.dataclass(frozen=True)
| ConstDictKeySource |
python | realpython__materials | python-maze-solver/source_code_final/src/maze_solver/view/primitives.py | {
"start": 1129,
"end": 1300
} | class ____(tuple[Line, ...]):
def draw(self, **attributes) -> str:
return "".join(line.draw(**attributes) for line in self)
@dataclass(frozen=True)
| DisjointLines |
python | Netflix__metaflow | metaflow/plugins/datatools/s3/s3tail.py | {
"start": 184,
"end": 2597
} | class ____(object):
def __init__(self, s3url):
url = urlparse(s3url)
self.s3, self.ClientError = get_s3_client()
self._bucket = url.netloc
self._key = url.path.lstrip("/")
self._pos = 0
self._tail = b""
def reset_client(self, hard_reset=False):
# This method is required by @aws_retry
if hard_reset or self.s3 is None:
self.s3, self.ClientError = get_s3_client()
def clone(self, s3url):
tail = S3Tail(s3url)
tail._pos = self._pos
tail._tail = self._tail
return tail
@property
def bytes_read(self):
return self._pos
@property
def tail(self):
return self._tail
def __iter__(self):
buf = self._fill_buf()
if buf is not None:
for line in buf:
if line.endswith(b"\n"):
yield line
else:
self._tail = line
break
@aws_retry
def _make_range_request(self):
try:
return self.s3.get_object(
Bucket=self._bucket, Key=self._key, Range="bytes=%d-" % self._pos
)
except self.ClientError as err:
code = err.response["Error"]["Code"]
# NOTE we deliberately regard NoSuchKey as an ignorable error.
# We assume that the file just hasn't appeared in S3 yet.
# Some S3 compatible storage systems like Dell EMC-ECS return 416 in-lieu
# of InvalidRange - https://www.delltechnologies.com/asset/en-us/products/storage/technical-support/docu95766.pdf
if code in ("InvalidRange", "NoSuchKey", "416"):
return None
else:
raise
def _fill_buf(self):
resp = self._make_range_request()
if resp is None:
return None
code = str(resp["ResponseMetadata"]["HTTPStatusCode"])
if code[0] == "2":
data = resp["Body"].read()
if data:
buf = BytesIO(self._tail + data)
self._pos += len(data)
self._tail = b""
return buf
else:
return None
elif code[0] == "5":
return None
else:
raise Exception(
"Retrieving %s/%s failed: %s" % (self._bucket, self._key, code)
)
| S3Tail |
python | numba__numba | numba/tests/test_array_return.py | {
"start": 269,
"end": 856
} | class ____(MemoryLeakMixin, unittest.TestCase):
def test_array_return(self):
a = np.arange(10)
i = 2
at, it = typeof(a), typeof(i)
cfunc = njit((at, it))(array_return)
self.assertIs(a, cfunc(a, i))
def test_array_return_start_with_loop(self):
"""
A bug breaks array return if the function starts with a loop
"""
a = np.arange(10)
at = typeof(a)
cfunc = njit((at,))(array_return_start_with_loop)
self.assertIs(a, cfunc(a))
if __name__ == '__main__':
unittest.main()
| TestArrayReturn |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 26163,
"end": 34342
} | class ____(CompoundRequest):
"""
Adds a single event
"""
_service = "events"
_action = "add"
_version = "2.13"
_item_prop_name = "event"
_schema = {
"anyOf": [
{"$ref": "#/definitions/metrics_scalar_event"},
{"$ref": "#/definitions/metrics_vector_event"},
{"$ref": "#/definitions/metrics_image_event"},
{"$ref": "#/definitions/metrics_plot_event"},
{"$ref": "#/definitions/task_log_event"},
],
"definitions": {
"log_level_enum": {
"enum": [
"notset",
"debug",
"verbose",
"info",
"warn",
"warning",
"error",
"fatal",
"critical",
],
"type": "string",
},
"metrics_image_event": {
"description": "An image or video was dumped to storage for debugging",
"properties": {
"iter": {"description": "Iteration", "type": "integer"},
"key": {"description": "File key", "type": "string"},
"metric": {
"description": "Metric name, e.g. 'count', 'loss', 'accuracy'",
"type": "string",
},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {"const": "training_debug_image", "description": ""},
"url": {"description": "File URL", "type": "string"},
"variant": {
"description": "E.g. 'class_1', 'total', 'average",
"type": "string",
},
},
"required": ["task", "type"],
"type": "object",
},
"metrics_plot_event": {
"description": " An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.",
"properties": {
"iter": {"description": "Iteration", "type": "integer"},
"metric": {
"description": "Metric name, e.g. 'count', 'loss', 'accuracy'",
"type": "string",
},
"plot_str": {
"description": "An entire plot (not single datapoint) and it's layout.\n Used for plotting ROC curves, confidence matrices, etc. when evaluating the net.\n ",
"type": "string",
},
"skip_validation": {
"description": "If set then plot_str is not checked for a valid json. The default is False",
"type": "boolean",
},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {"const": "plot", "description": "'plot'"},
"variant": {
"description": "E.g. 'class_1', 'total', 'average",
"type": "string",
},
},
"required": ["task", "type"],
"type": "object",
},
"metrics_scalar_event": {
"description": "Used for reporting scalar metrics during training task",
"properties": {
"iter": {"description": "Iteration", "type": "integer"},
"metric": {
"description": "Metric name, e.g. 'count', 'loss', 'accuracy'",
"type": "string",
},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {
"const": "training_stats_scalar",
"description": "training_stats_vector",
},
"value": {"description": "", "type": "number"},
"variant": {
"description": "E.g. 'class_1', 'total', 'average",
"type": "string",
},
},
"required": ["task", "type"],
"type": "object",
},
"metrics_vector_event": {
"description": "Used for reporting vector metrics during training task",
"properties": {
"iter": {"description": "Iteration", "type": "integer"},
"metric": {
"description": "Metric name, e.g. 'count', 'loss', 'accuracy'",
"type": "string",
},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {
"const": "training_stats_vector",
"description": "training_stats_vector",
},
"values": {
"description": "vector of float values",
"items": {"type": "number"},
"type": "array",
},
"variant": {
"description": "E.g. 'class_1', 'total', 'average",
"type": "string",
},
},
"required": ["task"],
"type": "object",
},
"task_log_event": {
"description": "A log event associated with a task.",
"properties": {
"level": {
"$ref": "#/definitions/log_level_enum",
"description": "Log level.",
},
"msg": {"description": "Log message.", "type": "string"},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {"const": "log", "description": "'log'"},
"worker": {
"description": "Name of machine running the task.",
"type": "string",
},
},
"required": ["task", "type"],
"type": "object",
},
},
"type": "object",
}
def __init__(self, event: Any) -> None:
super(AddRequest, self).__init__()
self.event = event
@property
def event(self) -> None:
return self._property_event
@event.setter
def event(self, value: Any) -> None:
self.assert_isinstance(
value,
"event",
(
MetricsScalarEvent,
MetricsVectorEvent,
MetricsImageEvent,
MetricsPlotEvent,
TaskLogEvent,
),
)
self._property_event = value
| AddRequest |
python | google__jax | tests/xla_metadata_test.py | {
"start": 997,
"end": 14568
} | class ____(jtu.JaxTestCase):
def _assert_metadata_appears_once_per_op(
self,
hlo_text: str,
expected_tagged_ops: list[str],
metadata: dict[str, str],
):
attribute_strings = [f'{k}="{v}"' for k, v in metadata.items()]
op_with_metadata_count = {op: 0 for op in expected_tagged_ops}
for line in hlo_text.splitlines():
for op in expected_tagged_ops:
if (str(op + "(") in line and all(attr in line for attr in attribute_strings)
and "frontend_attributes=" in line):
op_with_metadata_count[op] += 1
for op in op_with_metadata_count:
self.assertEqual(
op_with_metadata_count[op],
1,
f"Expected op '{op}' to have the metadata exactly once,"
f" but found it {op_with_metadata_count[op]} times\n"
f"Metadata: {metadata}\n"
f"HLO Graph:\n\n{hlo_text}",
)
def test_f_jitted(self):
@jax.jit
def f(a, b):
with set_xla_metadata(a="b"):
return a + b
f_jaxpr = jax.make_jaxpr(f)(1, 2)
eqns = f_jaxpr.eqns
for eq in eqns[1:]:
self.assertDictEqual(eq.ctx.attributes, {"a": "b"})
f_lowered_text = f.lower(1.0, 2.0).as_text()
self.assertIn('mhlo.frontend_attributes = {a = "b"}', f_lowered_text)
def test_f_jitted_bool_attributes(self):
@jax.jit
def f(a, b):
with set_xla_metadata(a=True):
return a + b
f_lowered_text = f.lower(1.0, 2.0).as_text()
self.assertIn('mhlo.frontend_attributes = {a = "true"}', f_lowered_text)
def test_f_jitted_int_attributes(self):
@jax.jit
def f(a, b):
with set_xla_metadata(a=10):
return a + b
f_lowered_text = f.lower(1.0, 2.0).as_text()
self.assertIn('mhlo.frontend_attributes = {a = "10"}', f_lowered_text)
def test_decorator(self):
@set_xla_metadata(a="b")
@jax.jit
def f(a, b):
return a + b
f_jaxpr = jax.make_jaxpr(f)(1, 2)
eqns = f_jaxpr.eqns
for eq in eqns[1:]:
self.assertDictEqual(eq.ctx.attributes, {"a": "b"})
f_lowered_text = f.lower(1.0, 2.0).as_text()
self.assertIn('mhlo.frontend_attributes = {a = "b"}', f_lowered_text)
def test_decorator_and_context_manager_nested(self):
@set_xla_metadata(a="b")
@jax.jit
def f(a, b):
with set_xla_metadata(c="d"):
return a + b
f_lowered_text = f.lower(1.0, 2.0).as_text()
self.assertIn(
'mhlo.frontend_attributes = {a = "b", c = "d"}',
f_lowered_text,
)
def test_f_nonjitted(self):
def f_add(a, b):
return lax.add(a, b)
arg1 = jnp.arange(2)
with set_xla_metadata(a="b"):
self.assertIn(
'mhlo.frontend_attributes = {a = "b"}',
jax.jit(f_add).lower(arg1, arg1).as_text(),
)
def test_f_attributes_overwrite(self):
@jax.jit
def g(a, b):
return a * b
with set_xla_metadata(a="b"):
@jax.jit
def f(a, b):
with set_xla_metadata(a="c"):
return a + b
f_lowered_text = f.lower(1.0, 2.0).as_text()
self.assertIn('mhlo.frontend_attributes = {a = "c"}', f_lowered_text)
self.assertIn(
'mhlo.frontend_attributes = {a = "b"}', g.lower(1.0, 2.0).as_text()
)
self.assertNotIn("mhlo.frontend_attributes", g.lower(1.0, 2.0).as_text())
def test_f_attributes_merge(self):
with set_xla_metadata(key1="val1"):
@jax.jit
def f(a, b):
with set_xla_metadata(key2="val2"):
return a + b
f_lowered_text = f.lower(1.0, 2.0).as_text()
self.assertIn(
'mhlo.frontend_attributes = {key1 = "val1", key2 = "val2"}',
f_lowered_text,
)
def test_attr_caching_jit(self):
@jax.jit
def f_add_jit(a, b):
return a + b
with set_xla_metadata(b="c"):
f_add_lowered1 = f_add_jit.lower(2.0, 3.0).as_text()
# Expect no attributes in the mlir.
f_add_lowered2 = f_add_jit.lower(1.0, 2.0).as_text()
with set_xla_metadata(c="d"):
f_add_lowered3 = f_add_jit.lower(4.0, 5.0).as_text()
self.assertIn('mhlo.frontend_attributes = {b = "c"}', f_add_lowered1)
self.assertNotIn("mhlo.frontend_attributes = {}", f_add_lowered2)
self.assertNotIn('mhlo.frontend_attributes = {b = "c"}', f_add_lowered2)
self.assertNotIn('mhlo.frontend_attributes = {c = "d"}', f_add_lowered2)
self.assertIn('mhlo.frontend_attributes = {c = "d"}', f_add_lowered3)
def test_attr_caching_nonjit(self):
def f_add(a, b):
return lax.add(a, b)
arg1 = jnp.arange(2)
arg2 = jnp.arange(2) + 1
arg3 = jnp.arange(2) + 2
with set_xla_metadata(b="c"):
self.assertIn(
'mhlo.frontend_attributes = {b = "c"}',
jax.jit(f_add).lower(arg1, arg1).as_text(),
)
# Expect no attributes in the jaxpr.
self.assertNotIn(
"mhlo.frontend_attributes",
jax.jit(f_add).lower(arg2, arg2).as_text(),
)
with set_xla_metadata(c="d"):
self.assertIn(
'mhlo.frontend_attributes = {c = "d"}',
jax.jit(f_add).lower(arg3, arg3).as_text(),
)
def test_axpy(self):
@jax.jit
def axpy(a, x, y):
with set_xla_metadata(a="b"):
return a * x + y
for line in axpy.lower(1.0, 2.0, 3.0).as_text().split("\n"):
if "stablehlo.multiply" in line:
self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
if "stablehlo.add" in line:
self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
def test_while(self):
@jax.jit
def f(a):
with set_xla_metadata(a="b"):
return jax.lax.while_loop(lambda x: x < 10, lambda x: x + 1, a)
self.assertIn(
'mhlo.frontend_attributes = {a = "b"}', f.lower(1.0).as_text()
)
def test_while_condition_body(self):
@jax.jit
def f_condition(x):
with set_xla_metadata(a="b"):
return x < 10
@jax.jit
def f_body(x):
with set_xla_metadata(a="c"):
return x + 1
@jax.jit
def while_fn(a):
return jax.lax.while_loop(f_condition, f_body, a)
for line in while_fn.lower(1.0).as_text().split("\n"):
if "stablehlo.compare" in line:
self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
if "stablehlo.add" in line:
self.assertIn('mhlo.frontend_attributes = {a = "c"}', line)
def test_cond_annotates_branches(self):
sin = jnp.sin
cos = jnp.cos
@jax.jit
def f(x):
with set_xla_metadata(a="b"):
return jax.lax.cond(x < 0., sin, cos, x)
hlo_lines = f.lower(1.).as_text().split("\n")
sin_hlo, = (line for line in hlo_lines if "stablehlo.sine" in line)
cos_hlo, = (line for line in hlo_lines if "stablehlo.cosine" in line)
self.assertIn('mhlo.frontend_attributes = {a = "b"}', sin_hlo)
self.assertIn('mhlo.frontend_attributes = {a = "b"}', cos_hlo)
def test_cond_annotates_branches_and_none_unsets(self):
sin = jnp.sin
def cos(x):
with set_xla_metadata(a=None):
return jnp.cos(x)
@jax.jit
def f(x):
with set_xla_metadata(a="b"):
return jax.lax.cond(x < 0., sin, cos, x)
hlo_lines = f.lower(1.).as_text().split("\n")
sin_hlo, = (line for line in hlo_lines if "stablehlo.sine" in line)
cos_hlo, = (line for line in hlo_lines if "stablehlo.cosine" in line)
self.assertIn( 'mhlo.frontend_attributes = {a = "b"}', sin_hlo)
self.assertNotIn('mhlo.frontend_attributes = {a = "b"}', cos_hlo)
def test_nested_jit(self):
@jax.jit
def f(x, y):
with set_xla_metadata(a="b"):
z = x * y
@jax.jit
def g(z):
with set_xla_metadata(c="d"):
return z**2 + 1
return g(z)
self.assertIn(
'mhlo.frontend_attributes = {a = "b", c = "d"}',
f.lower(1.0, 2.0).as_text(),
)
def test_grad(self):
@jax.jit
def f(x, y):
with set_xla_metadata(a="b"):
return jax.grad(lambda x: x**3 + y**2 + jnp.sin(x))(x)
f_jaxpr = jax.make_jaxpr(f)(1.0, 2.0)
eqns = f_jaxpr.eqns
for eq in eqns[1:]:
self.assertDictEqual(eq.ctx.attributes, {"a": "b"})
self.assertIn(
'mhlo.frontend_attributes = {a = "b"}', f.lower(1.0, 2.).as_text()
)
def test_grad_outside_ctx(self):
@jax.jit
def f(x):
with set_xla_metadata(a="b"):
return x**3 + x**2 + jnp.sin(x)
grad_fn = jax.jit(jax.grad(f))
for line in grad_fn.lower(1.0).as_text().split("\n"):
if "stablehlo.cosine" in line:
self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
if "call @integer_pow" in line:
self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
def test_vmap(self):
dct = {"a": 0.0, "b": jnp.arange(5.0)}
@jax.jit
def f(dct, x):
with set_xla_metadata(a="b"):
return dct["a"] + dct["b"] + x
with set_xla_metadata(a="d"):
f_vmap = jax.vmap(f, in_axes=({"a": None, "b": 0}, None))
f_jaxpr = jax.make_jaxpr(f_vmap)(dct, 1.0)
eqns = f_jaxpr.eqns
for eq in eqns[1:]:
self.assertDictEqual(eq.ctx.attributes, {"a": "d"})
@jax.jit
def f2(x, y):
with set_xla_metadata(a="b"):
return (x + y, y * 2.0)
f2_vmap = jax.vmap(f2, in_axes=(0, None))
self.assertIn(
'mhlo.frontend_attributes = {a = "b"}',
jax.jit(f2_vmap).lower(jnp.arange(5.0), 1.0).as_text(),
)
def test_multiple_instructions(self):
@jax.jit
def f(x, a):
y = jnp.matmul(x, x)
with set_xla_metadata(a="b"):
return y + a
for line in f.lower(jnp.arange(5.0), 1.0).as_text().split("\n"):
# matmul doesn't have attributes
if "stablehlo.dot_general" in line:
self.assertNotIn('mhlo.frontend_attributes = {a = "b"}', line)
if "stablehlo.add" in line:
self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
def test_softmax(self):
@jax.jit
def f(x):
with set_xla_metadata(a="b"):
return jax.nn.softmax(x)
self.assertIn(
'mhlo.frontend_attributes = {a = "b"}', f.lower(jnp.arange(5.0)).as_text()
)
@parameterized.parameters(
("x*x", lambda x: x * x, "multiply"),
("sin(x)", jnp.sin, "sin"),
("tanh(x)", jnp.tanh, "tanh"),
("1/x", lambda x: 1 / x, "divide"),
("sinc(x)", jnp.sinc, "call"),
)
def test_value_tagging(self, name, fn, expected_tagged_op):
metadata = {"test_value_tagging": name}
def wrapped_fn(x):
return set_xla_metadata(fn(x), **metadata)
x_scalar = jnp.array(0.7)
text = jax.jit(wrapped_fn).lower(x_scalar).as_text("hlo")
self._assert_metadata_appears_once_per_op(
text, [expected_tagged_op], metadata)
@parameterized.parameters(
("x*x", lambda x: x * x, "add"),
("sin(x)", jnp.sin, "cosine"),
("tanh(x)", jnp.tanh, "add"),
("1/x", lambda x: 1 / x, "negate"),
("sinc(x)", jnp.sinc, "call"),
)
def test_value_grad_tagging(self, name, fn, expected_tagged_op):
metadata = {"test_value_grad_tagging": name}
@jax.custom_vjp
def wrapped_fn(x):
return fn(x)
def fwd(*args):
primal_out, vjp_fn = jax.vjp(fn, *args)
return primal_out, vjp_fn
def bwd(vjp_fn, cts_in):
cts_out = vjp_fn(cts_in)
cts_out = set_xla_metadata(cts_out, **metadata)
return cts_out
wrapped_fn.defvjp(fwd, bwd)
x_scalar = jnp.array(0.7)
text = jax.jit(jax.grad(wrapped_fn)).lower(x_scalar).as_text("hlo")
self._assert_metadata_appears_once_per_op(
text, [expected_tagged_op], metadata)
def test_vmap_multi_input_output_value_tagging(self):
metadata = {"test_vmap_multi_input_output_value_tagging": "value"}
fn = lambda x, y, z: (x @ y + z, z - x @ y)
@jax.vmap
def vmapped_fn(x_item, y_item, z_item):
return set_xla_metadata(fn(x_item, y_item, z_item), **metadata)
batch_size, num_rows, num_cols = 4, 5, 6
rng = np.random.default_rng(0)
x_batch = rng.random((batch_size, num_rows, num_cols)).astype(np.float32)
y_batch = rng.random((batch_size, num_cols, num_rows)).astype(np.float32)
z_batch = rng.random((batch_size, num_rows, num_rows)).astype(np.float32)
inputs = (x_batch, y_batch, z_batch)
text = jax.jit(vmapped_fn).lower(*inputs).as_text("hlo")
self._assert_metadata_appears_once_per_op(
text, ["add", "subtract"], metadata)
def test_sharding_support_value_tagging(self):
mesh = jtu.create_mesh((1,), "data")
np_inp = np.arange(8, dtype=np.float32)
arr = jax.device_put(np_inp, jax.NamedSharding(mesh, jax.P("data")))
metadata = {"test_sharding_support_value_tagging": "value"}
@jax.jit
def wrapped_fn(x):
return set_xla_metadata(x * 2.0, **metadata)
text = jax.jit(wrapped_fn).lower(arr).as_text("hlo")
self._assert_metadata_appears_once_per_op(text, ["multiply"], metadata)
def test_scan_support_value_tagging(self):
metadata = {"test_scan_support_value_tagging": "value"}
fn = lambda carry, x: (carry + x) * 2.0
def scan_body_val_with_metadata(carry, x):
tagged_result = set_xla_metadata(fn(carry, x), **metadata)
return tagged_result, tagged_result
def scan_fn(init_carry, inputs_arr):
return jax.lax.scan(scan_body_val_with_metadata, init_carry, inputs_arr)
inputs = (jnp.array(0.0), jnp.arange(1, 4, dtype=jnp.float32))
text = jax.jit(scan_fn).lower(*inputs).as_text("hlo")
self._assert_metadata_appears_once_per_op(text, ["multiply"], metadata)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| XlaMetadataTest |
python | django__django | tests/basic/tests.py | {
"start": 27936,
"end": 30361
} | class ____(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
"none",
"count",
"dates",
"datetimes",
"distinct",
"extra",
"get",
"get_or_create",
"update_or_create",
"create",
"bulk_create",
"bulk_update",
"filter",
"aggregate",
"annotate",
"alias",
"complex_filter",
"exclude",
"in_bulk",
"iterator",
"earliest",
"latest",
"first",
"last",
"order_by",
"select_for_update",
"select_related",
"prefetch_related",
"values",
"values_list",
"update",
"reverse",
"defer",
"only",
"using",
"exists",
"contains",
"explain",
"_insert",
"_update",
"raw",
"union",
"intersection",
"difference",
"aaggregate",
"abulk_create",
"abulk_update",
"acontains",
"acount",
"acreate",
"aearliest",
"aexists",
"aexplain",
"afirst",
"aget",
"aget_or_create",
"ain_bulk",
"aiterator",
"alast",
"alatest",
"aupdate",
"aupdate_or_create",
"fetch_mode",
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to
`ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(models.QuerySet)),
sorted(self.QUERYSET_PROXY_METHODS),
)
def test_manager_method_attributes(self):
self.assertEqual(Article.objects.get.__doc__, models.QuerySet.get.__doc__)
self.assertEqual(Article.objects.count.__name__, models.QuerySet.count.__name__)
def test_manager_method_signature(self):
self.assertEqual(
str(inspect.signature(Article.objects.bulk_create)),
"(objs, batch_size=None, ignore_conflicts=False, update_conflicts=False, "
"update_fields=None, unique_fields=None)",
)
| ManagerTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/row.py | {
"start": 9902,
"end": 12091
} | class ____(BaseRow, typing.Mapping["_KeyType", Any]):
"""A ``Mapping`` that maps column names and objects to :class:`.Row`
values.
The :class:`.RowMapping` is available from a :class:`.Row` via the
:attr:`.Row._mapping` attribute, as well as from the iterable interface
provided by the :class:`.MappingResult` object returned by the
:meth:`_engine.Result.mappings` method.
:class:`.RowMapping` supplies Python mapping (i.e. dictionary) access to
the contents of the row. This includes support for testing of
containment of specific keys (string column names or objects), as well
as iteration of keys, values, and items::
for row in result:
if "a" in row._mapping:
print("Column 'a': %s" % row._mapping["a"])
print("Column b: %s" % row._mapping[table.c.b])
.. versionadded:: 1.4 The :class:`.RowMapping` object replaces the
mapping-like access previously provided by a database result row,
which now seeks to behave mostly like a named tuple.
"""
__slots__ = ()
if TYPE_CHECKING:
def __getitem__(self, key: _KeyType) -> Any: ...
else:
__getitem__ = BaseRow._get_by_key_impl_mapping
def __iter__(self) -> Iterator[str]:
return (k for k in self._parent.keys if k is not None)
def __contains__(self, key: object) -> bool:
return self._parent._has_key(key)
def __repr__(self) -> str:
return repr(dict(self))
def items(self) -> ROMappingItemsView:
"""Return a view of key/value tuples for the elements in the
underlying :class:`.Row`.
"""
return ROMappingItemsView(
self, [(key, self[key]) for key in self.keys()]
)
def keys(self) -> RMKeyView:
"""Return a view of 'keys' for string column names represented
by the underlying :class:`.Row`.
"""
return self._parent.keys
def values(self) -> ROMappingKeysValuesView:
"""Return a view of values for the values represented in the
underlying :class:`.Row`.
"""
return ROMappingKeysValuesView(self, self._values_impl())
| RowMapping |
python | gevent__gevent | src/greentest/3.14/test_urllib2_localnet.py | {
"start": 6763,
"end": 8149
} | class ____(http.server.BaseHTTPRequestHandler):
"""Handler for performing basic authentication."""
# Server side values
USER = 'testUser'
PASSWD = 'testPass'
REALM = 'Test'
USER_PASSWD = "%s:%s" % (USER, PASSWD)
ENCODED_AUTH = base64.b64encode(USER_PASSWD.encode('ascii')).decode('ascii')
def __init__(self, *args, **kwargs):
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Suppress console log message
pass
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header("WWW-Authenticate", "Basic realm=\"%s\"" % self.REALM)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
if not self.headers.get("Authorization", ""):
self.do_AUTHHEAD()
self.wfile.write(b"No Auth header received")
elif self.headers.get(
"Authorization", "") == "Basic " + self.ENCODED_AUTH:
self.send_response(200)
self.end_headers()
self.wfile.write(b"It works")
else:
# Request Unauthorized
self.do_AUTHHEAD()
# Proxy test infrastructure
| BasicAuthHandler |
python | spyder-ide__spyder | spyder/app/utils.py | {
"start": 1417,
"end": 13698
} | class ____:
"""
This is used to inject a 'spy' object in the internal console
namespace to inspect Spyder internals.
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return (list(self.__dict__.keys()) +
[x for x in dir(self.__class__) if x[0] != '_'])
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
def set_opengl_implementation(option):
"""
Set the OpenGL implementation used by Spyder.
See spyder-ide/spyder#7447 for the details.
"""
if hasattr(QQuickWindow, "setGraphicsApi"):
set_api = QQuickWindow.setGraphicsApi # Qt 6
else:
if QQuickWindow is not None:
set_api = QQuickWindow.setSceneGraphBackend # Qt 5
if option == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
if QQuickWindow is not None:
set_api(QSGRendererInterface.GraphicsApi.Software)
elif option == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
if QQuickWindow is not None:
set_api(QSGRendererInterface.GraphicsApi.OpenGL)
elif option == 'gles':
QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
if QQuickWindow is not None:
set_api(QSGRendererInterface.GraphicsApi.OpenGL)
def setup_logging(cli_options):
"""Setup logging with cli options defined by the user."""
if cli_options.debug_info or get_debug_level() > 0:
levels = {2: logging.INFO, 3: logging.DEBUG}
log_level = levels[get_debug_level()]
log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'
console_filters = cli_options.filter_log.split(',')
console_filters = [x.strip() for x in console_filters]
console_filters = console_filters + FILTER_NAMES
console_filters = [x for x in console_filters if x != '']
handlers = [logging.StreamHandler()]
filepath = os.environ['SPYDER_DEBUG_FILE']
handlers.append(
logging.FileHandler(filename=filepath, mode='w+')
)
match_func = lambda x: True
if console_filters != [''] and len(console_filters) > 0:
dafsa = DAFSA(console_filters)
match_func = lambda x: (dafsa.lookup(x, stop_on_prefix=True)
is not None)
formatter = logging.Formatter(log_format)
class ModuleFilter(logging.Filter):
"""Filter messages based on module name prefix."""
def filter(self, record):
return match_func(record.name)
filter = ModuleFilter()
root_logger.setLevel(log_level)
for handler in handlers:
handler.addFilter(filter)
handler.setFormatter(formatter)
handler.setLevel(log_level)
root_logger.addHandler(handler)
def delete_debug_log_files():
"""Delete previous debug log files."""
regex = re.compile(r'.*_.*_(\d+)[.]log')
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
match = regex.match(f)
if match is not None:
pid = int(match.group(1))
if not psutil.pid_exists(pid):
os.remove(f)
debug_file = os.environ['SPYDER_DEBUG_FILE']
if osp.exists(debug_file):
os.remove(debug_file)
def qt_message_handler(msg_type, msg_log_context, msg_string):
"""
Qt warning messages are intercepted by this handler.
On some operating systems, warning messages might be displayed
even if the actual message does not apply. This filter adds a
blacklist for messages that are unnecessary. Anything else will
get printed in the internal console.
"""
BLACKLIST = [
'QMainWidget::resizeDocks: all sizes need to be larger than 0',
# This is shown at startup due to our splash screen but it's harmless
"fromIccProfile: failed minimal tag size sanity",
# This is shown when expanding/collpasing folders in the Files plugin
# after spyder-ide/spyder#
"QFont::setPixelSize: Pixel size <= 0 (0)",
# These warnings are shown uncollapsing CollapsibleWidget
"QPainter::begin: Paint device returned engine == 0, type: 2",
"QPainter::save: Painter not active",
"QPainter::setPen: Painter not active",
"QPainter::setWorldTransform: Painter not active",
"QPainter::setOpacity: Painter not active",
"QFont::setPixelSize: Pixel size <= 0 (-3)",
"QPainter::setFont: Painter not active",
"QPainter::restore: Unbalanced save/restore",
# This warning is shown at startup when using PyQt6
"<use> element image0 in wrong context!",
]
if msg_string not in BLACKLIST:
print(msg_string) # spyder: test-skip
def create_splash_screen(use_previous_factor=False):
"""
Create splash screen.
Parameters
----------
use_previous_factor: bool, optional
Use previous scale factor when creating the splash screen. This is used
when restarting Spyder, so the screen looks as expected. Default is
False.
"""
if not running_under_pytest():
# This is a good size for the splash screen image at a scale factor of
# 1. It corresponds to 75 ppi and preserves its aspect ratio.
width = 526
height = 432
# This allows us to use the previous scale factor for the splash screen
# shown when Spyder is restarted. Otherwise, it appears pixelated.
previous_factor = float(
CONF.get('main', 'prev_high_dpi_custom_scale_factors', 1)
)
# We need to increase the image size according to the scale factor to
# be displayed correctly.
# See https://falsinsoft.blogspot.com/2016/04/
# qt-snippet-render-svg-to-qpixmap-for.html for details.
if CONF.get('main', 'high_dpi_custom_scale_factor'):
if not use_previous_factor:
factors = CONF.get('main', 'high_dpi_custom_scale_factors')
factor = float(factors.split(":")[0])
else:
factor = previous_factor
else:
if not use_previous_factor:
factor = 1
else:
factor = previous_factor
# Save scale factor for restarts.
CONF.set('main', 'prev_high_dpi_custom_scale_factors', factor)
image = QImage(
int(width * factor), int(height * factor),
QImage.Format_ARGB32_Premultiplied
)
image.fill(0)
painter = QPainter(image)
renderer = QSvgRenderer(get_image_path('splash'))
renderer.render(painter)
painter.end()
# This is also necessary to make the image look good.
if factor > 1.0:
image.setDevicePixelRatio(factor)
pm = QPixmap.fromImage(image)
pm = pm.copy(0, 0, int(width * factor), int(height * factor))
splash = QSplashScreen(pm)
else:
splash = None
return splash
def set_links_color(app):
"""
Fix color for links.
This was taken from QDarkstyle, which is MIT licensed.
"""
color = SpyderPalette.COLOR_ACCENT_4
qcolor = QColor(color)
app_palette = app.palette()
app_palette.setColor(QPalette.Normal, QPalette.Link, qcolor)
app.setPalette(app_palette)
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# ---- Set icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# ---- Set font
# The try/except is necessary to run the main window tests on their own.
try:
app.set_font()
except AttributeError as error:
if running_under_pytest():
# Set font options to avoid a ton of Qt warnings when running tests
app_family = app.font().family()
app_size = app.font().pointSize()
CONF.set('appearance', 'app_font/family', app_family)
CONF.set('appearance', 'app_font/size', app_size)
from spyder.config.fonts import MEDIUM, MONOSPACE
CONF.set('appearance', 'monospace_app_font/family', MONOSPACE[0])
CONF.set('appearance', 'monospace_app_font/size', MEDIUM)
else:
# Raise in case the error is valid
raise error
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
# ---- Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ---- Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
if running_installer_test():
# This will exit Spyder with exit code 1 without invoking
# macOS system dialogue window.
raise SystemExit(1)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(WindowClass, app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
Parameters
----------
WindowClass: QMainWindow
Subclass to instantiate the Window.
app: QApplication
Instance to start the application.
splash: QSplashScreen
Splash screen instamce.
options: argparse.Namespace
Command line options passed to Spyder
args: list
List of file names passed to the Spyder executable in the
command line.
"""
# Main window
main = WindowClass(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.pre_visible_setup()
main.show()
main.post_visible_setup()
# Add a reference to the main window so it can be accessed from the
# application.
#
# Notes
# -----
# * **DO NOT** use it to access other plugins functionality through it.
app._main_window = main
if main.console:
main.console.start_interpreter(namespace={})
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
# ??? Do we need this?
if sys.platform == 'darwin' and is_conda_based_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
| Spy |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 73948,
"end": 75179
} | class ____(InvariantUnitTestSetup):
tested_module = np.fft
# These are all trivial, just preserve the unit.
def setup_method(self):
# Use real input; gets turned into complex as needed.
self.q = np.arange(128.0).reshape(8, -1) * u.s
def test_fft(self):
self.check(np.fft.fft)
def test_ifft(self):
self.check(np.fft.ifft)
def test_rfft(self):
self.check(np.fft.rfft)
def test_irfft(self):
self.check(np.fft.irfft)
def test_fft2(self):
self.check(np.fft.fft2)
def test_ifft2(self):
self.check(np.fft.ifft2)
def test_rfft2(self):
self.check(np.fft.rfft2)
def test_irfft2(self):
self.check(np.fft.irfft2)
def test_fftn(self):
self.check(np.fft.fftn)
def test_ifftn(self):
self.check(np.fft.ifftn)
def test_rfftn(self):
self.check(np.fft.rfftn)
def test_irfftn(self):
self.check(np.fft.irfftn)
def test_hfft(self):
self.check(np.fft.hfft)
def test_ihfft(self):
self.check(np.fft.ihfft)
def test_fftshift(self):
self.check(np.fft.fftshift)
def test_ifftshift(self):
self.check(np.fft.ifftshift)
| TestFFT |
python | getsentry__sentry | tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py | {
"start": 21228,
"end": 21599
} | class ____(TestTicketingIssueAlertHandlerBase):
def setUp(self) -> None:
super().setUp()
self.handler = JiraServerIssueAlertHandler()
def test_build_rule_action_blob(self) -> None:
for expected in JIRA_SERVER_ACTION_DATA_BLOBS:
self._test_build_rule_action_blob(expected, Action.Type.JIRA_SERVER)
| TestJiraServerIssueAlertHandler |
python | sanic-org__sanic | sanic/exceptions.py | {
"start": 11422,
"end": 13297
} | class ____(NotFound):
"""404 Not Found
A specific form of :class:`.NotFound` that is specifically when looking
for a file on the file system at a known path.
Args:
message (Optional[Union[str, bytes]], optional): The message to be sent to the client. If `None`
then the HTTP status 'Not Found' will be sent. Defaults to `None`.
path (Optional[PathLike], optional): The path, if any, to the file that could not
be found. Defaults to `None`.
relative_url (Optional[str], optional): A relative URL of the file. Defaults to `None`.
quiet (Optional[bool], optional): When `True`, the error traceback will be suppressed
from the logs. Defaults to `None`.
context (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will be
sent to the client upon exception. Defaults to `None`.
extra (Optional[Dict[str, Any]], optional): Additional mapping of key/value data that will NOT be
sent to the client when in PRODUCTION mode. Defaults to `None`.
headers (Optional[Dict[str, Any]], optional): Additional headers that should be sent with the HTTP
response. Defaults to `None`.
""" # noqa: E501
def __init__(
self,
message: Optional[Union[str, bytes]] = None,
path: Optional[PathLike] = None,
relative_url: Optional[str] = None,
*,
quiet: Optional[bool] = None,
context: Optional[dict[str, Any]] = None,
extra: Optional[dict[str, Any]] = None,
headers: Optional[dict[str, Any]] = None,
):
super().__init__(
message,
quiet=quiet,
context=context,
extra=extra,
headers=headers,
)
self.path = path
self.relative_url = relative_url
| FileNotFound |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/managed/reconciliation.py | {
"start": 12663,
"end": 14630
} | class ____(ManagedElementReconciler):
def __init__(
self,
fivetran: ResourceDefinition,
connectors: Iterable[FivetranConnector],
delete_unmentioned_resources: bool = False,
):
"""Reconciles Python-specified Fivetran resources with an Fivetran instance.
Args:
fivetran (ResourceDefinition): The Fivetran resource definition to reconcile against.
connectors (Iterable[FivetranConnector]): The Fivetran connector objects to reconcile.
delete_unmentioned_resources (bool): Whether to delete resources that are not mentioned in
the set of connectors provided. When True, all Fivetran instance contents are effectively
managed by the reconciler. Defaults to False.
"""
fivetran = check.inst_param(fivetran, "fivetran", ResourceDefinition)
self._fivetran_instance: FivetranResource = fivetran(build_init_resource_context())
self._connectors = list(
check.iterable_param(connectors, "connectors", of_type=FivetranConnector)
)
self._delete_unmentioned_resources = check.bool_param(
delete_unmentioned_resources, "delete_unmentioned_resources"
)
super().__init__()
def check(self, **kwargs) -> ManagedElementCheckResult:
return reconcile_config(
self._fivetran_instance,
self._connectors,
dry_run=True,
should_delete=self._delete_unmentioned_resources,
ignore_secrets=(not kwargs.get("include_all_secrets", False)),
)
def apply(self, **kwargs) -> ManagedElementCheckResult:
return reconcile_config(
self._fivetran_instance,
self._connectors,
dry_run=False,
should_delete=self._delete_unmentioned_resources,
ignore_secrets=(not kwargs.get("include_all_secrets", False)),
)
| FivetranManagedElementReconciler |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_exceptions.py | {
"start": 2499,
"end": 2673
} | class ____(Exception):
def __str__(self):
raise Exception("str() is broken")
# XXX This is not really enough, each *operation* should be tested!
| BrokenStrException |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 2315,
"end": 2414
} | class ____(floating):
name = "float32"
typecode = "f"
torch_dtype = torch.float32
| float32 |
python | ray-project__ray | python/ray/autoscaler/v2/schema.py | {
"start": 4246,
"end": 4310
} | class ____(ResourceDemand):
pass
@dataclass
| RayTaskActorDemand |
python | pdm-project__pdm | src/pdm/models/serializers.py | {
"start": 188,
"end": 850
} | class ____(json.JSONEncoder):
"""Expand standard json encoder to support dumps bytes object."""
bytes_ident = "PDM_BYTES_OBJECT"
def default(self, o: Any) -> Any:
if isinstance(o, bytes):
base64_string = base64.b64encode(o).decode()
return {"type": self.bytes_ident, "val": base64_string}
return super().default(o)
@classmethod
def object_hook(cls, obj: Any) -> Any:
if isinstance(obj, dict) and obj.get("type") == cls.bytes_ident:
val = obj.get("val")
if val is not None and isinstance(val, str):
return base64.b64decode(val)
return obj
| Encoder |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 301,
"end": 392
} | class ____(Protocol):
@property
def batch_shape(self) -> int:
return 0
| Class1 |
python | coleifer__peewee | playhouse/reflection.py | {
"start": 13754,
"end": 15780
} | class ____(Metadata):
column_map = {
'bigint': BigIntegerField,
'blob': BlobField,
'bool': BooleanField,
'boolean': BooleanField,
'char': CharField,
'date': DateField,
'datetime': DateTimeField,
'decimal': DecimalField,
'float': FloatField,
'integer': IntegerField,
'integer unsigned': IntegerField,
'int': IntegerField,
'long': BigIntegerField,
'numeric': DecimalField,
'real': FloatField,
'smallinteger': IntegerField,
'smallint': IntegerField,
'smallint unsigned': IntegerField,
'text': TextField,
'time': TimeField,
'varchar': CharField,
}
begin = r'(?:["\[\(]+)?'
end = r'(?:["\]\)]+)?'
re_foreign_key = (
r'(?:FOREIGN KEY\s*)?'
r'{begin}(.+?){end}\s+(?:.+\s+)?'
r'references\s+{begin}(.+?){end}'
r'\s*\(["|\[]?(.+?)["|\]]?\)').format(begin=begin, end=end)
re_varchar = r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$'
def _map_col(self, column_type):
raw_column_type = column_type.lower()
if raw_column_type in self.column_map:
field_class = self.column_map[raw_column_type]
elif re.search(self.re_varchar, raw_column_type):
field_class = CharField
else:
column_type = re.sub(r'\(.+\)', '', raw_column_type)
if column_type == '':
field_class = BareField
else:
field_class = self.column_map.get(column_type, UnknownField)
return field_class
def get_column_types(self, table, schema=None):
column_types = {}
columns = self.database.get_columns(table)
for column in columns:
column_types[column.name] = self._map_col(column.data_type)
return column_types, {}
_DatabaseMetadata = namedtuple('_DatabaseMetadata', (
'columns',
'primary_keys',
'foreign_keys',
'model_names',
'indexes'))
| SqliteMetadata |
python | doocs__leetcode | solution/1600-1699/1690.Stone Game VII/Solution.py | {
"start": 0,
"end": 429
} | class ____:
def stoneGameVII(self, stones: List[int]) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i > j:
return 0
a = s[j + 1] - s[i + 1] - dfs(i + 1, j)
b = s[j] - s[i] - dfs(i, j - 1)
return max(a, b)
s = list(accumulate(stones, initial=0))
ans = dfs(0, len(stones) - 1)
dfs.cache_clear()
return ans
| Solution |
python | pytest-dev__pytest | src/_pytest/pytester.py | {
"start": 3195,
"end": 6015
} | class ____:
def get_open_files(self) -> list[tuple[str, str]]:
if sys.version_info >= (3, 11):
# New in Python 3.11, ignores utf-8 mode
encoding = locale.getencoding()
else:
encoding = locale.getpreferredencoding(False)
out = subprocess.run(
("lsof", "-Ffn0", "-p", str(os.getpid())),
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
check=True,
text=True,
encoding=encoding,
).stdout
def isopen(line: str) -> bool:
return line.startswith("f") and (
"deleted" not in line
and "mem" not in line
and "txt" not in line
and "cwd" not in line
)
open_files = []
for line in out.split("\n"):
if isopen(line):
fields = line.split("\0")
fd = fields[0][1:]
filename = fields[1][1:]
if filename in IGNORE_PAM:
continue
if filename.startswith("/"):
open_files.append((fd, filename))
return open_files
def matching_platform(self) -> bool:
try:
subprocess.run(("lsof", "-v"), check=True)
except (OSError, subprocess.CalledProcessError):
return False
else:
return True
@hookimpl(wrapper=True, tryfirst=True)
def pytest_runtest_protocol(self, item: Item) -> Generator[None, object, object]:
lines1 = self.get_open_files()
try:
return (yield)
finally:
if hasattr(sys, "pypy_version_info"):
gc.collect()
lines2 = self.get_open_files()
new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
leaked_files = [t for t in lines2 if t[0] in new_fds]
if leaked_files:
error = [
f"***** {len(leaked_files)} FD leakage detected",
*(str(f) for f in leaked_files),
"*** Before:",
*(str(f) for f in lines1),
"*** After:",
*(str(f) for f in lines2),
f"***** {len(leaked_files)} FD leakage detected",
"*** function {}:{}: {} ".format(*item.location),
"See issue #2366",
]
item.warn(PytestFDWarning("\n".join(error)))
# used at least by pytest-xdist plugin
@fixture
def _pytest(request: FixtureRequest) -> PytestArg:
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
hooks."""
return PytestArg(request)
| LsofFdLeakChecker |
python | scrapy__scrapy | tests/test_loader.py | {
"start": 6314,
"end": 6430
} | class ____(ItemLoader):
title_in = MapCompose(str.upper)
title_out = TakeFirst()
| BaseNoInputReprocessingLoader |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 145615,
"end": 146717
} | class ____(nn.Module):
def __init__(self, ratio=2, kernel_size=None):
super().__init__()
self.ratio = ratio
self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
self.stride = ratio
self.pad = self.kernel_size // ratio - 1
self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size)
self.register_buffer("filter", filter, persistent=False)
def forward(self, hidden_states):
channels = hidden_states.shape[1]
hidden_states = F.pad(hidden_states, (self.pad, self.pad), mode="replicate")
hidden_states = self.ratio * F.conv_transpose1d(
hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels
)
hidden_states = hidden_states[..., self.pad_left : -self.pad_right]
return hidden_states
| UpSample1d |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/font.py | {
"start": 581,
"end": 1211
} | class ____(Parameter):
"""
Creates and controls a QFont value. Be careful when selecting options from the font dropdown. since not all
fonts are available on all systems
"""
itemClass = FontParameterItem
def _interpretValue(self, v):
if isinstance(v, str):
newVal = QtGui.QFont()
if not newVal.fromString(v):
raise ValueError(f'Error parsing font "{v}"')
v = newVal
return v
def saveState(self, filter=None):
state = super().saveState(filter)
state['value'] = state['value'].toString()
return state
| FontParameter |
python | walkccc__LeetCode | solutions/2786. Visit Array Positions to Maximize Score/2786.py | {
"start": 0,
"end": 607
} | class ____:
def maxScore(self, nums: list[int], x: int) -> int:
# Note that we always need to take nums[0], so the initial definition might
# not hold true.
# dp0 := the maximum score so far with `nums` ending in an even number
dp0 = nums[0] - (x if nums[0] % 2 == 1 else 0)
# dp0 := the maximum score so far with `nums` ending in an odd number
dp1 = nums[0] - (x if nums[0] % 2 == 0 else 0)
for i in range(1, len(nums)):
if nums[i] % 2 == 0:
dp0 = nums[i] + max(dp0, dp1 - x)
else:
dp1 = nums[i] + max(dp1, dp0 - x)
return max(dp0, dp1)
| Solution |
python | huggingface__transformers | tests/models/tvp/test_image_processing_tvp.py | {
"start": 1156,
"end": 4409
} | class ____:
def __init__(
self,
parent,
do_resize: bool = True,
size: dict[str, int] = {"longest_edge": 40},
do_center_crop: bool = False,
crop_size: dict[str, int] | None = None,
do_rescale: bool = False,
rescale_factor: int | float = 1 / 255,
do_pad: bool = True,
pad_size: dict[str, int] = {"height": 80, "width": 80},
fill: int | None = None,
pad_mode: PaddingMode | None = None,
do_normalize: bool = True,
image_mean: float | list[float] | None = [0.48145466, 0.4578275, 0.40821073],
image_std: float | list[float] | None = [0.26862954, 0.26130258, 0.27577711],
batch_size=2,
min_resolution=40,
max_resolution=80,
num_channels=3,
num_frames=2,
):
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.pad_size = pad_size
self.fill = fill
self.pad_mode = pad_mode
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.num_frames = num_frames
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"do_center_crop": self.do_center_crop,
"do_pad": self.do_pad,
"pad_size": self.pad_size,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to TvpImageProcessor,
assuming do_resize is set to True with a scalar size.
"""
if not batched:
return (int(self.pad_size["height"]), int(self.pad_size["width"]))
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| TvpImageProcessingTester |
python | getsentry__sentry | src/sentry/testutils/silo.py | {
"start": 5899,
"end": 26576
} | class ____:
"""Encapsulate the set of changes made to a test class by a SiloModeTestDecorator."""
silo_modes: frozenset[SiloMode]
regions: tuple[Region, ...]
def __post_init__(self) -> None:
if not self.silo_modes:
raise ValueError("silo_modes must not be empty")
@contextmanager
def test_config(self, silo_mode: SiloMode):
with (
override_regions(self.regions) if self.regions else nullcontext(),
assume_test_silo_mode(silo_mode, can_be_monolith=False),
):
yield
def _create_overriding_test_class(
self, test_class: type[TestCase], silo_mode: SiloMode, name_suffix: str = ""
) -> type[TestCase]:
silo_mode_attr = "__silo_mode_override"
@contextmanager
def create_context(obj: TestCase) -> Generator[None]:
tagged_class, tagged_mode = getattr(obj, silo_mode_attr)
if type(obj) is not tagged_class:
# This condition indicates that the test case inherits the silo mode
# attribute from a superclass. Although we could just test in that
# mode, doing so would silently skip other modes if the superclass is
# supposed to be tested in more than one mode. So, enforce a general
# rule that test case subclasses must have decorators of their own.
sup = tagged_class.__name__
sub = type(obj).__name__
raise SubclassNotSiloDecoratedException(
f"A test class ({sub}) extends a silo-decorated test class ({sup}) "
f"without a silo decorator of its own. Add a decorator to {sub}. "
f"(You probably want to copy and paste the decorator from {sup}. "
f"If you don't want to run {sub} in a silo mode at all, use "
f"`@no_silo_test`.)"
)
with self.test_config(tagged_mode):
yield
# Unfortunately, due to the way DjangoTestCase setup and app manipulation works, `override_settings` in a
# run method produces unusual, broken results. We're forced to wrap the hidden methods that invoke setup
# test method in order to use override_settings correctly in django test cases.
def _callSetUp(obj: TestCase) -> Any:
with create_context(obj):
return TestCase._callSetUp(obj) # type: ignore[attr-defined]
def _callTestMethod(obj: TestCase, method: Any) -> Any:
with create_context(obj):
return TestCase._callTestMethod(obj, method) # type: ignore[attr-defined]
new_methods = {"_callSetUp": _callSetUp, "_callTestMethod": _callTestMethod}
name = test_class.__name__ + name_suffix
new_class = type(name, (test_class,), new_methods)
setattr(new_class, silo_mode_attr, (new_class, silo_mode))
return cast(type[TestCase], new_class)
def _arrange_silo_modes(self) -> tuple[SiloMode, Collection[SiloMode]]:
"""Select which silo modes will be tested by the original and dynamic classes.
The return value is a (primary, secondary) pair. The "primary" silo mode is
the one to be tested by the decorated class without changing its name. The
"secondary" modes are tested by dynamically generated classes that are added
to the module namespace.
"""
if len(self.silo_modes) == 1:
(only_mode,) = self.silo_modes
return only_mode, ()
non_monolith_modes = [m for m in self.silo_modes if m != SiloMode.MONOLITH]
if len(non_monolith_modes) == 1:
(other_mode,) = non_monolith_modes
return other_mode, (SiloMode.MONOLITH,)
else:
return SiloMode.MONOLITH, non_monolith_modes
def _add_siloed_test_classes_to_module(self, test_class: type[TestCase]) -> type[TestCase]:
primary_mode, secondary_modes = self._arrange_silo_modes()
for silo_mode in secondary_modes:
siloed_test_class = self._create_overriding_test_class(
test_class, silo_mode, _get_test_name_suffix(silo_mode)
)
module = sys.modules[test_class.__module__]
setattr(module, siloed_test_class.__name__, siloed_test_class)
# Return the value to be wrapped by the original decorator
return self._create_overriding_test_class(test_class, primary_mode)
def _mark_parameterized_by_silo_mode(self, test_method: TestMethod) -> TestMethod:
def replacement_test_method(*args: Any, **kwargs: Any) -> None:
silo_mode = kwargs.pop("silo_mode")
with self.test_config(silo_mode):
test_method(*args, **kwargs)
orig_sig = inspect.signature(test_method)
new_test_method = functools.update_wrapper(replacement_test_method, test_method)
if "silo_mode" not in orig_sig.parameters:
new_params = tuple(orig_sig.parameters.values()) + (
inspect.Parameter("silo_mode", inspect.Parameter.KEYWORD_ONLY),
)
new_sig = orig_sig.replace(parameters=new_params)
new_test_method.__setattr__("__signature__", new_sig)
return pytest.mark.parametrize("silo_mode", sorted(self.silo_modes))(new_test_method)
def apply(self, decorated_obj: Any) -> Any:
is_test_case_class = isinstance(decorated_obj, type) and issubclass(decorated_obj, TestCase)
is_function = inspect.isfunction(decorated_obj)
if not (is_test_case_class or is_function):
raise ValueError("@SiloModeTest must decorate a function or TestCase class")
if SENTRY_USE_MONOLITH_DBS:
# In this case, skip modifying the object and let it run in the default
# silo mode (monolith)
return decorated_obj
if is_test_case_class:
return self._add_siloed_test_classes_to_module(decorated_obj)
return self._mark_parameterized_by_silo_mode(decorated_obj)
all_silo_test = SiloModeTestDecorator(*SiloMode)
"""
Apply to test functions/classes to indicate that tests are
expected to pass in CONTROL, REGION and MONOLITH modes.
"""
no_silo_test = SiloModeTestDecorator(SiloMode.MONOLITH)
"""
Apply to test functions/classes to indicate that tests are
free of silo mode logic and hybrid cloud service usage.
"""
control_silo_test = SiloModeTestDecorator(SiloMode.CONTROL)
"""
Apply to test functions/classes to indicate that tests are
expected to pass with the current silo mode set to CONTROL.
"""
region_silo_test = SiloModeTestDecorator(SiloMode.REGION)
"""
Apply to test functions/classes to indicate that tests are
expected to pass with the current silo mode set to REGION.
"""
# assume_test_silo_mode vs assume_test_silo_mode_of: What's the difference?
#
# These two functions are similar ways to express the same thing. Generally,
# assume_test_silo_mode_of is preferable because it does more to communicate your
# intent and matches the style used by functions such as `router.db_for_write`. But
# assume_test_silo_mode is used in more places because it has existed longer.
@contextmanager
def assume_test_silo_mode(
    desired_silo: SiloMode, can_be_monolith: bool = True, region_name: str | None = None
) -> Any:
    """Potential swap the silo mode in a test class or factory, useful for creating multi SiloMode models and executing
    test code in a special silo context.
    In monolith mode, this context manager has no effect.
    This context manager, should never be run outside of test contexts. In fact, it depends on test code that will
    not exist in production!
    When run in either Region or Control silo modes, it forces the settings.SILO_MODE to the desired_silo.
    Notably, this won't be thread safe, so again, only use this in factories and test cases, not code, or you'll
    have a nightmare when your (threaded) acceptance tests bleed together and do whacky things :o)
    Use this in combination with factories or test setup code to create models that don't correspond with your
    given test mode.
    """
    # Only swapping the silo mode if we are already in a silo mode.
    if can_be_monolith and SiloMode.get_current_mode() == SiloMode.MONOLITH:
        desired_silo = SiloMode.MONOLITH
    with override_settings(SILO_MODE=desired_silo):
        if desired_silo == SiloMode.REGION:
            region_dir = get_test_env_directory()
            # Swap into an explicit region when one was requested, otherwise
            # fall back to the test environment's default region.
            if region_name is None:
                with region_dir.swap_to_default_region():
                    yield
            else:
                with region_dir.swap_to_region_by_name(region_name):
                    yield
        else:
            # Non-region modes clear SENTRY_REGION for the enclosed block.
            with override_settings(SENTRY_REGION=None):
                yield
@contextmanager
def assume_test_silo_mode_of(*models: type[BaseModel], can_be_monolith: bool = True) -> Any:
    """Potentially swap to the silo mode to match the provided model classes.
    The argument should be one or more model classes that are scoped to exactly one
    non-monolith mode. That is, they must be tagged with `control_silo_model` or
    `region_silo_model`. The enclosed context is swapped into the appropriate
    mode, allowing the model to be accessed.
    If no silo-scoped models are provided, no mode swap is performed.
    The intent is that you list the cross-silo models that you intend to access
    within the block. However, this is for the sake of expressiveness only. The
    context will not actually check that you access only those models; it will allow
    you to access any model that happens to share the same silo mode.
    """
    # NOTE: the docstring above was previously placed *after* this import,
    # which made it a no-op string expression rather than the function's
    # __doc__. It must be the first statement of the function body.
    # Local import, presumably to avoid a circular import at module load time.
    from sentry.db.models.base import ModelSiloLimit

    def unpack_modes() -> Iterable[SiloMode]:
        # Yield every silo mode declared by each model's `silo_limit` meta option.
        for model in models:
            try:
                meta = getattr(model, "_meta")
            except AttributeError as e:
                raise ValueError(
                    f"Expected a model class with a _meta attribute: {model.__name__} did not have `_meta`"
                ) from e
            silo_limit: ModelSiloLimit | None = getattr(meta, "silo_limit", None)
            if silo_limit:
                yield from silo_limit.modes

    unique_modes = {mode for mode in unpack_modes() if mode != SiloMode.MONOLITH}
    if not unique_modes:
        # No silo-scoped models given: run the block without any mode swap.
        yield
        return
    if len(unique_modes) > 1:
        model_names = [m.__name__ for m in models]
        raise ValueError(
            f"Models ({model_names!r}) don't share a unique silo mode ({unique_modes!r})"
        )
    (mode,) = unique_modes
    with assume_test_silo_mode(mode, can_be_monolith):
        yield
def protected_table(table: str, operation: str) -> re.Pattern:
    """Build a case-insensitive pattern matching `<operation> ... "<table>"` SQL."""
    pattern = '{}[^"]+"{}"'.format(operation, table)
    return re.compile(pattern, re.IGNORECASE)
_protected_operations: list[re.Pattern] = []
def get_protected_operations() -> list[re.Pattern]:
    """Return regex patterns for SQL statements that must be role-fenced.

    Built lazily on first call and cached in the module-level
    ``_protected_operations`` list; later calls return the cached list.
    """
    from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey
    from sentry.hybridcloud.outbox.base import ReplicatedControlModel, ReplicatedRegionModel
    # Already built once: reuse the cached patterns.
    if len(_protected_operations):
        return _protected_operations
    # Protect Foreign Keys using hybrid cloud models from being deleted without using the
    # privileged user. Deletion should only occur when the developer is actively aware
    # of the need to generate outboxes.
    seen_models: MutableSet[type] = set()
    for app_config in apps.get_app_configs():
        for model in iter_models(app_config.name):
            for field in model._meta.fields:
                if not isinstance(field, HybridCloudForeignKey):
                    continue
                fk_model = field.foreign_model
                if fk_model is None or fk_model in seen_models:
                    continue
                seen_models.add(fk_model)
                _protected_operations.append(protected_table(fk_model._meta.db_table, "delete"))
            # Replicated models require outboxes for every write, so guard
            # inserts/updates/deletes on their tables as well.
            if issubclass(model, ReplicatedControlModel) or issubclass(
                model, ReplicatedRegionModel
            ):
                _protected_operations.append(protected_table(model._meta.db_table, "insert"))
                _protected_operations.append(protected_table(model._meta.db_table, "update"))
                _protected_operations.append(protected_table(model._meta.db_table, "delete"))
    # Protect inserts/updates that require outbox messages.
    _protected_operations.extend(
        [
            protected_table("sentry_user", "insert"),
            protected_table("sentry_user", "update"),
            protected_table("sentry_user", "delete"),
            protected_table("sentry_organizationmember", "insert"),
            protected_table("sentry_organizationmember", "update"),
            protected_table("sentry_organizationmember", "delete"),
            protected_table("sentry_organizationmembermapping", "insert"),
        ]
    )
    return _protected_operations
def validate_protected_queries(queries: Sequence[Mapping[str, str | None]]) -> None:
    """
    Validate a list of queries to ensure that protected queries
    are wrapped in role_override fence values.
    See sentry.db.postgres.roles for where fencing queries come from.

    Raises AssertionError (with surrounding query context) when a protected
    statement runs outside a fence, or when a fence marker is malformed.
    """
    context_queries = 5  # how many neighbouring queries to include in failure output
    fence_depth = 0  # > 0 while inside one or more role-elevation fences
    start_fence_index = 0  # index of the most recent fence start, for context slicing
    for index, query in enumerate(queries):
        sql = query["sql"]
        if sql is None:
            continue
        match = match_fence_query(sql)
        if match:
            operation = match.group("operation")
            if operation == "start":
                fence_depth += 1
                start_fence_index = index
            elif operation == "end":
                # Clamp at zero so an unbalanced "end" cannot go negative.
                fence_depth = max(fence_depth - 1, 0)
            else:
                # Fix: original message had a typo ("encounted").
                raise AssertionError("Invalid fencing operation encountered")
        for protected in get_protected_operations():
            if protected.match(sql) and fence_depth == 0:
                start = max(0, start_fence_index - context_queries)
                end = min(index + context_queries, len(queries))
                query_slice = queries[start:end]
                msg = [
                    "Found protected operation without explicit outbox escape!",
                    "",
                    sql,
                    "",
                    "Was not surrounded by role elevation queries, and could corrupt data if outboxes are not generated.",
                    "If you are confident that outboxes are being generated, wrap the "
                    "operation that generates this query with the `unguarded_write()` ",
                    "context manager to resolve this failure. For example:",
                    "",
                    "with unguarded_write(using=router.db_for_write(OrganizationMembership)):",
                    "    member.delete()",
                    "",
                    "Query logs:",
                    "",
                ]
                # Fix: use a distinct loop variable; the original reused `query`,
                # shadowing the enclosing loop's variable.
                for context_query in query_slice:
                    if context_query["sql"] is None:
                        continue
                    msg.append(context_query["sql"])
                    if context_query["sql"] == sql:
                        # Underline the offending statement in the context dump.
                        msg.append("^" * len(sql))
                raise AssertionError("\n".join(msg))
def iter_models(app_name: str | None = None) -> Iterable[type[Model]]:
    """Yield registered models, skipping Django-internal, test and fixture models.

    When `app_name` is given, only that app's models are yielded.
    """
    for app_label, registered in apps.all_models.items():
        if app_name is not None and app_label != app_name:
            continue
        for model_cls in registered.values():
            module = model_cls.__module__
            if module.startswith("django.") or "tests." in module or "fixtures." in module:
                continue
            yield model_cls
def validate_models_have_silos(exemptions: set[type[Model]], app_name: str | None = None) -> None:
    """Assert every non-exempt model declares a REGION and/or CONTROL placement."""
    for model in iter_models(app_name):
        if model in exemptions:
            continue
        modes = _model_silo_limit(model).modes
        # A model is properly placed when at least one silo mode is declared.
        if SiloMode.REGION in modes or SiloMode.CONTROL in modes:
            continue
        raise ValueError(
            f"{model!r} is marked as a pending model, but either needs a placement or an exemption in this test."
        )
def validate_no_cross_silo_foreign_keys(
    exemptions: set[tuple[type[Model], type[Model]]], app_name: str | None = None
) -> set[Any]:
    """Validate every model's relations; return the exemption pairs actually used."""
    exercised: set[Any] = set()
    for model in iter_models(app_name):
        exercised.update(validate_model_no_cross_silo_foreign_keys(model, exemptions))
    return exercised
def validate_no_cross_silo_deletions(
    exemptions: set[tuple[type[Model], type[Model]]], app_name: str | None = None
) -> None:
    """Assert that deletion cascades never cross silo-mode boundaries.

    For each silo-placed model, instantiate its deletion task and walk its
    child relations; every child model must support each of the parent's silo
    modes, unless the (parent, child) pair is exempted in either order.
    """
    from sentry import deletions
    from sentry.deletions.base import BaseDeletionTask
    from sentry.incidents.grouptype import MetricIssue
    from sentry.incidents.utils.types import DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION
    from sentry.workflow_engine.models import DataSource, Detector
    # hack for datasource registry, needs type
    # (these models require a "type" kwarg to be instantiated below)
    instantiation_params: dict[type[Model], dict[str, str]] = {
        DataSource: {"type": DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION},
        Detector: {"type": MetricIssue.slug},
    }
    for model_class in iter_models(app_name):
        # Models without a silo placement are skipped entirely.
        if not hasattr(model_class._meta, "silo_limit"):
            continue
        deletion_task: BaseDeletionTask = deletions.get(model=model_class, query={})
        for relation in deletion_task.get_child_relations(
            model_class(**instantiation_params.get(model_class, {}))
        ):
            to_model = relation.params["model"]
            if (model_class, to_model) in exemptions or (to_model, model_class) in exemptions:
                continue
            for mode in _model_silo_limit(model_class).modes:
                if mode not in _model_silo_limit(to_model).modes:
                    raise ValueError(
                        f"Deletions for {model_class!r} cascade to {to_model!r}, but does not belong to the same silo mode. Please remove this relation from get_child_relations in deletions configuration"
                    )
def _is_relation_cross_silo(
    model: type[Model] | Literal["self"],
    related: type[Model] | Literal["self"],
) -> bool:
    """Return True when `model` can run in a silo mode that `related` cannot."""
    # Self-referential relations are never cross-silo.
    if model == "self" or related == "self":
        return False
    related_modes = _model_silo_limit(related).modes
    return any(mode not in related_modes for mode in _model_silo_limit(model).modes)
def validate_relation_does_not_cross_silo_foreign_keys(
    model: type[Model] | Literal["self"],
    related: type[Model] | Literal["self"],
) -> None:
    """Raise ValueError if `model` can run in a silo mode where `related` cannot."""
    # Self-referential relations are always allowed.
    if model == "self" or related == "self":
        return
    related_modes = _model_silo_limit(related).modes
    for mode in _model_silo_limit(model).modes:
        if mode in related_modes:
            continue
        raise ValueError(
            f"{model!r} runs in {mode}, but is related to {related!r} which does not. Add this relationship pair as an exception or drop the foreign key."
        )
def validate_hcfk_has_global_id(model: type[Model], related_model: type[Model]):
    """Reject HybridCloudForeignKey targets whose ids are not globally unique.

    Snowflake-id models are acceptable targets even in region silos; any other
    region-silo model is rejected because its ids are not global.
    """
    if uses_snowflake_id(related_model):
        return
    if SiloMode.REGION not in _model_silo_limit(related_model).modes:
        return
    raise ValueError(
        f"{related_model!r} runs in {SiloMode.REGION}, but is related to {model!r} via a HybridCloudForeignKey! Region model ids are not global, unless you use a snowflake id."
    )
def validate_model_no_cross_silo_foreign_keys(
    model: type[Model],
    exemptions: set[tuple[type[Model], type[Model]]],
) -> set[Any]:
    """Validate that `model`'s relational fields do not cross silo boundaries.

    Returns the subset of exemption pairs that were both exempted and actually
    cross-silo — presumably so callers can detect stale exemptions (confirm at
    call sites). Raises ValueError for any non-exempt cross-silo relation.
    """
    from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey
    seen: set[Any] = set()
    for field in model._meta.fields:
        if isinstance(field, RelatedField):
            # Check the exemption list in both orientations before validating.
            if (model, field.related_model) in exemptions:
                if _is_relation_cross_silo(model, field.related_model):
                    seen = seen | {(model, field.related_model)}
                    continue
            if (field.related_model, model) in exemptions:
                if _is_relation_cross_silo(field.related_model, model):
                    seen = seen | {(field.related_model, model)}
                    continue
            # Validate both directions: each side must run in every mode of the other.
            validate_relation_does_not_cross_silo_foreign_keys(model, field.related_model)
            validate_relation_does_not_cross_silo_foreign_keys(field.related_model, model)
        if isinstance(field, HybridCloudForeignKey):
            validate_hcfk_has_global_id(model, field.foreign_model)
    return seen
| _SiloModeTestModification |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 61223,
"end": 62233
} | class ____(Response):
"""
Response of queues.get_num_entries endpoint.
:param num: Number of entries
:type num: int
"""
_service = "queues"
_action = "get_num_entries"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"num": {"description": "Number of entries", "type": ["integer", "null"]}},
"type": "object",
}
def __init__(self, num: Optional[int] = None, **kwargs: Any) -> None:
super(GetNumEntriesResponse, self).__init__(**kwargs)
self.num = num
@schema_property("num")
def num(self) -> Optional[int]:
return self._property_num
    @num.setter
    def num(self, value: Optional[int]) -> None:
        # Validated setter: None is stored as-is; whole-number floats (e.g.
        # values decoded from JSON) are coerced to int before type-checking.
        if value is None:
            self._property_num = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "num", six.integer_types)
        self._property_num = value
| GetNumEntriesResponse |
python | pennersr__django-allauth | tests/apps/headless/spec/internal/test_openapikit.py | {
"start": 342,
"end": 2232
} | class ____:
optional_integer: Optional[int]
integer: int
optional_string: Optional[str]
string: str
number: float = field(
metadata={
"description": "Some float",
"example": "3.14",
}
)
nested: Optional[NestedDataClass]
def test_spec_for_dataclass():
spec = spec_for_dataclass(ExampleDataclass)
assert spec == (
{
"properties": {
"integer": {
"type": "integer",
},
"number": {
"description": "Some float",
"example": "3.14",
"format": "float",
"type": "number",
},
"optional_integer": {
"type": "integer",
},
"optional_string": {
"type": "string",
},
"string": {
"type": "string",
},
"nested": {
"example": {"integer": 42},
"properties": {
"string": {
"type": "string",
},
"integer": {
"description": "Some nested int",
"example": 42,
"type": "integer",
},
},
"required": [
"string",
"integer",
],
"type": "object",
},
},
"required": [
"integer",
"string",
"number",
],
"type": "object",
},
{
"number": "3.14",
},
)
| ExampleDataclass |
python | langchain-ai__langchain | libs/core/langchain_core/documents/compressor.py | {
"start": 383,
"end": 2017
} | class ____(BaseModel, ABC):
"""Base class for document compressors.
This abstraction is primarily used for post-processing of retrieved documents.
`Document` objects matching a given query are first retrieved.
Then the list of documents can be further processed.
For example, one could re-rank the retrieved documents using an LLM.
!!! note
Users should favor using a `RunnableLambda` instead of sub-classing from this
interface.
"""
@abstractmethod
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Callbacks | None = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context.
Args:
documents: The retrieved `Document` objects.
query: The query context.
callbacks: Optional `Callbacks` to run during compression.
Returns:
The compressed documents.
"""
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Callbacks | None = None,
) -> Sequence[Document]:
"""Async compress retrieved documents given the query context.
Args:
documents: The retrieved `Document` objects.
query: The query context.
callbacks: Optional `Callbacks` to run during compression.
Returns:
The compressed documents.
"""
return await run_in_executor(
None, self.compress_documents, documents, query, callbacks
)
| BaseDocumentCompressor |
python | qdrant__qdrant-client | qdrant_client/embed/type_inspector.py | {
"start": 325,
"end": 5426
} | class ____:
"""Inspector which tries to find at least one occurrence of an object requiring inference
Inspector is stateful and accumulates parsed model schemes in its parser.
Attributes:
parser: ModelSchemaParser instance to inspect model json schemas
"""
def __init__(self, parser: Optional[ModelSchemaParser] = None) -> None:
self.parser = ModelSchemaParser() if parser is None else parser
def inspect(self, points: Union[Iterable[BaseModel], BaseModel]) -> bool:
"""Looks for at least one occurrence of an object requiring inference in the received models
Args:
points: models to inspect
Returns:
True if at least one object requiring inference is found, False otherwise
"""
if isinstance(points, BaseModel):
self.parser.parse_model(points.__class__)
return self._inspect_model(points)
elif isinstance(points, dict):
for value in points.values():
if self.inspect(value):
return True
elif isinstance(points, Iterable):
for point in points:
if isinstance(point, BaseModel):
self.parser.parse_model(point.__class__)
if self._inspect_model(point):
return True
else:
return False
return False
def _inspect_model(self, model: BaseModel, paths: Optional[list[FieldPath]] = None) -> bool:
if isinstance(model, get_args(INFERENCE_OBJECT_TYPES)):
return True
paths = (
self.parser.path_cache.get(model.__class__.__name__, []) if paths is None else paths
)
for path in paths:
type_found = self._inspect_inner_models(
model, path.current, path.tail if path.tail else []
)
if type_found:
return True
return False
def _inspect_inner_models(
self, original_model: BaseModel, current_path: str, tail: list[FieldPath]
) -> bool:
def inspect_recursive(member: BaseModel) -> bool:
recursive_paths = []
for field_name in model_fields_set(member):
if field_name in self.parser.name_recursive_ref_mapping:
mapped_model_name = self.parser.name_recursive_ref_mapping[field_name]
recursive_paths.extend(self.parser.path_cache[mapped_model_name])
if recursive_paths:
found = self._inspect_model(member, recursive_paths)
if found:
return True
return False
model = getattr(original_model, current_path, None)
if model is None:
return False
if isinstance(model, get_args(INFERENCE_OBJECT_TYPES)):
return True
if isinstance(model, BaseModel):
type_found = inspect_recursive(model)
if type_found:
return True
for next_path in tail:
type_found = self._inspect_inner_models(
model, next_path.current, next_path.tail if next_path.tail else []
)
if type_found:
return True
return False
elif isinstance(model, list):
for current_model in model:
if isinstance(current_model, get_args(INFERENCE_OBJECT_TYPES)):
return True
if not isinstance(current_model, BaseModel):
continue
type_found = inspect_recursive(current_model)
if type_found:
return True
for next_path in tail:
for current_model in model:
type_found = self._inspect_inner_models(
current_model, next_path.current, next_path.tail if next_path.tail else []
)
if type_found:
return True
return False
elif isinstance(model, dict):
for key, values in model.items():
values = [values] if not isinstance(values, list) else values
for current_model in values:
if isinstance(current_model, get_args(INFERENCE_OBJECT_TYPES)):
return True
if not isinstance(current_model, BaseModel):
continue
found_type = inspect_recursive(current_model)
if found_type:
return True
for next_path in tail:
for current_model in values:
found_type = self._inspect_inner_models(
current_model,
next_path.current,
next_path.tail if next_path.tail else [],
)
if found_type:
return True
return False
| Inspector |
python | google__jax | jax/experimental/array_serialization/serialization_test.py | {
"start": 28984,
"end": 29248
} | class ____:
    def __init__(self, a):
        # NOTE(review): appears to be an opaque payload wrapper used to exercise
        # custom-type registration in serialization tests — confirm with callers.
        self.a = a
# we're testing custom type registration which modifies the global registry
# so need to ensure we're not running multiple custom types tests in parallel
custom_types_threading_lock = threading.Lock()
| CustomStatic |
python | getsentry__sentry | tests/sentry/feedback/endpoints/test_organization_feedback_summary.py | {
"start": 2052,
"end": 10473
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-user-feedback-summary"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.team = self.create_team(
organization=self.org, name="Sentaur Squad", members=[self.user]
)
self.project1 = self.create_project(teams=[self.team])
self.project2 = self.create_project(teams=[self.team])
self.features = {
"organizations:user-feedback-ai-summaries": True,
"organizations:user-feedback-ai-summaries-cache": True,
}
self.url = reverse(
self.endpoint,
kwargs={"organization_id_or_slug": self.org.slug},
)
# Mock patchers.
self.mock_has_seer_access_patcher = patch(
"sentry.feedback.endpoints.organization_feedback_summary.has_seer_access",
return_value=True,
)
self.mock_get_summary_from_seer_patcher = patch(
"sentry.feedback.endpoints.organization_feedback_summary.get_summary_from_seer",
return_value="Test summary of feedback",
)
self.mock_min_feedbacks_patcher = patch(
"sentry.feedback.endpoints.organization_feedback_summary.MIN_FEEDBACKS_TO_SUMMARIZE",
1,
)
self.mock_has_seer_access = self.mock_has_seer_access_patcher.start()
self.mock_get_summary_from_seer = self.mock_get_summary_from_seer_patcher.start()
self.mock_min_feedbacks_patcher.start()
def tearDown(self) -> None:
self.mock_has_seer_access_patcher.stop()
self.mock_get_summary_from_seer_patcher.stop()
self.mock_min_feedbacks_patcher.stop()
super().tearDown()
def save_feedback(self, project: Project, message: str, dt: datetime | None = None) -> None:
event = mock_feedback_event(project.id, message=message, dt=dt)
create_feedback_issue(event, project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE)
def test_get_feedback_summary_without_feature_flag(self) -> None:
response = self.get_error_response(self.org.slug)
assert response.status_code == 403
def test_get_feedback_summary_without_seer_access(self) -> None:
self.mock_has_seer_access.return_value = False
with self.feature(self.features):
response = self.get_error_response(self.org.slug)
assert response.status_code == 403
@patch("sentry.feedback.endpoints.organization_feedback_summary.cache")
def test_get_feedback_summary_cache_miss(self, mock_cache: MagicMock) -> None:
mock_cache.get.return_value = None
self.save_feedback(self.project1, "hello")
with self.feature(self.features):
response = self.get_success_response(self.org.slug)
assert response.data["success"] is True
assert response.data["summary"] == "Test summary of feedback"
assert response.data["numFeedbacksUsed"] == 1
assert self.mock_get_summary_from_seer.call_count == 1
assert self.mock_get_summary_from_seer.call_args[0][0] == ["hello"]
mock_cache.set.assert_called_once()
assert mock_cache.set.call_args[0][1] == {
"summary": "Test summary of feedback",
"numFeedbacksUsed": 1,
}
@patch("sentry.feedback.endpoints.organization_feedback_summary.cache")
def test_get_feedback_summary_cache_hit(self, mock_cache: MagicMock) -> None:
mock_cache.get.return_value = {
"summary": "Test cached summary of feedback",
"numFeedbacksUsed": 13,
}
with self.feature(self.features):
response = self.get_success_response(self.org.slug)
assert response.data["success"] is True
assert response.data["summary"] == "Test cached summary of feedback"
assert response.data["numFeedbacksUsed"] == 13
mock_cache.get.assert_called_once()
mock_cache.set.assert_not_called()
def test_get_feedback_summary_with_date_filter(self) -> None:
# Created immediately
self.save_feedback(self.project1, "New feedback")
# Created ~21 days ago - will not be included in the summary
self.save_feedback(self.project1, "Old feedback", dt=datetime.now(UTC) - timedelta(days=21))
with self.feature(self.features):
response = self.get_success_response(self.org.slug, statsPeriod="14d")
assert response.data["success"] is True
assert response.data["summary"] == "Test summary of feedback"
assert response.data["numFeedbacksUsed"] == 1
assert self.mock_get_summary_from_seer.call_count == 1
assert self.mock_get_summary_from_seer.call_args[0][0] == ["New feedback"]
@patch("sentry.feedback.endpoints.organization_feedback_summary.cache")
def test_get_feedback_summary_project_filter(self, mock_cache: MagicMock) -> None:
mock_cache.get.return_value = None
self.save_feedback(self.project1, "Project 1 feedback")
# Created ~21 days ago - will not be included in the summary
self.save_feedback(self.project2, "Project 2 feedback")
with self.feature(self.features):
response = self.get_success_response(self.org.slug, project=[self.project1.id])
assert response.data["success"] is True
assert response.data["summary"] == "Test summary of feedback"
assert response.data["numFeedbacksUsed"] == 1
assert self.mock_get_summary_from_seer.call_count == 1
assert set(self.mock_get_summary_from_seer.call_args[0][0]) == {"Project 1 feedback"}
with self.feature(self.features):
response = self.get_success_response(
self.org.slug, project=[self.project1.id, self.project2.id]
)
assert response.data["success"] is True
assert response.data["summary"] == "Test summary of feedback"
assert response.data["numFeedbacksUsed"] == 2
assert self.mock_get_summary_from_seer.call_count == 2
assert set(self.mock_get_summary_from_seer.call_args[0][0]) == {
"Project 1 feedback",
"Project 2 feedback",
}
with self.feature(self.features):
response = self.client.get(
f"{self.url}?project={self.project1.id}&project={self.project2.id}"
)
assert response.data["success"] is True
assert response.data["summary"] == "Test summary of feedback"
assert response.data["numFeedbacksUsed"] == 2
assert self.mock_get_summary_from_seer.call_count == 3
assert set(self.mock_get_summary_from_seer.call_args[0][0]) == {
"Project 1 feedback",
"Project 2 feedback",
}
def test_get_feedback_summary_too_few_feedbacks(self) -> None:
with self.feature(self.features):
response = self.get_success_response(self.org.slug)
assert response.data["success"] is False
@patch(
"sentry.feedback.endpoints.organization_feedback_summary.MAX_FEEDBACKS_TO_SUMMARIZE_CHARS",
1,
)
def test_get_feedback_summary_character_limit(self) -> None:
self.save_feedback(self.project1, "a", dt=datetime.now(UTC) - timedelta(hours=3))
self.save_feedback(self.project1, "b", dt=datetime.now(UTC) - timedelta(hours=2))
self.save_feedback(self.project1, "c", dt=datetime.now(UTC) - timedelta(hours=1))
with self.feature(self.features):
response = self.get_success_response(self.org.slug)
assert response.data["success"] is True
assert response.data["summary"] == "Test summary of feedback"
assert response.data["numFeedbacksUsed"] == 1
assert self.mock_get_summary_from_seer.call_count == 1
# Most recent is prioritized.
assert self.mock_get_summary_from_seer.call_args[0][0] == ["c"]
def test_get_summary_from_seer_failed(self) -> None:
self.mock_get_summary_from_seer.return_value = None
self.save_feedback(self.project1, "hello")
with self.feature(self.features):
response = self.get_error_response(self.org.slug)
assert response.status_code == 500
assert response.data["detail"] == "Failed to generate a summary for a list of feedbacks"
| OrganizationFeedbackSummaryTest |
python | astropy__astropy | astropy/constants/constant.py | {
"start": 3359,
"end": 8842
} | class ____(Quantity, metaclass=ConstantMeta):
"""A physical or astronomical constant.
These objects are quantities that are meant to represent physical
constants.
Parameters
----------
abbrev : str
A typical ASCII text abbreviation of the constant, generally
the same as the Python variable used for this constant.
name : str
Full constant name.
value : numbers.Real
Constant value. Note that this should be a bare number, not a
|Quantity|.
unit : str
String representation of the constant units.
uncertainty : numbers.Real
Absolute uncertainty in constant value. Note that this should be
a bare number, not a |Quantity|.
reference : str, optional
Reference where the value is taken from.
system : str
System of units in which the constant is defined. This can be
`None` when the constant's units can be directly converted
between systems.
"""
_registry = {}
_has_incompatible_units = set()
def __new__(
cls, abbrev, name, value, unit, uncertainty, reference=None, system=None
):
if reference is None:
reference = getattr(cls, "default_reference", None)
if reference is None:
raise TypeError(f"{cls} requires a reference.")
name_lower = name.lower()
instances = cls._registry.setdefault(name_lower, {})
# By-pass Quantity initialization, since units may not yet be
# initialized here, and we store the unit in string form.
inst = np.array(value).view(cls)
if system in instances:
warnings.warn(
f"Constant {name!r} already has a definition in "
f"the {system!r} system from {reference!r} reference",
AstropyUserWarning,
)
for c in instances.values():
if system is not None and not hasattr(c.__class__, system):
setattr(c, system, inst)
if c.system is not None and not hasattr(inst.__class__, c.system):
setattr(inst, c.system, c)
instances[system] = inst
inst._abbrev = abbrev
inst._name = name
inst._value = value
inst._unit_string = unit
inst._uncertainty = uncertainty
inst._reference = reference
inst._system = system
inst._checked_units = False
return inst
def __repr__(self):
return (
f"<{self.__class__} "
f"name={self.name!r} "
f"value={self.value} "
f"uncertainty={self.uncertainty} "
f"unit={str(self.unit)!r} "
f"reference={self.reference!r}>"
)
def __str__(self):
return (
f" Name = {self.name}\n"
f" Value = {self.value}\n"
f" Uncertainty = {self.uncertainty}\n"
f" Unit = {self.unit}\n"
f" Reference = {self.reference}"
)
def __quantity_subclass__(self, unit):
return super().__quantity_subclass__(unit)[0], False
def copy(self):
"""
Return a copy of this `Constant` instance. Since they are by
definition immutable, this merely returns another reference to
``self``.
"""
return self
__deepcopy__ = __copy__ = copy
@property
def abbrev(self):
"""A typical ASCII text abbreviation of the constant, also generally
the same as the Python variable used for this constant.
"""
return self._abbrev
@property
def name(self):
"""The full name of the constant."""
return self._name
@lazyproperty
def _unit(self):
"""The unit(s) in which this constant is defined."""
return Unit(self._unit_string)
@property
def uncertainty(self):
"""The known absolute uncertainty in this constant's value."""
return self._uncertainty
@property
def reference(self):
"""The source used for the value of this constant."""
return self._reference
@property
def system(self):
"""The system of units in which this constant is defined (typically
`None` so long as the constant's units can be directly converted
between systems).
"""
return self._system
def _instance_or_super(self, key):
instances = self._registry[self.name.lower()]
inst = instances.get(key)
if inst is not None:
return inst
else:
return getattr(super(), key)
@property
def si(self):
"""If the Constant is defined in the SI system return that instance of
the constant, else convert to a Quantity in the appropriate SI units.
"""
return self._instance_or_super("si")
@property
def cgs(self):
"""If the Constant is defined in the CGS system return that instance of
the constant, else convert to a Quantity in the appropriate CGS units.
"""
return self._instance_or_super("cgs")
def __array_finalize__(self, obj):
for attr in (
"_abbrev",
"_name",
"_value",
"_unit_string",
"_uncertainty",
"_reference",
"_system",
):
setattr(self, attr, getattr(obj, attr, None))
self._checked_units = getattr(obj, "_checked_units", False)
| Constant |
python | getsentry__sentry | src/sentry/ingest/transaction_clusterer/rules.py | {
"start": 876,
"end": 2941
} | class ____:
"""Store rules in both project options and Redis.
Why Redis?
We want to update the rule lifetimes when a transaction has been sanitized
with that rule. That load is very high for the project options to handle,
but Redis is capable of doing so.
Then, why project options?
Redis is not a persistent store, and rules should be persistent. As a
result, at some point the up-to-date lifetimes of rules in Redis must be
updated and merged back to project options. This operation can't happen too
frequently, and the task to generate rules meets the criteria and thus is
responsible for that.
"""
def __init__(self, namespace: ClustererNamespace):
self._rules_prefix = namespace.value.rules
def _get_rules_key(self, project: Project) -> str:
return f"{self._rules_prefix}:o:{project.organization_id}:p:{project.id}"
def read(self, project: Project) -> RuleSet:
client = get_redis_client()
key = self._get_rules_key(project)
data = client.hgetall(key)
return {rule: int(timestamp) for rule, timestamp in data.items()}
    def write(self, project: Project, rules: RuleSet) -> None:
        """Replace the project's rule hash in Redis with `rules`.

        Delete + hmset run in a single pipeline so the old entries never
        linger alongside the new ones.
        """
        client = get_redis_client()
        key = self._get_rules_key(project)
        with client.pipeline() as p:
            # to be consistent with other stores, clear previous hash entries:
            p.delete(key)
            if len(rules) > 0:
                p.hmset(name=key, mapping=rules)  # type: ignore[arg-type]
            p.execute()
    def update_rule(self, project: Project, rule: str, last_used: int) -> None:
        """Overwrite a rule's last_used timestamp.
        This function does not create the rule if it does not exist.
        """
        client = get_redis_client()
        key = self._get_rules_key(project)
        # There is no atomic "overwrite if exists" for hashes, so fetch keys first:
        # NOTE(review): this check-then-set is racy if the hash is rewritten
        # between hkeys and hset — presumably acceptable for this use; confirm.
        existing_rules = client.hkeys(key)
        if rule in existing_rules:
            client.hset(key, rule, last_used)
| RedisRuleStore |
python | python__mypy | mypyc/codegen/literals.py | {
"start": 600,
"end": 10602
} | class ____:
"""Collection of literal values used in a compilation group and related helpers."""
def __init__(self) -> None:
# Each dict maps value to literal index (0, 1, ...)
self.str_literals: dict[str, int] = {}
self.bytes_literals: dict[bytes, int] = {}
self.int_literals: dict[int, int] = {}
self.float_literals: dict[float, int] = {}
self.complex_literals: dict[complex, int] = {}
self.tuple_literals: dict[tuple[object, ...], int] = {}
self.frozenset_literals: dict[frozenset[object], int] = {}
def record_literal(self, value: LiteralValue) -> None:
"""Ensure that the literal value is available in generated code."""
if value is None or value is True or value is False:
# These are special cased and always present
return
if isinstance(value, str):
str_literals = self.str_literals
if value not in str_literals:
str_literals[value] = len(str_literals)
elif isinstance(value, bytes):
bytes_literals = self.bytes_literals
if value not in bytes_literals:
bytes_literals[value] = len(bytes_literals)
elif isinstance(value, int):
int_literals = self.int_literals
if value not in int_literals:
int_literals[value] = len(int_literals)
elif isinstance(value, float):
float_literals = self.float_literals
if value not in float_literals:
float_literals[value] = len(float_literals)
elif isinstance(value, complex):
complex_literals = self.complex_literals
if value not in complex_literals:
complex_literals[value] = len(complex_literals)
elif isinstance(value, tuple):
tuple_literals = self.tuple_literals
if value not in tuple_literals:
for item in value:
assert _is_literal_value(item)
self.record_literal(item)
tuple_literals[value] = len(tuple_literals)
elif isinstance(value, frozenset):
frozenset_literals = self.frozenset_literals
if value not in frozenset_literals:
for item in value:
assert _is_literal_value(item)
self.record_literal(item)
frozenset_literals[value] = len(frozenset_literals)
else:
assert False, "invalid literal: %r" % value
def literal_index(self, value: LiteralValue) -> int:
"""Return the index to the literals array for given value."""
# The array contains first None and booleans, followed by all str values,
# followed by bytes values, etc.
if value is None:
return 0
elif value is False:
return 1
elif value is True:
return 2
n = NUM_SINGLETONS
if isinstance(value, str):
return n + self.str_literals[value]
n += len(self.str_literals)
if isinstance(value, bytes):
return n + self.bytes_literals[value]
n += len(self.bytes_literals)
if isinstance(value, int):
return n + self.int_literals[value]
n += len(self.int_literals)
if isinstance(value, float):
return n + self.float_literals[value]
n += len(self.float_literals)
if isinstance(value, complex):
return n + self.complex_literals[value]
n += len(self.complex_literals)
if isinstance(value, tuple):
return n + self.tuple_literals[value]
n += len(self.tuple_literals)
if isinstance(value, frozenset):
return n + self.frozenset_literals[value]
assert False, "invalid literal: %r" % value
def num_literals(self) -> int:
# The first three are for None, True and False
return (
NUM_SINGLETONS
+ len(self.str_literals)
+ len(self.bytes_literals)
+ len(self.int_literals)
+ len(self.float_literals)
+ len(self.complex_literals)
+ len(self.tuple_literals)
+ len(self.frozenset_literals)
)
# The following methods return the C encodings of literal values
# of different types
def encoded_str_values(self) -> list[bytes]:
return _encode_str_values(self.str_literals)
def encoded_int_values(self) -> list[bytes]:
return _encode_int_values(self.int_literals)
def encoded_bytes_values(self) -> list[bytes]:
return _encode_bytes_values(self.bytes_literals)
def encoded_float_values(self) -> list[str]:
return _encode_float_values(self.float_literals)
def encoded_complex_values(self) -> list[str]:
return _encode_complex_values(self.complex_literals)
def encoded_tuple_values(self) -> list[str]:
return self._encode_collection_values(self.tuple_literals)
def encoded_frozenset_values(self) -> list[str]:
return self._encode_collection_values(self.frozenset_literals)
def _encode_collection_values(
self, values: dict[tuple[object, ...], int] | dict[frozenset[object], int]
) -> list[str]:
"""Encode tuple/frozenset values into a C array.
The format of the result is like this:
<number of collections>
<length of the first collection>
<literal index of first item>
...
<literal index of last item>
<length of the second collection>
...
"""
value_by_index = {index: value for value, index in values.items()}
result = []
count = len(values)
result.append(str(count))
for i in range(count):
value = value_by_index[i]
result.append(str(len(value)))
for item in value:
assert _is_literal_value(item)
index = self.literal_index(item)
result.append(str(index))
return result
def _encode_str_values(values: dict[str, int]) -> list[bytes]:
value_by_index = {index: value for value, index in values.items()}
result = []
line: list[bytes] = []
line_len = 0
for i in range(len(values)):
value = value_by_index[i]
c_literal = format_str_literal(value)
c_len = len(c_literal)
if line_len > 0 and line_len + c_len > 70:
result.append(format_int(len(line)) + b"".join(line))
line = []
line_len = 0
line.append(c_literal)
line_len += c_len
if line:
result.append(format_int(len(line)) + b"".join(line))
result.append(b"")
return result
def _encode_bytes_values(values: dict[bytes, int]) -> list[bytes]:
value_by_index = {index: value for value, index in values.items()}
result = []
line: list[bytes] = []
line_len = 0
for i in range(len(values)):
value = value_by_index[i]
c_init = format_int(len(value))
c_len = len(c_init) + len(value)
if line_len > 0 and line_len + c_len > 70:
result.append(format_int(len(line)) + b"".join(line))
line = []
line_len = 0
line.append(c_init + value)
line_len += c_len
if line:
result.append(format_int(len(line)) + b"".join(line))
result.append(b"")
return result
def format_int(n: int) -> bytes:
"""Format an integer using a variable-length binary encoding."""
if n < 128:
a = [n]
else:
a = []
while n > 0:
a.insert(0, n & 0x7F)
n >>= 7
for i in range(len(a) - 1):
# If the highest bit is set, more 7-bit digits follow
a[i] |= 0x80
return bytes(a)
def format_str_literal(s: str) -> bytes:
utf8 = s.encode("utf-8", errors="surrogatepass")
return format_int(len(utf8)) + utf8
def _encode_int_values(values: dict[int, int]) -> list[bytes]:
"""Encode int values into C strings.
Values are stored in base 10 and separated by 0 bytes.
"""
value_by_index = {index: value for value, index in values.items()}
result = []
line: list[bytes] = []
line_len = 0
for i in range(len(values)):
value = value_by_index[i]
encoded = b"%d" % value
if line_len > 0 and line_len + len(encoded) > 70:
result.append(format_int(len(line)) + b"\0".join(line))
line = []
line_len = 0
line.append(encoded)
line_len += len(encoded)
if line:
result.append(format_int(len(line)) + b"\0".join(line))
result.append(b"")
return result
def float_to_c(x: float) -> str:
"""Return C literal representation of a float value."""
s = str(x)
if s == "inf":
return "INFINITY"
elif s == "-inf":
return "-INFINITY"
elif s == "nan":
return "NAN"
return s
def _encode_float_values(values: dict[float, int]) -> list[str]:
"""Encode float values into a C array values.
The result contains the number of values followed by individual values.
"""
value_by_index = {index: value for value, index in values.items()}
result = []
num = len(values)
result.append(str(num))
for i in range(num):
value = value_by_index[i]
result.append(float_to_c(value))
return result
def _encode_complex_values(values: dict[complex, int]) -> list[str]:
"""Encode float values into a C array values.
The result contains the number of values followed by pairs of doubles
representing complex numbers.
"""
value_by_index = {index: value for value, index in values.items()}
result = []
num = len(values)
result.append(str(num))
for i in range(num):
value = value_by_index[i]
result.append(float_to_c(value.real))
result.append(float_to_c(value.imag))
return result
| Literals |
python | google__pytype | pytype/tests/test_closures.py | {
"start": 7542,
"end": 9970
} | class ____(test_base.BaseTest):
"""Tests for closures in Python 3."""
def test_if_split_delete_deref(self):
ty = self.Infer("""
def f(a: int):
x = "hello"
def g():
nonlocal x
x = 42
if a:
g()
else:
return x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional
def f(a: int) -> Optional[str]: ...
""",
)
def test_closures_delete_deref(self):
err = self.CheckWithErrors("""
def f():
x = "hello"
def g():
nonlocal x # force x to be stored in a closure cell
x = 10
del x
return x # name-error[e]
""")
self.assertErrorSequences(err, {"e": ["Variable x", "deleted", "line 6"]})
def test_nonlocal(self):
ty = self.Infer("""
def f():
x = "hello"
def g():
nonlocal x
x = 10
g()
return x
""")
self.assertTypesMatchPytd(
ty,
"""
def f() -> int: ...
""",
)
def test_nonlocal_delete_deref(self):
err = self.CheckWithErrors("""
def f():
x = True
def g():
nonlocal x
del x
g()
return x # name-error[e]
""")
self.assertErrorSequences(err, {"e": ["Variable x", "deleted", "line 5"]})
def test_reuse_after_delete_deref(self):
ty = self.Infer("""
def f():
x = True
def g():
nonlocal x
del x
g()
x = 42
return x
""")
self.assertTypesMatchPytd(
ty,
"""
def f() -> int: ...
""",
)
def test_closure_annotations(self):
errors = self.CheckWithErrors("""
def f():
a = 1
def g(x: int) -> int:
a # makes sure g is a closure
return "hello" # bad-return-type[e]
""")
self.assertErrorRegexes(errors, {"e": r"int.*str"})
def test_filter_before_delete(self):
# TODO(b/117463644): Remove the disable on line 7.
self.CheckWithErrors("""
from typing import Optional
def f(x: Optional[str]):
if x is None:
raise TypeError()
def nested():
nonlocal x
print(x.upper()) # pytype: disable=name-error
del x
nested()
return x # name-error
""")
if __name__ == "__main__":
test_base.main()
| ClosuresTestPy3 |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataflow.py | {
"start": 27056,
"end": 30572
} | class ____:
@pytest.fixture
def run_operator(self):
"""
Create a DataflowDeletePipelineOperator instance with test data
"""
return DataflowDeletePipelineOperator(
task_id=TASK_ID,
pipeline_name=TEST_PIPELINE_NAME,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
gcp_conn_id=GCP_CONN_ID,
)
@mock.patch("airflow.providers.google.cloud.operators.dataflow.DataflowHook")
# @mock.patch("airflow.providers.google.cloud.operators.dataflow.DataflowHook.delete_data_pipeline")
def test_execute(self, data_pipeline_hook_mock, run_operator):
"""
Test Delete Operator execute with correct parameters
"""
data_pipeline_hook_mock.return_value.delete_data_pipeline.return_value = None
run_operator.execute(mock.MagicMock())
data_pipeline_hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
data_pipeline_hook_mock.return_value.delete_data_pipeline.assert_called_once_with(
pipeline_name=TEST_PIPELINE_NAME,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
)
def test_invalid_data_pipeline_name(self, sdk_connection_not_found):
"""
Test that AirflowException is raised if Delete Operator is not given a data pipeline name.
"""
init_kwargs = {
"task_id": TASK_ID,
"pipeline_name": None,
"project_id": TEST_PROJECT,
"location": TEST_LOCATION,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowDeletePipelineOperator(**init_kwargs).execute(mock.MagicMock())
def test_invalid_project_id(self, sdk_connection_not_found):
"""
Test that AirflowException is raised if Delete Operator is not given a project ID.
"""
init_kwargs = {
"task_id": TASK_ID,
"pipeline_name": TEST_PIPELINE_NAME,
"project_id": None,
"location": TEST_LOCATION,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowDeletePipelineOperator(**init_kwargs).execute(mock.MagicMock())
def test_invalid_location(self, sdk_connection_not_found):
"""
Test that AirflowException is raised if Delete Operator is not given a location.
"""
init_kwargs = {
"task_id": TASK_ID,
"pipeline_name": TEST_PIPELINE_NAME,
"project_id": TEST_PROJECT,
"location": None,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowDeletePipelineOperator(**init_kwargs).execute(mock.MagicMock())
def test_invalid_response(self, sdk_connection_not_found):
"""
Test that AirflowException is raised if Delete Operator fails execution and returns error.
"""
init_kwargs = {
"task_id": TASK_ID,
"pipeline_name": TEST_PIPELINE_NAME,
"project_id": TEST_PROJECT,
"location": TEST_LOCATION,
"gcp_conn_id": GCP_CONN_ID,
}
with pytest.raises(AirflowException):
DataflowDeletePipelineOperator(**init_kwargs).execute(mock.MagicMock()).return_value = {
"error": {"message": "example error"}
}
| TestDataflowDeletePipelineOperator |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 11860,
"end": 12162
} | class ____(Rule):
"""Rewrite integrand to another form that is easier to handle."""
rewritten: Expr
substep: Rule
def eval(self) -> Expr:
return self.substep.eval()
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
| RewriteRule |
python | getsentry__sentry | src/sentry/replays/usecases/query/fields.py | {
"start": 3642,
"end": 5818
} | class ____:
def __init__(self, query: type[SumOfTagAggregate] | type[TagScalar]) -> None:
self.parse = parse_str
self.query = query
def apply(self, search_filter: SearchFilter) -> Condition:
"""Apply a search operation against any named expression.
A named expression can be a column name or an expression alias.
"""
key = search_filter.key.name
if key.startswith("tags["):
key = key[5:-1]
operator = search_filter.operator
value = search_filter.value.value
if isinstance(value, (str, int, float, datetime.datetime)):
parsed_value = self.parse(str(value))
if search_filter.value.is_wildcard():
applicable = self._apply_wildcard
else:
applicable = self._apply_scalar
return applicable(key, operator, parsed_value)
else:
parsed_values = [self.parse(str(v)) for v in value]
return self._apply_composite(key, operator, parsed_values)
def _apply_wildcard(self, key: str, operator: str, value: str) -> Condition:
if operator == "=":
visitor = self.query.visit_match
elif operator == "!=":
visitor = self.query.visit_not_match
else:
raise OperatorNotSupported(f"Unsupported wildcard search operator: '{operator}'")
return visitor(key, value)
def _apply_composite(self, key: str, operator: str, value: list[str]) -> Condition:
if operator == "IN":
visitor = self.query.visit_in
elif operator == "NOT IN":
visitor = self.query.visit_not_in
else:
raise OperatorNotSupported(f"Unsupported composite search operator: '{operator}'")
return visitor(key, value)
def _apply_scalar(self, key: str, operator: str, value: str) -> Condition:
if operator == "=":
visitor = self.query.visit_eq
elif operator == "!=":
visitor = self.query.visit_neq
else:
raise OperatorNotSupported(f"Unsupported search operator: '{operator}'")
return visitor(key, value)
| TagField |
python | mlflow__mlflow | mlflow/gateway/providers/mistral.py | {
"start": 295,
"end": 4928
} | class ____(ProviderAdapter):
@classmethod
def model_to_completions(cls, resp, config):
# Response example (https://docs.mistral.ai/api/#operation/createChatCompletion)
# ```
# {
# "id": "string",
# "object": "string",
# "created": "integer",
# "model": "string",
# "choices": [
# {
# "index": "integer",
# "message": {
# "role": "string",
# "content": "string"
# },
# "finish_reason": "string",
# }
# ],
# "usage":
# {
# "prompt_tokens": "integer",
# "completion_tokens": "integer",
# "total_tokens": "integer",
# }
# }
# ```
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=config.model.name,
choices=[
completions.Choice(
index=idx,
text=c["message"]["content"],
finish_reason=c["finish_reason"],
)
for idx, c in enumerate(resp["choices"])
],
usage=completions.CompletionsUsage(
prompt_tokens=resp["usage"]["prompt_tokens"],
completion_tokens=resp["usage"]["completion_tokens"],
total_tokens=resp["usage"]["total_tokens"],
),
)
@classmethod
def model_to_chat(cls, resp, config):
# Response example (https://docs.mistral.ai/api/#operation/createChatCompletion)
return chat.ResponsePayload(
id=resp["id"],
object=resp["object"],
created=resp["created"],
model=resp["model"],
choices=[
chat.Choice(
index=idx,
message=chat.ResponseMessage(
role=c["message"]["role"],
content=c["message"].get("content"),
tool_calls=(
(calls := c["message"].get("tool_calls"))
and [chat.ToolCall(**c) for c in calls]
),
),
finish_reason=c.get("finish_reason"),
)
for idx, c in enumerate(resp["choices"])
],
usage=chat.ChatUsage(
prompt_tokens=resp["usage"]["prompt_tokens"],
completion_tokens=resp["usage"]["completion_tokens"],
total_tokens=resp["usage"]["total_tokens"],
),
)
@classmethod
def model_to_embeddings(cls, resp, config):
# Response example (https://docs.mistral.ai/api/#operation/createEmbedding):
# ```
# {
# "id": "string",
# "object": "string",
# "data": [
# {
# "object": "string",
# "embedding":
# [
# float,
# float
# ]
# "index": "integer",
# }
# ],
# "model": "string",
# "usage":
# {
# "prompt_tokens": "integer",
# "total_tokens": "integer",
# }
# }
# ```
return embeddings.ResponsePayload(
data=[
embeddings.EmbeddingObject(
embedding=data["embedding"],
index=data["index"],
)
for data in resp["data"]
],
model=config.model.name,
usage=embeddings.EmbeddingsUsage(
prompt_tokens=resp["usage"]["prompt_tokens"],
total_tokens=resp["usage"]["total_tokens"],
),
)
@classmethod
def completions_to_model(cls, payload, config):
payload["model"] = config.model.name
payload.pop("stop", None)
payload.pop("n", None)
payload["messages"] = [{"role": "user", "content": payload.pop("prompt")}]
# The range of Mistral's temperature is 0-1, but ours is 0-2, so we scale it.
if "temperature" in payload:
payload["temperature"] = 0.5 * payload["temperature"]
return payload
@classmethod
def chat_to_model(cls, payload, config):
return {"model": config.model.name, **payload}
@classmethod
def embeddings_to_model(cls, payload, config):
return {"model": config.model.name, **payload}
| MistralAdapter |
python | django__django | docs/_ext/djangodocs.py | {
"start": 7633,
"end": 10049
} | class ____(nodes.literal_block):
"""
Custom node to override the visit/depart event handlers at registration
time. Wrap a literal_block object and defer to it.
"""
tagname = "ConsoleNode"
def __init__(self, litblk_obj):
self.wrapped = litblk_obj
def __getattr__(self, attr):
if attr == "wrapped":
return self.__dict__.wrapped
return getattr(self.wrapped, attr)
def visit_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.visit_literal_block(node)
def depart_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.depart_literal_block(node)
def visit_console_html(self, node):
"""Generate HTML for the console directive."""
if self.builder.name in ("djangohtml", "json") and node["win_console_text"]:
# Put a mark on the document object signaling the fact the directive
# has been used on it.
self.document._console_directive_used_flag = True
uid = node["uid"]
self.body.append(
"""\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" \
checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">/</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows"></label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n"""
% {"id": uid}
)
try:
self.visit_literal_block(node)
except nodes.SkipNode:
pass
self.body.append("</section>\n")
self.body.append(
'<section class="c-content-win" id="c-content-%(id)s-win">\n' % {"id": uid}
)
win_text = node["win_console_text"]
highlight_args = {"force": True}
linenos = node.get("linenos", False)
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
win_text, "doscon", warn=warner, linenos=linenos, **highlight_args
)
self.body.append(highlighted)
self.body.append("</section>\n")
self.body.append("</div>\n")
raise nodes.SkipNode
else:
self.visit_literal_block(node)
| ConsoleNode |
python | pydata__xarray | xarray/coding/cftime_offsets.py | {
"start": 20718,
"end": 20917
} | class ____(Tick):
_freq = "min"
def as_timedelta(self) -> timedelta:
return timedelta(minutes=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
| Minute |
python | tensorflow__tensorflow | third_party/xla/build_tools/lint/generate_compile_commands_test.py | {
"start": 814,
"end": 1770
} | class ____(absltest.TestCase):
def test_command_from_args_list(self):
arguments = [
"/usr/bin/gcc",
"-DTEST_DEFINE",
"-fstack-protector",
"-c",
"xla/compiler.cc",
"-o",
"bazel-out/k8-opt/bin/xla/_objs/compiler/compiler.pic.o",
]
command = CompileCommand.from_args_list(arguments)
self.assertEqual(command.file, "xla/compiler.cc")
self.assertEqual(command.arguments, arguments)
def test_command_from_args_list_with_disallowed_option(self):
arguments = [
"/usr/bin/gcc",
"-DTEST_DEFINE",
"-fno-canonical-system-headers",
"-c",
"xla/compiler.cc",
"-o",
"bazel-out/k8-opt/bin/xla/_objs/compiler/compiler.pic.o",
]
command = CompileCommand.from_args_list(arguments)
self.assertEqual(command.file, "xla/compiler.cc")
self.assertEqual(command.arguments, arguments[0:2] + arguments[3:])
| CompileCommandsTest |
python | sympy__sympy | sympy/functions/special/bessel.py | {
"start": 34488,
"end": 36589
} | class ____(SphericalBesselBase):
@assume_integer_order
def _eval_rewrite_as_besselj(self, nu, z, **kwargs):
# jn +- I*yn
# jn as beeselj: sqrt(pi/(2*z)) * besselj(nu + S.Half, z)
# yn as besselj: (-1)**(nu+1) * sqrt(pi/(2*z)) * besselj(-nu - S.Half, z)
hks = self._hankel_kind_sign
return sqrt(pi/(2*z))*(besselj(nu + S.Half, z) +
hks*I*S.NegativeOne**(nu+1)*besselj(-nu - S.Half, z))
@assume_integer_order
def _eval_rewrite_as_bessely(self, nu, z, **kwargs):
# jn +- I*yn
# jn as bessely: (-1)**nu * sqrt(pi/(2*z)) * bessely(-nu - S.Half, z)
# yn as bessely: sqrt(pi/(2*z)) * bessely(nu + S.Half, z)
hks = self._hankel_kind_sign
return sqrt(pi/(2*z))*(S.NegativeOne**nu*bessely(-nu - S.Half, z) +
hks*I*bessely(nu + S.Half, z))
def _eval_rewrite_as_yn(self, nu, z, **kwargs):
hks = self._hankel_kind_sign
return jn(nu, z).rewrite(yn) + hks*I*yn(nu, z)
def _eval_rewrite_as_jn(self, nu, z, **kwargs):
hks = self._hankel_kind_sign
return jn(nu, z) + hks*I*yn(nu, z).rewrite(jn)
def _eval_expand_func(self, **hints):
if self.order.is_Integer:
return self._expand(**hints)
else:
nu = self.order
z = self.argument
hks = self._hankel_kind_sign
return jn(nu, z) + hks*I*yn(nu, z)
def _expand(self, **hints):
n = self.order
z = self.argument
hks = self._hankel_kind_sign
# fully expanded version
# return ((fn(n, z) * sin(z) +
# (-1)**(n + 1) * fn(-n - 1, z) * cos(z)) + # jn
# (hks * I * (-1)**(n + 1) *
# (fn(-n - 1, z) * hk * I * sin(z) +
# (-1)**(-n) * fn(n, z) * I * cos(z))) # +-I*yn
# )
return (_jn(n, z) + hks*I*_yn(n, z)).expand()
def _eval_evalf(self, prec):
if self.order.is_Integer:
return self.rewrite(besselj)._eval_evalf(prec)
| SphericalHankelBase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.