language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/3337. Total Characters in String After Transformations II/3337.py | {
"start": 0,
"end": 1526
class Solution:
    # Similar to 3335. Total Characters in String After Transformations I
    def lengthAfterTransformations(self, s: str, t: int, nums: list[int]) -> int:
        """Return the length of `s` after `t` transformations, modulo 1e9+7.

        Each letter i expands into its next nums[i] letters (wrapping past
        'z').  Per-letter growth is linear, so applying t steps is the t-th
        power of a 26x26 transition matrix, computed by fast exponentiation.
        """
        MOD = 1_000_000_007

        def mat_mul(X: list[list[int]], Y: list[list[int]]) -> list[list[int]]:
            """Return X @ Y with entries reduced modulo MOD."""
            n = len(X)
            return [[sum(X[r][m] * Y[m][c] for m in range(n)) % MOD
                     for c in range(n)]
                    for r in range(n)]

        def mat_pow(M: list[list[int]], exp: int) -> list[list[int]]:
            """Return M**exp via iterative binary exponentiation."""
            n = len(M)
            result = [[int(r == c) for c in range(n)] for r in range(n)]
            base = M
            while exp:
                if exp & 1:
                    result = mat_mul(result, base)
                base = mat_mul(base, base)
                exp >>= 1
            return result

        # T[i][j] := the number of ways to transform ('a' + i) to ('a' + j)
        powered = mat_pow(self._getTransformationMatrix(nums), t)

        freq = [0] * 26
        for ch in s:
            freq[ord(ch) - ord('a')] += 1

        # Final length is the sum over all (source, target) letter pairs.
        total = sum(freq[i] * powered[i][j]
                    for i in range(26)
                    for j in range(26))
        return total % MOD

    def _getTransformationMatrix(self, nums: list[int]) -> list[list[int]]:
        """Build the one-step 26x26 transition matrix from per-letter step counts."""
        T = [[0] * 26 for _ in range(26)]
        for i, steps in enumerate(nums):
            for step in range(1, steps + 1):
                T[i][(i + step) % 26] += 1
        return T
| Solution |
python | spyder-ide__spyder | spyder/utils/image_path_manager.py | {
"start": 479,
"end": 2509
class ImagePathManager():
    """Manager of the image path in the project."""

    def __init__(self):
        """Initialize main path with all the images."""
        self.IMG_PATH = {}
        self.add_image_path(get_module_data_path('spyder', relpath='images'))
        self.default = 'not_found'

    def add_image_path(self, path):
        """Add path to the image path list.

        Walks *path* recursively and registers every non-hidden file under
        its extension-less basename.  On a name collision, a file living in
        an ``svg`` directory takes priority over any other location.
        """
        if not osp.isdir(path):
            return
        for dirpath, __, _filenames in os.walk(path):
            for filename in _filenames:
                # Skip hidden files (e.g. .DS_Store).
                if filename.startswith('.'):
                    continue
                name, __ = osp.splitext(osp.basename(filename))
                complete_path = osp.join(dirpath, filename)
                if name in self.IMG_PATH:
                    warnings.warn(
                        f'The icon located in {complete_path} is overriding '
                        f'the existing {name}')
                    existing_path = self.IMG_PATH[name]
                    if osp.basename(dirpath) == 'svg':
                        # If current file is from svg directory, it has
                        # priority
                        self.IMG_PATH[name] = complete_path
                    elif osp.basename(osp.dirname(existing_path)) == 'svg':
                        # If existing file is from svg directory, keep it
                        continue
                    else:
                        self.IMG_PATH[name] = complete_path
                else:
                    self.IMG_PATH[name] = complete_path

    def get_image_path(self, name):
        """Get path of the image given its name.

        Falls back to the ``self.default`` image both when *name* is unknown
        and when the registered file no longer exists on disk.
        """
        try:
            act_image = self.IMG_PATH[name]
            if osp.isfile(act_image):
                return osp.abspath(act_image)
        except KeyError:
            pass
        # Fix: previously a registered name whose file had been removed fell
        # through this method and returned None implicitly; always resolve
        # to the default image instead.
        return osp.abspath(self.IMG_PATH[self.default])
# Module-level singleton: built once at import time so every caller shares
# the same name -> path registry.
IMAGE_PATH_MANAGER = ImagePathManager()


def get_image_path(name):
    """Return the absolute path of the image registered under *name*."""
    return IMAGE_PATH_MANAGER.get_image_path(name)
| ImagePathManager |
python | Pylons__pyramid | tests/test_scripts/test_pshell.py | {
"start": 13604,
"end": 13862
class Test_main(unittest.TestCase):
    """Exercise the ``pshell`` console-script entry point."""

    def _callFUT(self, argv):
        # Import lazily so the module under test is resolved at call time.
        from pyramid.scripts.pshell import main
        return main(argv, quiet=True)

    def test_it(self):
        # Invoked with no config-file argument, main() returns exit code 2.
        self.assertEqual(self._callFUT(['pshell']), 2)
| Test_main |
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/model_views.py | {
"start": 17011,
"end": 18419
class NotificationViewSet(DisableListEndpoint, CreateModelMixin, UserSelectViewSet):

    """
    Create a notification attached to an object (User, Project, Build, Organization).

    This endpoint is currently used only internally by the builder.
    Notifications are attached to `Build` objects only when using this endpoint.
    This limitation will change in the future when re-implementing this on APIv3 if needed.
    """

    parser_classes = [JSONParser, MultiPartParser]
    permission_classes = [HasBuildAPIKey]
    renderer_classes = (JSONRenderer,)
    serializer_class = NotificationSerializer
    model = Notification

    def perform_create(self, serializer):
        """Restrict creation to notifications attached to the project's builds from the api key."""
        target = serializer.validated_data["attached_to"]

        # Resolve the slug of the project owning the target object; only
        # Build and Project targets are recognized here.
        if isinstance(target, Build):
            owning_slug = target.project.slug
        elif isinstance(target, Project):
            owning_slug = target.slug
        else:
            owning_slug = None

        # Limit the permissions to create a notification on this object only
        # if the API key is attached to the related project.
        api_key = self.request.build_api_key
        if not owning_slug or api_key.project.slug != owning_slug:
            raise PermissionDenied()

        return super().perform_create(serializer)
| NotificationViewSet |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial004.py | {
"start": 367,
"end": 3505
class Hero(SQLModel, table=True):
    """A hero row whose team foreign key restricts team deletion."""

    id: Optional[int] = Field(default=None, primary_key=True)
    name: str = Field(index=True)
    secret_name: str
    age: Optional[int] = Field(default=None, index=True)
    # RESTRICT: the database refuses to delete a Team that still has heroes.
    team_id: Optional[int] = Field(
        default=None, foreign_key="team.id", ondelete="RESTRICT"
    )
    team: Optional[Team] = Relationship(back_populates="heroes")
# SQLite database location and shared engine (echo=True logs emitted SQL).
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"

engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
    """Create all tables, then enable FK enforcement (off by default in SQLite)."""
    SQLModel.metadata.create_all(engine)
    with engine.connect() as connection:
        connection.execute(text("PRAGMA foreign_keys=ON"))  # for SQLite only
def create_heroes():
    """Insert the demo teams and heroes, then move Spider-Boy to the Preventers."""
    with Session(engine) as session:
        team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
        team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")

        heroes = [
            Hero(name="Deadpond", secret_name="Dive Wilson", team=team_z_force),
            Hero(
                name="Rusty-Man", secret_name="Tommy Sharp", age=48,
                team=team_preventers,
            ),
            Hero(name="Spider-Boy", secret_name="Pedro Parqueador"),
        ]
        session.add_all(heroes)
        session.commit()
        for hero in heroes:
            session.refresh(hero)
        for hero in heroes:
            print("Created hero:", hero)

        # Reassign Spider-Boy (created without a team) to the Preventers.
        hero_spider_boy = heroes[2]
        hero_spider_boy.team = team_preventers
        session.add(hero_spider_boy)
        session.commit()
        session.refresh(hero_spider_boy)
        print("Updated hero:", hero_spider_boy)

        # A new team created together with its heroes in one statement.
        team_wakaland = Team(
            name="Wakaland",
            headquarters="Wakaland Capital City",
            heroes=[
                Hero(name="Black Lion", secret_name="Trevor Challa", age=35),
                Hero(name="Princess Sure-E", secret_name="Sure-E"),
            ],
        )
        session.add(team_wakaland)
        session.commit()
        session.refresh(team_wakaland)
        print("Team Wakaland:", team_wakaland)
def delete_team():
    """Delete the Wakaland team.

    NOTE(review): Hero.team_id is declared with ``ondelete="RESTRICT"``, so
    the database is presumably expected to reject this delete while heroes
    still reference the team — confirm against the tutorial's intent.
    """
    with Session(engine) as session:
        team = session.exec(select(Team).where(Team.name == "Wakaland")).one()
        session.delete(team)
        session.commit()
        print("Deleted team:", team)
def select_deleted_heroes():
    """Print the Wakaland heroes to show their state after the team delete."""
    with Session(engine) as session:
        for hero_name, label in (
            ("Black Lion", "Black Lion has no team:"),
            ("Princess Sure-E", "Princess Sure-E has no team:"),
        ):
            hero = session.exec(
                select(Hero).where(Hero.name == hero_name)
            ).first()
            print(label, hero)
def main():
    """Run the demo end to end (select_deleted_heroes is defined but not called here)."""
    create_db_and_tables()
    create_heroes()
    delete_team()


if __name__ == "__main__":
    main()
| Hero |
python | chroma-core__chroma | chromadb/db/impl/sqlite.py | {
"start": 840,
"end": 1885
class TxWrapper(base.TxWrapper):
    """Transaction wrapper over a pooled SQLite connection.

    Nested ``with`` blocks share one transaction via a thread-local stack:
    only the outermost enter issues BEGIN, and only the outermost exit
    commits/rolls back and returns the connection to the pool.
    """

    _conn: Connection
    _pool: Pool

    def __init__(self, conn_pool: Pool, stack: local):
        self._tx_stack = stack
        self._conn = conn_pool.connect()
        self._pool = conn_pool

    @override
    def __enter__(self) -> base.Cursor:
        outermost = len(self._tx_stack.stack) == 0
        if outermost:
            # Start the real transaction only at the outermost level.
            self._conn.execute("PRAGMA case_sensitive_like = ON")
            self._conn.execute("BEGIN;")
        self._tx_stack.stack.append(self)
        return self._conn.cursor()  # type: ignore

    @override
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Literal[False]:
        self._tx_stack.stack.pop()
        if len(self._tx_stack.stack) == 0:
            # Outermost exit: finish the transaction and recycle the connection.
            if exc_type is None:
                self._conn.commit()
            else:
                self._conn.rollback()
            self._conn.cursor().close()
            self._pool.return_to_pool(self._conn)
        # Never suppress the caller's exception.
        return False
| TxWrapper |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 35116,
"end": 38426
class CrosshairTool(InspectTool):
    ''' *toolbar icon*: |crosshair_icon|

    The crosshair tool is a passive inspector tool. It is generally on at all
    times, but can be configured in the inspector's menu associated with the
    *toolbar icon* shown above.

    The crosshair tool draws a crosshair annotation over the plot, centered on
    the current mouse position. The crosshair tool may be configured to draw
    across only one dimension by setting the ``dimension`` property to only
    ``width`` or ``height``.

    .. |crosshair_icon| image:: /_images/icons/crosshair.svg
        :height: 24px
        :alt: Icon of circle with aiming reticle marks representing the crosshair tool in the toolbar.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    # -- overlay configuration -----------------------------------------------

    overlay = Either(
        Auto,
        Instance(Span),
        Tuple(Instance(Span), Instance(Span)),
        default="auto",
        help="""
    An annotation drawn to indicate the crosshair.

    If ``"auto"``, this will create spans depending on the ``dimensions``
    property, which based on its value, will result in either one span
    (horizontal or vertical) or two spans (horizontal and vertical).

    Alternatively the user can provide one ``Span`` instance, where the
    dimension is indicated by the ``dimension`` property of the ``Span``.
    Also two ``Span`` instances can be provided. Providing explicit
    ``Span`` instances allows for constructing linked crosshair, when
    those instances are shared between crosshair tools of different plots.

    .. note::
        This property is experimental and may change at any point. In
        particular in future this will allow using other annotations
        than ``Span`` and annotation groups.
    """)

    dimensions = Enum(Dimensions, default="both", help="""
    Which dimensions the crosshair tool is to track. By default, both vertical
    and horizontal lines will be drawn. If only "width" is supplied, only a
    horizontal line will be drawn. If only "height" is supplied, only a
    vertical line will be drawn.
    """)

    # -- line styling --------------------------------------------------------

    line_color = Color(default="black", help="""
    A color to use to stroke paths with.
    """)

    line_alpha = Alpha(help="""
    An alpha value to use to stroke paths with.
    """)

    line_width = Float(default=1, help="""
    Stroke width in units of pixels.
    """)
def _default_box_overlay(*, editable: bool, units: str) -> InstanceDefault:
    """Build the standard dashed, semi-transparent box-annotation default.

    The zoom and select overlays share every setting except coordinate
    units and editability, so both are derived from this one factory.
    """
    return InstanceDefault(BoxAnnotation,
        syncable=False,
        level="overlay",
        visible=False,
        editable=editable,

        left=nan,
        right=nan,
        top=nan,
        bottom=nan,

        top_units=units,
        left_units=units,
        bottom_units=units,
        right_units=units,

        fill_color="lightgrey",
        fill_alpha=0.5,

        line_color="black",
        line_alpha=1.0,
        line_width=2,
        line_dash=[4, 4],
    )


DEFAULT_BOX_ZOOM_OVERLAY = _default_box_overlay(editable=False, units="canvas")

DEFAULT_BOX_SELECT_OVERLAY = _default_box_overlay(editable=True, units="data")
| CrosshairTool |
python | Netflix__metaflow | metaflow/_vendor/click/types.py | {
"start": 10304,
"end": 10641
class FloatParamType(ParamType):
    """Click parameter type that converts raw argument values to ``float``."""

    name = "float"

    def convert(self, value, param, ctx):
        """Return *value* as a float; report a usage failure if it cannot parse."""
        try:
            return float(value)
        except ValueError:
            # self.fail raises, so this branch never returns.
            self.fail(
                "{} is not a valid floating point value".format(value), param, ctx
            )

    def __repr__(self):
        return "FLOAT"
| FloatParamType |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_handler_registry/email_handler.py | {
"start": 575,
"end": 2913
class EmailActionHandler(ActionHandler):
    """Action handler describing and executing email notification actions."""

    # Lazily-built transformer, memoized on the class
    # (see get_config_transformer).
    _config_transformer: ConfigTransformer | None = None

    config_schema = {
        "$schema": "https://json-schema.org/draft/2020-12/schema",
        "description": "The configuration schema for an email Action",
        "type": "object",
        "properties": {
            "target_identifier": {"type": ["string", "null"]},
            "target_display": {"type": ["null"]},
            "target_type": {
                "type": ["integer"],
                "enum": [ActionTarget.USER, ActionTarget.TEAM, ActionTarget.ISSUE_OWNERS],
            },
        },
        "required": ["target_type"],
        "additionalProperties": False,
        # User/Team targets additionally require a concrete identifier.
        "allOf": [
            {
                "if": {
                    "properties": {"target_type": {"enum": [ActionTarget.USER, ActionTarget.TEAM]}}
                },
                "then": {
                    "properties": {"target_identifier": {"type": "string"}},
                    "required": ["target_type", "target_identifier"],
                },
            },
        ],
    }

    data_schema = {
        "$schema": "https://json-schema.org/draft/2020-12/schema",
        "type": "object",
        "properties": {
            "fallthrough_type": {
                "type": "string",
                "description": "The fallthrough type for issue owners email notifications",
                "enum": [*FallthroughChoiceType],
            },
            # XXX(CEO): temporarily support this incorrect camel case
            "fallthroughType": {
                "type": "string",
                "description": "The fallthrough type for issue owners email notifications",
                "enum": [*FallthroughChoiceType],
            },
        },
        "additionalProperties": False,
    }

    group = ActionHandler.Group.NOTIFICATION

    @classmethod
    def get_config_transformer(cls) -> ConfigTransformer | None:
        """Return the transformer derived from ``config_schema``, building it once."""
        if cls._config_transformer is None:
            cls._config_transformer = TargetTypeConfigTransformer.from_config_schema(
                cls.config_schema
            )
        return cls._config_transformer

    @staticmethod
    def execute(
        job: WorkflowEventData,
        action: Action,
        detector: Detector,
    ) -> None:
        """Dispatch execution through the group-type registry."""
        execute_via_group_type_registry(job, action, detector)
| EmailActionHandler |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 12585,
"end": 12753
class IssueCommentOrderField(sgqlc.types.Enum):
    """GraphQL enum of fields issue-comment connections can be ordered by."""

    __schema__ = graphql_schema
    __choices__ = ("UPDATED_AT",)
| IssueCommentOrderField |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 21427,
"end": 21708
class FileStorageInfo(BaseModel):
    """Databricks jobs API model describing a local-file storage destination."""

    # Frozen (immutable) and tolerant of extra fields from the API.
    model_config = ConfigDict(extra="allow", frozen=True)

    destination: Optional[str] = Field(
        None, description="File destination. Example: `file:/my/file.sh`"
    )
| FileStorageInfo |
python | PrefectHQ__prefect | tests/input/test_run_input.py | {
"start": 860,
"end": 24170
} | class ____(RunInput):
city: str
state: str
def test_keyset_from_base_key():
keyset = keyset_from_base_key("person")
assert keyset["response"] == "person-response"
assert keyset["schema"] == "person-schema"
def test_keyset_from_type():
keyset = Person.keyset_from_type()
assert keyset["response"] == "person-response"
assert keyset["schema"] == "person-schema"
@pytest.mark.parametrize(
"state,expected",
[
(Paused(pause_key="1"), keyset_from_base_key("paused-1")),
(Suspended(pause_key="1"), keyset_from_base_key("suspended-1")),
],
)
def test_keyset_from_paused_state(state, expected):
assert keyset_from_paused_state(state) == expected
def test_keyset_from_paused_state_non_paused_state_raises_exception():
with pytest.raises(RuntimeError, match="unsupported"):
keyset_from_paused_state(Running())
async def test_save_stores_schema(flow_run_context):
keyset = keyset_from_base_key("person")
await Person.save(keyset)
schema = await read_flow_run_input(key=keyset["schema"])
assert set(schema["properties"].keys()) == {
"name",
"email",
"human",
}
async def test_save_stores_provided_description(flow_run_context):
keyset = keyset_from_base_key("person")
await Person.with_initial_data(description="Testing").save(keyset)
description = await read_flow_run_input(key=keyset["description"])
assert description == "Testing"
def test_save_works_sync():
@flow
def test_flow():
keyset = keyset_from_base_key("person")
Person.save(keyset)
schema = read_flow_run_input(key=keyset["schema"])
assert set(schema["properties"].keys()) == {
"name",
"email",
"human",
}
test_flow()
async def test_save_explicit_flow_run(flow_run):
keyset = keyset_from_base_key("person")
await Person.save(keyset, flow_run_id=flow_run.id)
schema = await read_flow_run_input(key=keyset["schema"], flow_run_id=flow_run.id)
assert schema is not None
async def test_load(flow_run_context):
keyset = keyset_from_base_key("person")
await create_flow_run_input(
keyset["response"],
value={"name": "Bob", "email": "bob@bob.bob", "human": True},
)
person = await Person.load(keyset)
assert isinstance(person, Person)
assert person.name == "Bob"
assert person.email == "bob@bob.bob"
assert person.human is True
async def test_load_populates_metadata(flow_run_context):
keyset = keyset_from_base_key("person")
await create_flow_run_input(
keyset["response"],
value={"name": "Bob", "email": "bob@bob.bob", "human": True},
)
person = await Person.load(keyset)
assert person.metadata == RunInputMetadata(
key=keyset["response"], receiver=flow_run_context.flow_run.id, sender=None
)
async def test_load_works_sync():
@flow
def test_flow():
keyset = keyset_from_base_key("person")
create_flow_run_input(
keyset["response"],
value={"name": "Bob", "email": "bob@bob.bob", "human": True},
)
person = Person.load(keyset)
assert isinstance(person, Person)
assert person.name == "Bob"
assert person.email == "bob@bob.bob"
assert person.human is True
test_flow()
async def test_load_explicit_flow_run(flow_run):
keyset = keyset_from_base_key("person")
await create_flow_run_input(
keyset["response"],
value={"name": "Bob", "email": "bob@bob.bob", "human": True},
flow_run_id=flow_run.id,
)
person = await Person.load(keyset, flow_run_id=flow_run.id)
assert isinstance(person, Person)
async def test_load_fails_validation_raises_exception(flow_run_context):
keyset = keyset_from_base_key("person")
await create_flow_run_input(
keyset["response"],
value={
"name": "Bob",
"email": "bob@bob.bob",
"human": "123",
}, # Human should be a boolean value.
)
with pytest.raises(pydantic.ValidationError, match="boolean"):
person = await Person.load(keyset)
assert isinstance(person, Person)
async def test_load_from_flow_run_input(flow_run_context):
flow_run_input = FlowRunInput(
flow_run_id=flow_run_context.flow_run.id,
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
sender=f"prefect.flow-run.{uuid4()}",
)
person = Person.load_from_flow_run_input(flow_run_input)
assert person.name == "Bob"
assert person.email == "bob@example.com"
assert person.human is True
assert person.metadata == RunInputMetadata(
key=flow_run_input.key,
receiver=flow_run_context.flow_run.id,
sender=flow_run_input.sender,
)
async def test_with_initial_data(flow_run_context):
keyset = keyset_from_base_key("bob")
name = "Bob"
new_cls = Person.with_initial_data(name=name)
await new_cls.save(keyset)
schema = await read_flow_run_input(key=keyset["schema"])
assert schema["properties"]["name"]["default"] == "Bob"
async def test_run_input_from_type_str(flow_run_context):
new_cls = run_input_subclass_from_type(str)
assert issubclass(new_cls, AutomaticRunInput)
obj = new_cls(value="hey")
assert obj.value == "hey"
async def test_run_input_from_type_basemodel(flow_run_context):
class MyModel(pydantic.BaseModel):
name: str
age: int
new_cls = run_input_subclass_from_type(MyModel)
assert issubclass(new_cls, RunInput)
obj = new_cls(name="Bob", age=42)
assert obj.name == "Bob"
assert obj.age == 42
async def test_respond(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
sender=f"prefect.flow-run.{flow_run.id}",
)
person = Person.load_from_flow_run_input(flow_run_input)
await person.respond(Place(city="New York", state="NY"))
place = await Place.receive(flow_run_id=flow_run.id, timeout=0.1).next()
assert isinstance(place, Place)
assert place.city == "New York"
assert place.state == "NY"
async def test_respond_functions_sync(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
sender=f"prefect.flow-run.{flow_run.id}",
)
person = Person.load_from_flow_run_input(flow_run_input)
@flow
def test_flow():
person.respond(Place(city="New York", state="NY"))
test_flow()
place = await Place.receive(flow_run_id=flow_run.id, timeout=0.1).next()
assert isinstance(place, Place)
assert place.city == "New York"
assert place.state == "NY"
async def test_respond_can_set_sender(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
sender=f"prefect.flow-run.{flow_run.id}",
)
person = Person.load_from_flow_run_input(flow_run_input)
await person.respond(Place(city="New York", state="NY"), sender="sally")
place = await Place.receive(flow_run_id=flow_run.id, timeout=0.1).next()
assert place.metadata.sender == "sally"
async def test_respond_can_set_key_prefix(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
sender=f"prefect.flow-run.{flow_run.id}",
)
person = Person.load_from_flow_run_input(flow_run_input)
await person.respond(Place(city="New York", state="NY"), key_prefix="heythere")
place = await Place.receive(
flow_run_id=flow_run.id, timeout=0.1, key_prefix="heythere"
).next()
assert isinstance(place, Place)
assert place.city == "New York"
assert place.state == "NY"
async def test_respond_raises_exception_no_sender_in_input():
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
sender=None,
)
person = Person.load_from_flow_run_input(flow_run_input)
with pytest.raises(RuntimeError, match="Cannot respond"):
await person.respond(Place(city="New York", state="NY"))
async def test_respond_uses_automatic_input_if_needed(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
sender=f"prefect.flow-run.{flow_run.id}",
)
person = Person.load_from_flow_run_input(flow_run_input)
await person.respond("hey")
message = await receive_input(str, flow_run_id=flow_run.id).next()
assert isinstance(message, str)
assert message == "hey"
async def test_automatic_input_send_to(flow_run):
await send_input(1, flow_run_id=flow_run.id)
received = await receive_input(int, flow_run_id=flow_run.id, timeout=0.1).next()
assert received == 1
async def test_automatic_input_send_to_works_sync(flow_run):
@flow
def test_flow():
send_input(1, flow_run_id=flow_run.id)
test_flow()
receive_iter = receive_input(int, flow_run_id=flow_run.id, timeout=0.1)
received = await receive_iter.next()
assert received == 1
async def test_automatic_input_send_to_can_set_sender(flow_run):
await send_input(1, flow_run_id=flow_run.id, sender="sally")
received = await receive_input(
int, flow_run_id=flow_run.id, timeout=0.1, with_metadata=True
).next()
assert received.metadata.sender == "sally"
async def test_automatic_input_send_to_can_set_key_prefix(flow_run):
await send_input(1, flow_run_id=flow_run.id, sender="sally", key_prefix="heythere")
# Shouldn't work without the key prefix.
with pytest.raises(TimeoutError):
await receive_input(
int, flow_run_id=flow_run.id, timeout=0.1, with_metadata=True
).next()
# Now we should see it.
received = await receive_input(
int,
flow_run_id=flow_run.id,
timeout=0.1,
with_metadata=True,
key_prefix="heythere",
).next()
assert received.metadata.sender == "sally"
async def test_send_to(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
)
person = Person.load_from_flow_run_input(flow_run_input)
await person.send_to(flow_run_id=flow_run.id)
received = await Person.receive(flow_run_id=flow_run.id, timeout=0.1).next()
assert isinstance(received, Person)
assert person.name == "Bob"
assert person.email == "bob@example.com"
assert person.human is True
async def test_send_to_works_sync(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
)
person = Person.load_from_flow_run_input(flow_run_input)
@flow
def test_flow():
person.send_to(flow_run_id=flow_run.id)
test_flow()
received = await Person.receive(flow_run_id=flow_run.id, timeout=0.1).next()
assert isinstance(received, Person)
assert person.name == "Bob"
assert person.email == "bob@example.com"
assert person.human is True
async def test_send_to_can_set_sender(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
)
person = Person.load_from_flow_run_input(flow_run_input)
await person.send_to(flow_run_id=flow_run.id, sender="sally")
received = await Person.receive(flow_run_id=flow_run.id, timeout=0.1).next()
assert received.metadata.sender == "sally"
async def test_send_to_can_set_key_prefix(flow_run):
flow_run_input = FlowRunInput(
flow_run_id=uuid4(),
key="person-response",
value=orjson.dumps(
{"name": "Bob", "email": "bob@example.com", "human": True}
).decode(),
)
person = Person.load_from_flow_run_input(flow_run_input)
await person.send_to(flow_run_id=flow_run.id, key_prefix="heythere")
received = await Person.receive(
flow_run_id=flow_run.id, timeout=0.1, key_prefix="heythere"
).next()
assert isinstance(received, Person)
assert person.name == "Bob"
assert person.email == "bob@example.com"
assert person.human is True
async def test_automatic_input_can_receive_metadata(flow_run):
await send_input(1, flow_run_id=flow_run.id)
received = await receive_input(
int, flow_run_id=flow_run.id, timeout=0.1, with_metadata=True
).next()
assert received.value == 1
async def test_automatic_input_can_receive_without_metadata(flow_run):
await send_input(1, flow_run_id=flow_run.id)
received = await receive_input(int, flow_run_id=flow_run.id, timeout=0.1).next()
assert received == 1
async def test_automatic_input_receive_multiple_values(flow_run):
async def send():
for city in [("New York", "NY"), ("Boston", "MA"), ("Chicago", "IL")]:
await send_input(city, flow_run_id=flow_run.id)
async def receive():
received = []
async for city in receive_input(
Tuple[str, str], flow_run_id=flow_run.id, timeout=1, poll_interval=0.1
):
received.append(city)
return received
await send()
received = await receive()
assert len(received) == 3
assert all(isinstance(city, tuple) for city in received)
assert set(received) == {
("New York", "NY"),
("Boston", "MA"),
("Chicago", "IL"),
}
async def test_automatic_input_receive_works_sync(flow_run):
for city in [("New York", "NY"), ("Boston", "MA"), ("Chicago", "IL")]:
await send_input(city, flow_run_id=flow_run.id)
received = []
@flow
def test_flow():
for city in receive_input(
Tuple[str, str], flow_run_id=flow_run.id, timeout=5, poll_interval=0.1
):
received.append(city)
test_flow()
assert len(received) == 3
assert all(isinstance(city, tuple) for city in received)
assert set(received) == {
("New York", "NY"),
("Boston", "MA"),
("Chicago", "IL"),
}
async def test_automatic_input_receive_with_exclude_keys(flow_run):
for city in [("New York", "NY"), ("Boston", "MA"), ("Chicago", "IL")]:
await send_input(city, flow_run_id=flow_run.id)
# Receive the cities that were sent.
received = []
async for city in receive_input(
Tuple[str, str], flow_run_id=flow_run.id, timeout=5, poll_interval=0.1
):
received.append(city)
assert len(received) == 3
# Send a new city
await send_input(("Los Angeles", "CA"), flow_run_id=flow_run.id)
# Since this receive is being called without exclude_keys, it will receive
# all of the cities that have been sent.
received = []
async for city in receive_input(
Tuple[str, str],
flow_run_id=flow_run.id,
timeout=5,
poll_interval=0.1,
with_metadata=True,
):
received.append(city)
assert len(received) == 4
# If we send another new city and receive excluding the keys that have
# been previously received, we should only receive the new city.
exclude_keys = {city.metadata.key for city in received}
await send_input(("Portland", "OR"), flow_run_id=flow_run.id)
received = []
async for city in receive_input(
Tuple[str, str], flow_run_id=flow_run.id, timeout=0, exclude_keys=exclude_keys
):
received.append(city)
assert len(received) == 1
city = received[0]
assert city[0] == "Portland"
assert city[1] == "OR"
async def test_automatic_input_receive_can_can_raise_timeout_errors_as_generator(
flow_run,
):
with pytest.raises(TimeoutError):
async for _ in receive_input(
int,
flow_run_id=flow_run.id,
timeout=0,
poll_interval=0.1,
# Normally the loop would just exit, but this causes it to raise
# when it doesn't receive a value for `timeout` seconds.
raise_timeout_error=True,
):
pass
async def test_automatic_input_receive_can_can_raise_timeout_errors_as_generator_sync(
flow_run,
):
with pytest.raises(TimeoutError):
async for _ in receive_input(
int,
flow_run_id=flow_run.id,
timeout=0,
poll_interval=0.1,
# Normally the loop would just exit, but this causes it to raise
# when it doesn't receive a value for `timeout` seconds.
raise_timeout_error=True,
):
pass
async def test_automatic_input_receive_run_input_subclass(flow_run):
await send_input(Place(city="New York", state="NY"), flow_run_id=flow_run.id)
received = await receive_input(Place, flow_run_id=flow_run.id, timeout=0).next()
assert received.city == "New York"
assert received.state == "NY"
async def test_receive(flow_run):
async def send():
for city, state in [("New York", "NY"), ("Boston", "MA"), ("Chicago", "IL")]:
await Place(city=city, state=state).send_to(flow_run_id=flow_run.id)
async def receive():
received = []
async for place in Place.receive(
flow_run_id=flow_run.id, timeout=1, poll_interval=0.1
):
received.append(place)
return received
await send()
received = await receive()
assert len(received) == 3
assert all(isinstance(place, Place) for place in received)
assert {(place.city, place.state) for place in received} == {
("New York", "NY"),
("Boston", "MA"),
("Chicago", "IL"),
}
async def test_receive_works_sync(flow_run):
for city, state in [("New York", "NY"), ("Boston", "MA"), ("Chicago", "IL")]:
await Place(city=city, state=state).send_to(flow_run_id=flow_run.id)
received = []
@flow
def test_flow():
for place in Place.receive(
flow_run_id=flow_run.id, timeout=5, poll_interval=0.1
):
received.append(place)
test_flow()
assert len(received) == 3
assert all(isinstance(place, Place) for place in received)
assert {(place.city, place.state) for place in received} == {
("New York", "NY"),
("Boston", "MA"),
("Chicago", "IL"),
}
async def test_receive_with_exclude_keys(flow_run):
for city, state in [("New York", "NY"), ("Boston", "MA"), ("Chicago", "IL")]:
await Place(city=city, state=state).send_to(flow_run_id=flow_run.id)
# Receive the places that were sent.
received = []
async for place in Place.receive(flow_run_id=flow_run.id, timeout=0):
received.append(place)
assert len(received) == 3
# Send a new place
await Place(city="Los Angeles", state="CA").send_to(flow_run_id=flow_run.id)
# Since this receive is being called without exclude_keys, it will receive
# all of the places that have been sent.
received = []
async for place in Place.receive(flow_run_id=flow_run.id, timeout=0):
received.append(place)
assert len(received) == 4
# Lets send another new place, and receive excluding the keys that have
# been previously received and we should only receive the new place.
exclude_keys = {place.metadata.key for place in received}
await Place(city="Portland", state="OR").send_to(flow_run_id=flow_run.id)
received = []
async for place in Place.receive(
flow_run_id=flow_run.id, timeout=0, exclude_keys=exclude_keys
):
received.append(place)
assert len(received) == 1
place = received[0]
assert place.city == "Portland"
assert place.state == "OR"
async def test_receive_can_raise_timeout_errors_as_generator(flow_run):
with pytest.raises(TimeoutError):
async for _ in Place.receive(
flow_run_id=flow_run.id,
timeout=0,
poll_interval=0.1,
# Normally the loop would just exit, but this causes it to raise
# when it doesn't receive a value for `timeout` seconds.
raise_timeout_error=True,
):
pass
def test_receive_can_raise_timeout_errors_as_generator_sync(flow_run):
with pytest.raises(TimeoutError):
@flow
def test_flow():
for _ in Place.receive(
flow_run_id=flow_run.id,
timeout=0,
poll_interval=0.1,
# Normally the loop would just exit, but this causes it to raise
# when it doesn't receive a value for `timeout` seconds.
raise_timeout_error=True,
):
pass
test_flow()
def test_with_initial_data_preserves_optional_type_annotations():
"""Test that with_initial_data preserves union types like str | None."""
class FormInput(RunInput):
"""A form with optional fields."""
name: Union[str, None] = None
email: Union[str, None] = None
age: Union[int, None] = None
# Create a model with initial data
initial_data = {"name": "Alice", "email": "alice@example.com", "age": 30}
PrepopulatedForm = FormInput.with_initial_data(**initial_data)
# Check that the original field annotations are preserved
assert PrepopulatedForm.model_fields["name"].annotation == Union[str, None]
assert PrepopulatedForm.model_fields["email"].annotation == Union[str, None]
assert PrepopulatedForm.model_fields["age"].annotation == Union[int, None]
# Verify that we can create instances with the initial data
instance = PrepopulatedForm()
assert instance.name == "Alice"
assert instance.email == "alice@example.com"
assert instance.age == 30
# Verify that we can override with None (clearing the field)
instance_with_none = PrepopulatedForm(name=None, email=None, age=None)
assert instance_with_none.name is None
assert instance_with_none.email is None
assert instance_with_none.age is None
# Verify that we can override with other values
instance_overridden = PrepopulatedForm(name="Bob", email="bob@example.com", age=25)
assert instance_overridden.name == "Bob"
assert instance_overridden.email == "bob@example.com"
assert instance_overridden.age == 25
| Place |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_alertrule_detector.py | {
"start": 229,
"end": 1590
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-alert-rule-detector-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.project = self.create_project(organization=self.organization)
self.detector_1 = self.create_detector(project=self.project)
self.detector_2 = self.create_detector(project=self.project)
self.detector_3 = self.create_detector(project=self.project)
self.alert_rule_detector_1 = self.create_alert_rule_detector(
alert_rule_id=12345, detector=self.detector_1
)
self.alert_rule_detector_2 = self.create_alert_rule_detector(
rule_id=67890, detector=self.detector_2
)
self.alert_rule_detector_3 = self.create_alert_rule_detector(
alert_rule_id=11111, detector=self.detector_3
)
# Create detector in different organization to test filtering
self.other_org = self.create_organization()
self.other_project = self.create_project(organization=self.other_org)
self.other_detector = self.create_detector(project=self.other_project)
self.other_alert_rule_detector = self.create_alert_rule_detector(
alert_rule_id=99999, detector=self.other_detector
)
@region_silo_test
| OrganizationAlertRuleDetectorAPITestCase |
python | langchain-ai__langchain | libs/core/langchain_core/document_loaders/langsmith.py | {
"start": 351,
"end": 5258
} | class ____(BaseLoader):
"""Load LangSmith Dataset examples as `Document` objects.
Loads the example inputs as the `Document` page content and places the entire
example into the `Document` metadata. This allows you to easily create few-shot
example retrievers from the loaded documents.
??? note "Lazy loading example"
```python
from langchain_core.document_loaders import LangSmithLoader
loader = LangSmithLoader(dataset_id="...", limit=100)
docs = []
for doc in loader.lazy_load():
docs.append(doc)
```
```python
# -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
```
"""
def __init__(
self,
*,
dataset_id: uuid.UUID | str | None = None,
dataset_name: str | None = None,
example_ids: Sequence[uuid.UUID | str] | None = None,
as_of: datetime.datetime | str | None = None,
splits: Sequence[str] | None = None,
inline_s3_urls: bool = True,
offset: int = 0,
limit: int | None = None,
metadata: dict | None = None,
filter: str | None = None, # noqa: A002
content_key: str = "",
format_content: Callable[..., str] | None = None,
client: LangSmithClient | None = None,
**client_kwargs: Any,
) -> None:
"""Create a LangSmith loader.
Args:
dataset_id: The ID of the dataset to filter by.
dataset_name: The name of the dataset to filter by.
content_key: The inputs key to set as Document page content. `'.'` characters
are interpreted as nested keys. E.g. `content_key="first.second"` will
result in
`Document(page_content=format_content(example.inputs["first"]["second"]))`
format_content: Function for converting the content extracted from the example
inputs into a string. Defaults to JSON-encoding the contents.
example_ids: The IDs of the examples to filter by.
as_of: The dataset version tag or timestamp to retrieve the examples as of.
Response examples will only be those that were present at the time of
the tagged (or timestamped) version.
splits: A list of dataset splits, which are
divisions of your dataset such as `train`, `test`, or `validation`.
Returns examples only from the specified splits.
inline_s3_urls: Whether to inline S3 URLs.
offset: The offset to start from.
limit: The maximum number of examples to return.
metadata: Metadata to filter by.
filter: A structured filter string to apply to the examples.
client: LangSmith Client. If not provided will be initialized from below args.
client_kwargs: Keyword args to pass to LangSmith client init. Should only be
specified if `client` isn't.
Raises:
ValueError: If both `client` and `client_kwargs` are provided.
""" # noqa: E501
if client and client_kwargs:
raise ValueError
self._client = client or LangSmithClient(**client_kwargs)
self.content_key = list(content_key.split(".")) if content_key else []
self.format_content = format_content or _stringify
self.dataset_id = dataset_id
self.dataset_name = dataset_name
self.example_ids = example_ids
self.as_of = as_of
self.splits = splits
self.inline_s3_urls = inline_s3_urls
self.offset = offset
self.limit = limit
self.metadata = metadata
self.filter = filter
@override
def lazy_load(self) -> Iterator[Document]:
for example in self._client.list_examples(
dataset_id=self.dataset_id,
dataset_name=self.dataset_name,
example_ids=self.example_ids,
as_of=self.as_of,
splits=self.splits,
inline_s3_urls=self.inline_s3_urls,
offset=self.offset,
limit=self.limit,
metadata=self.metadata,
filter=self.filter,
):
content: Any = example.inputs
for key in self.content_key:
content = content[key]
content_str = self.format_content(content)
metadata = example.dict()
# Stringify datetime and UUID types.
for k in ("dataset_id", "created_at", "modified_at", "source_run_id", "id"):
metadata[k] = str(metadata[k]) if metadata[k] else metadata[k]
yield Document(content_str, metadata=metadata)
def _stringify(x: str | dict) -> str:
if isinstance(x, str):
return x
try:
return json.dumps(x, indent=2)
except Exception:
return str(x)
| LangSmithLoader |
python | numba__numba | numba/tests/test_ufuncs.py | {
"start": 63076,
"end": 63576
} | class ____(_LoopTypesTester):
_ufuncs = supported_ufuncs[:]
if iswindows:
_ufuncs.remove(np.signbit) # TODO: fix issue #758
_ufuncs.remove(np.floor_divide) # has its own test class
_ufuncs.remove(np.remainder) # has its own test class
_ufuncs.remove(np.divmod) # has its own test class
_ufuncs.remove(np.mod) # same as np.remainder
_required_types = 'fd'
_skip_types = 'FDmMO' + _LoopTypesTester._skip_types
TestLoopTypesFloat.autogenerate()
| TestLoopTypesFloat |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_kinesis_analytics.py | {
"start": 3807,
"end": 6427
} | class ____:
SENSOR = KinesisAnalyticsV2StopApplicationCompletedSensor
APPLICATION_ARN = "arn:aws:kinesisanalytics:us-east-1:123456789012:application/demo"
def setup_method(self):
self.default_op_kwargs = dict(
task_id="stop_application_sensor",
application_name="demo",
poke_interval=5,
max_retries=1,
)
self.sensor = self.SENSOR(**self.default_op_kwargs, aws_conn_id=None)
def test_base_aws_op_attributes(self):
op = self.SENSOR(**self.default_op_kwargs)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
op = self.SENSOR(
**self.default_op_kwargs,
aws_conn_id="aws-test-custom-conn",
region_name="eu-west-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id == "aws-test-custom-conn"
assert op.hook._region_name == "eu-west-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
@pytest.mark.parametrize("state", SENSOR.SUCCESS_STATES)
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_poke_success_state(self, mock_conn, state):
mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN, "ApplicationStatus": state}
}
assert self.sensor.poke({}) is True
@pytest.mark.parametrize("state", SENSOR.INTERMEDIATE_STATES)
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_intermediate_state(self, mock_conn, state):
mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN, "ApplicationStatus": state}
}
assert self.sensor.poke({}) is False
@pytest.mark.parametrize("state", SENSOR.FAILURE_STATES)
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_poke_failure_states(self, mock_conn, state):
mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN, "ApplicationStatus": state}
}
sensor = self.SENSOR(**self.default_op_kwargs, aws_conn_id=None)
with pytest.raises(
AirflowException, match="AWS Managed Service for Apache Flink application stop failed"
):
sensor.poke({})
| TestKinesisAnalyticsV2StopApplicationCompletedSensor |
python | Textualize__rich | rich/repr.py | {
"start": 315,
"end": 4419
} | class ____(Exception):
"""An error occurred when attempting to build a repr."""
@overload
def auto(cls: Optional[Type[T]]) -> Type[T]:
...
@overload
def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
...
def auto(
cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
"""Class decorator to create __repr__ from __rich_repr__"""
def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
def auto_repr(self: T) -> str:
"""Create repr string from __rich_repr__"""
repr_str: List[str] = []
append = repr_str.append
angular: bool = getattr(self.__rich_repr__, "angular", False) # type: ignore[attr-defined]
for arg in self.__rich_repr__(): # type: ignore[attr-defined]
if isinstance(arg, tuple):
if len(arg) == 1:
append(repr(arg[0]))
else:
key, value, *default = arg
if key is None:
append(repr(value))
else:
if default and default[0] == value:
continue
append(f"{key}={value!r}")
else:
append(repr(arg))
if angular:
return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
else:
return f"{self.__class__.__name__}({', '.join(repr_str)})"
def auto_rich_repr(self: Type[T]) -> Result:
"""Auto generate __rich_rep__ from signature of __init__"""
try:
signature = inspect.signature(self.__init__)
for name, param in signature.parameters.items():
if param.kind == param.POSITIONAL_ONLY:
yield getattr(self, name)
elif param.kind in (
param.POSITIONAL_OR_KEYWORD,
param.KEYWORD_ONLY,
):
if param.default is param.empty:
yield getattr(self, param.name)
else:
yield param.name, getattr(self, param.name), param.default
except Exception as error:
raise ReprError(
f"Failed to auto generate __rich_repr__; {error}"
) from None
if not hasattr(cls, "__rich_repr__"):
auto_rich_repr.__doc__ = "Build a rich repr"
cls.__rich_repr__ = auto_rich_repr # type: ignore[attr-defined]
auto_repr.__doc__ = "Return repr(self)"
cls.__repr__ = auto_repr # type: ignore[assignment]
if angular is not None:
cls.__rich_repr__.angular = angular # type: ignore[attr-defined]
return cls
if cls is None:
return partial(do_replace, angular=angular)
else:
return do_replace(cls, angular=angular)
@overload
def rich_repr(cls: Optional[Type[T]]) -> Type[T]:
...
@overload
def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
...
def rich_repr(
cls: Optional[Type[T]] = None, *, angular: bool = False
) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
if cls is None:
return auto(angular=angular)
else:
return auto(cls)
if __name__ == "__main__":
@auto
class Foo:
def __rich_repr__(self) -> Result:
yield "foo"
yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
yield "buy", "hand sanitizer"
foo = Foo()
from rich.console import Console
console = Console()
console.rule("Standard repr")
console.print(foo)
console.print(foo, width=60)
console.print(foo, width=30)
console.rule("Angular repr")
Foo.__rich_repr__.angular = True # type: ignore[attr-defined]
console.print(foo)
console.print(foo, width=60)
console.print(foo, width=30)
| ReprError |
python | django__django | tests/queries/test_qs_combinators.py | {
"start": 622,
"end": 32212
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10))
def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
self.assertQuerySetEqual(
queryset, expected_numbers, operator.attrgetter("num"), ordered
)
def test_simple_union(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = Number.objects.filter(num=5)
self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_simple_intersection(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__gte=5)
qs3 = Number.objects.filter(num__gte=4, num__lte=6)
self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.intersection(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.intersection(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
@skipUnlessDBFeature("supports_select_difference")
def test_simple_difference(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__lte=4)
self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)
def test_union_distinct(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
self.assertEqual(len(list(qs1.union(qs2))), 10)
def test_union_none(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(qs3.none(), [])
self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)
def test_union_none_slice(self):
qs1 = Number.objects.filter(num__lte=0)
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
def test_union_empty_slice(self):
qs = Number.objects.union()
self.assertNumbersEqual(qs[:1], [0])
qs = Number.objects.union(all=True)
self.assertNumbersEqual(qs[:1], [0])
self.assertNumbersEqual(qs.order_by("num")[0:], list(range(0, 10)))
def test_union_all_none_slice(self):
qs = Number.objects.filter(id__in=[])
with self.assertNumQueries(0):
self.assertSequenceEqual(qs.union(qs), [])
self.assertSequenceEqual(qs.union(qs)[0:0], [])
def test_union_empty_filter_slice(self):
qs1 = Number.objects.filter(num__lte=0)
qs2 = Number.objects.filter(pk__in=[])
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_slice_compound_empty(self):
qs1 = Number.objects.filter(num__lte=0)[:1]
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_combined_slice_compound_empty(self):
qs1 = Number.objects.filter(num__lte=2)[:3]
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3.order_by("num")[2:3], [2])
def test_union_slice_index(self):
Celebrity.objects.create(name="Famous")
c1 = Celebrity.objects.create(name="Very famous")
qs1 = Celebrity.objects.filter(name="nonexistent")
qs2 = Celebrity.objects.all()
combined_qs = qs1.union(qs2).order_by("name")
self.assertEqual(combined_qs[1], c1)
def test_union_order_with_null_first_last(self):
Number.objects.filter(other_num=5).update(other_num=None)
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_first=True),
).values_list("other_num", flat=True),
[None, 1, 2, 3, 4, 6, 7, 8, 9, 10],
)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_last=True),
).values_list("other_num", flat=True),
[1, 2, 3, 4, 6, 7, 8, 9, 10, None],
)
def test_union_nested(self):
qs1 = Number.objects.all()
qs2 = qs1.union(qs1)
self.assertNumbersEqual(
qs1.union(qs2),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
ordered=False,
)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.intersection(qs2)), 0)
self.assertEqual(len(qs1.intersection(qs3)), 0)
self.assertEqual(len(qs2.intersection(qs1)), 0)
self.assertEqual(len(qs3.intersection(qs1)), 0)
self.assertEqual(len(qs2.intersection(qs2)), 0)
self.assertEqual(len(qs3.intersection(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.difference(qs2)), 10)
self.assertEqual(len(qs1.difference(qs3)), 10)
self.assertEqual(len(qs2.difference(qs1)), 0)
self.assertEqual(len(qs3.difference(qs1)), 0)
self.assertEqual(len(qs2.difference(qs2)), 0)
self.assertEqual(len(qs3.difference(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
qs2 = ReservedName.objects.none()
reserved_name = qs1.difference(qs2).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.difference(qs2).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
def test_union_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.union(qs2)), 10)
self.assertEqual(len(qs2.union(qs1)), 10)
self.assertEqual(len(qs1.union(qs3)), 10)
self.assertEqual(len(qs3.union(qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
self.assertEqual(len(qs2.union(qs2)), 0)
self.assertEqual(len(qs3.union(qs3)), 0)
def test_empty_qs_union_with_ordered_qs(self):
qs1 = Number.objects.order_by("num")
qs2 = Number.objects.none().union(qs1).order_by("num")
self.assertEqual(list(qs1), list(qs2))
def test_limits(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)
def test_ordering(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by("-num"), [3, 2, 1, 0])
def test_ordering_by_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("num"))
self.assertQuerySetEqual(
qs1.union(qs2).order_by("-alias"),
[3, 2, 1, 0],
operator.itemgetter("alias"),
)
def test_ordering_by_f_expression(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by(F("num").desc()), [3, 2, 1, 0])
def test_ordering_by_f_expression_and_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("other_num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("other_num"))
self.assertQuerySetEqual(
qs1.union(qs2).order_by(F("alias").desc()),
[10, 9, 8, 7],
operator.itemgetter("alias"),
)
Number.objects.create(num=-1)
self.assertQuerySetEqual(
qs1.union(qs2).order_by(F("alias").desc(nulls_last=True)),
[10, 9, 8, 7, None],
operator.itemgetter("alias"),
)
def test_union_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.union(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.union(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
# List of columns can be changed.
reserved_name = qs1.union(qs1).values_list("order").get()
self.assertEqual(reserved_name, (2,))
def test_union_with_two_annotated_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.annotate(
count=Value(0, IntegerField()),
)
.values_list("num", "count")
)
qs2 = (
Number.objects.filter(num=2)
.values("pk")
.annotate(
count=F("num"),
)
.annotate(
num=Value(1, IntegerField()),
)
.values_list("num", "count")
)
self.assertCountEqual(qs1.union(qs2), [(1, 0), (1, 2)])
def test_union_with_field_and_annotation_values(self):
qs1 = (
Number.objects.filter(num=1)
.annotate(
zero=Value(0, IntegerField()),
)
.values_list("num", "zero")
)
qs2 = (
Number.objects.filter(num=2)
.annotate(
zero=Value(0, IntegerField()),
)
.values_list("zero", "num")
)
self.assertCountEqual(qs1.union(qs2), [(1, 0), (0, 2)])
def test_union_with_extra_and_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.extra(
select={"count": 0},
)
.values_list("num", "count")
)
qs2 = Number.objects.filter(num=2).extra(select={"count": 1})
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_values_list_on_annotated_and_unannotated(self):
ReservedName.objects.create(name="rn1", order=1)
qs1 = Number.objects.annotate(
has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef("num")))
).filter(has_reserved_name=True)
qs2 = Number.objects.filter(num=9)
self.assertCountEqual(qs1.union(qs2).values_list("num", flat=True), [1, 9])
def test_union_with_values_list_and_order(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=7),
ReservedName(name="rn2", order=5),
ReservedName(name="rn0", order=6),
ReservedName(name="rn9", order=-1),
]
)
qs1 = ReservedName.objects.filter(order__gte=6)
qs2 = ReservedName.objects.filter(order__lte=5)
union_qs = qs1.union(qs2)
for qs, expected_result in (
# Order by a single column.
(union_qs.order_by("-pk").values_list("order", flat=True), [-1, 6, 5, 7]),
(union_qs.order_by("pk").values_list("order", flat=True), [7, 5, 6, -1]),
(union_qs.values_list("order", flat=True).order_by("-pk"), [-1, 6, 5, 7]),
(union_qs.values_list("order", flat=True).order_by("pk"), [7, 5, 6, -1]),
# Order by multiple columns.
(
union_qs.order_by("-name", "pk").values_list("order", flat=True),
[-1, 5, 7, 6],
),
(
union_qs.values_list("order", flat=True).order_by("-name", "pk"),
[-1, 5, 7, 6],
),
):
with self.subTest(qs=qs):
self.assertEqual(list(qs), expected_result)
def test_union_with_values_list_and_order_on_annotation(self):
qs1 = Number.objects.annotate(
annotation=Value(-1),
multiplier=F("annotation"),
).filter(num__gte=6)
qs2 = Number.objects.annotate(
annotation=Value(2),
multiplier=F("annotation"),
).filter(num__lte=5)
self.assertSequenceEqual(
qs1.union(qs2).order_by("annotation", "num").values_list("num", flat=True),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
)
self.assertQuerySetEqual(
qs1.union(qs2)
.order_by(
F("annotation") * F("multiplier"),
"num",
)
.values("num"),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
operator.itemgetter("num"),
)
def test_order_by_annotation_transform(self):
class Mod2(Mod, Transform):
def __init__(self, expr):
super().__init__(expr, 2)
output_field = IntegerField()
output_field.register_lookup(Mod2, "mod2")
qs1 = Number.objects.annotate(
annotation=Value(1, output_field=output_field),
)
qs2 = Number.objects.annotate(
annotation=Value(2, output_field=output_field),
)
msg = "Ordering combined queries by transforms is not implemented."
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs1.union(qs2).order_by("annotation__mod2"))
def test_union_with_select_related_and_order(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
a2 = Author.objects.create(name="a2", num=3, extra=e1)
Author.objects.create(name="a3", num=2, extra=e1)
base_qs = Author.objects.select_related("extra").order_by()
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertSequenceEqual(qs1.union(qs2).order_by("pk"), [a1, a2])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_with_select_related_and_first(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
Author.objects.create(name="a2", num=3, extra=e1)
base_qs = Author.objects.select_related("extra").order_by()
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertEqual(qs1.union(qs2).order_by("name").first(), a1)
def test_union_with_first(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
base_qs = Author.objects.order_by()
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertEqual(qs1.union(qs2).first(), a1)
def test_union_multiple_models_with_values_list_and_order(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.all()
qs2 = ReservedName.objects.all()
self.assertSequenceEqual(
qs1.union(qs2).order_by("name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_multiple_models_with_values_list_and_order_by_extra_select(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.extra(select={"extra_name": "name"})
qs2 = ReservedName.objects.extra(select={"extra_name": "name"})
self.assertSequenceEqual(
qs1.union(qs2).order_by("extra_name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_multiple_models_with_values_list_and_annotations(self):
ReservedName.objects.create(name="rn1", order=10)
Celebrity.objects.create(name="c1")
qs1 = ReservedName.objects.annotate(row_type=Value("rn")).values_list(
"name", "order", "row_type"
)
qs2 = Celebrity.objects.annotate(
row_type=Value("cb"), order=Value(-10)
).values_list("name", "order", "row_type")
self.assertSequenceEqual(
qs1.union(qs2).order_by("order"),
[("c1", -10, "cb"), ("rn1", 10, "rn")],
)
def test_union_multiple_models_with_values_list_and_datetime_annotations(self):
gen_x = datetime(1966, 6, 6)
Article.objects.create(name="Bellatrix", created=gen_x)
column_names = ["name", "created", "order"]
qs1 = Article.objects.annotate(order=Value(1)).values_list(*column_names)
gen_y = datetime(1991, 10, 10)
ReservedName.objects.create(name="Rigel", order=2)
qs2 = ReservedName.objects.annotate(
created=Cast(Value(gen_y), DateTimeField())
).values_list(*column_names)
expected_result = [("Bellatrix", gen_x, 1), ("Rigel", gen_y, 2)]
self.assertEqual(list(qs1.union(qs2).order_by("order")), expected_result)
def test_union_multiple_models_with_values_and_datetime_annotations(self):
gen_x = datetime(1966, 6, 6)
Article.objects.create(name="Bellatrix", created=gen_x)
column_names = ["name", "created", "order"]
qs1 = Article.objects.values(*column_names, order=Value(1))
gen_y = datetime(1991, 10, 10)
ReservedName.objects.create(name="Rigel", order=2)
qs2 = ReservedName.objects.values(
*column_names, created=Cast(Value(gen_y), DateTimeField())
)
expected_result = [
{"name": "Bellatrix", "created": gen_x, "order": 1},
{"name": "Rigel", "created": gen_y, "order": 2},
]
self.assertEqual(list(qs1.union(qs2).order_by("order")), expected_result)
def test_union_in_subquery(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=8),
ReservedName(name="rn2", order=1),
ReservedName(name="rn3", order=5),
]
)
qs1 = Number.objects.filter(num__gt=7, num=OuterRef("order"))
qs2 = Number.objects.filter(num__lt=2, num=OuterRef("order"))
self.assertCountEqual(
ReservedName.objects.annotate(
number=Subquery(qs1.union(qs2).values("num")),
)
.filter(number__isnull=False)
.values_list("order", flat=True),
[8, 1],
)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_in_nested_subquery(self):
tag = Tag.objects.create(name="tag")
note = Note.objects.create(tag=tag)
annotation = Annotation.objects.create(tag=tag)
tags = Tag.objects.order_by()
tags = tags.filter(id=OuterRef("tag_id")).intersection(
tags.filter(id=OuterRef(OuterRef("tag_id")))
)
qs = Note.objects.filter(
Exists(
Annotation.objects.filter(
Exists(tags),
notes__in=OuterRef("pk"),
)
)
)
self.assertIsNone(qs.first())
annotation.notes.add(note)
self.assertEqual(qs.first(), note)
def test_union_in_subquery_related_outerref(self):
e1 = ExtraInfo.objects.create(value=7, info="e3")
e2 = ExtraInfo.objects.create(value=5, info="e2")
e3 = ExtraInfo.objects.create(value=1, info="e1")
Author.objects.bulk_create(
[
Author(name="a1", num=1, extra=e1),
Author(name="a2", num=3, extra=e2),
Author(name="a3", num=2, extra=e3),
]
)
qs1 = ExtraInfo.objects.order_by().filter(value=OuterRef("num"))
qs2 = ExtraInfo.objects.order_by().filter(value__lt=OuterRef("extra__value"))
qs = (
Author.objects.annotate(
info=Subquery(qs1.union(qs2).values("info")[:1]),
)
.filter(info__isnull=False)
.values_list("name", flat=True)
)
self.assertCountEqual(qs, ["a1", "a2"])
# Combined queries don't mutate.
self.assertCountEqual(qs, ["a1", "a2"])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_in_with_ordering(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")
qs2 = Number.objects.filter(num__lt=2).order_by("num")
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[2, 3, 4, 5, 6, 7],
ordered=False,
)
@skipUnlessDBFeature(
"supports_slicing_ordering_in_compound", "allow_sliced_subqueries_with_in"
)
def test_union_in_with_ordering_and_slice(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")[:1]
qs2 = Number.objects.filter(num__lt=2).order_by("-num")[:1]
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[0, 2, 3, 4, 5, 6, 7, 9],
ordered=False,
)
def test_count_union(self):
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values("num")
self.assertEqual(qs1.union(qs2).count(), 4)
def test_count_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertEqual(qs.union(qs).count(), 0)
def test_count_union_with_select_related(self):
e1 = ExtraInfo.objects.create(value=1, info="e1")
Author.objects.create(name="a1", num=1, extra=e1)
qs = Author.objects.select_related("extra").order_by()
self.assertEqual(qs.union(qs).count(), 1)
@skipUnlessDBFeature("supports_select_difference")
def test_count_difference(self):
qs1 = Number.objects.filter(num__lt=10)
qs2 = Number.objects.filter(num__lt=9)
self.assertEqual(qs1.difference(qs2).count(), 1)
@skipUnlessDBFeature("supports_select_intersection")
def test_count_intersection(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
self.assertEqual(qs1.intersection(qs2).count(), 1)
def test_exists_union(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
with CaptureQueriesContext(connection) as context:
self.assertIs(qs1.union(qs2).exists(), True)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Number._meta.pk.column),
captured_sql,
)
self.assertEqual(
captured_sql.count(connection.ops.limit_offset_sql(None, 1)), 1
)
def test_exists_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertIs(qs.union(qs).exists(), False)
@skipUnlessDBFeature("supports_select_intersection")
def test_exists_intersection(self):
qs1 = Number.objects.filter(num__gt=5)
qs2 = Number.objects.filter(num__lt=5)
self.assertIs(qs1.intersection(qs1).exists(), True)
self.assertIs(qs1.intersection(qs2).exists(), False)
@skipUnlessDBFeature("supports_select_difference")
def test_exists_difference(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__gte=3)
self.assertIs(qs1.difference(qs2).exists(), False)
self.assertIs(qs2.difference(qs1).exists(), True)
def test_get_union(self):
qs = Number.objects.filter(num=2)
self.assertEqual(qs.union(qs).get().num, 2)
@skipUnlessDBFeature("supports_select_difference")
def test_get_difference(self):
qs1 = Number.objects.all()
qs2 = Number.objects.exclude(num=2)
self.assertEqual(qs1.difference(qs2).get().num, 2)
@skipUnlessDBFeature("supports_select_intersection")
def test_get_intersection(self):
qs1 = Number.objects.all()
qs2 = Number.objects.filter(num=2)
self.assertEqual(qs1.intersection(qs2).get().num, 2)
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_ordering_subqueries(self):
qs1 = Number.objects.order_by("num")[:2]
qs2 = Number.objects.order_by("-num")[:2]
self.assertNumbersEqual(qs1.union(qs2).order_by("-num")[:4], [9, 8, 1, 0])
@skipIfDBFeature("supports_slicing_ordering_in_compound")
def test_unsupported_ordering_slicing_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
qs3 = Number.objects.all()
msg = "LIMIT/OFFSET not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2[:10]))
msg = "ORDER BY not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.order_by("id").union(qs2))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id").union(qs3))
@skipIfDBFeature("supports_select_intersection")
def test_unsupported_intersection_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = "intersection is not supported on this database backend"
with self.assertRaisesMessage(NotSupportedError, msg):
list(qs1.intersection(qs2))
def test_combining_multiple_models(self):
ReservedName.objects.create(name="99 little bugs", order=99)
qs1 = Number.objects.filter(num=1).values_list("num", flat=True)
qs2 = ReservedName.objects.values_list("order")
self.assertEqual(list(qs1.union(qs2).order_by("num")), [1, 99])
def test_order_raises_on_non_selected_column(self):
qs1 = (
Number.objects.filter()
.annotate(
annotation=Value(1, IntegerField()),
)
.values("annotation", num2=F("num"))
)
qs2 = Number.objects.filter().values("id", "num")
# Should not raise
list(qs1.union(qs2).order_by("annotation"))
list(qs1.union(qs2).order_by("num2"))
msg = "ORDER BY term does not match any column in the result set"
# 'id' is not part of the select
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id"))
# 'num' got realiased to num2
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("num"))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num")))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num").desc()))
# switched order, now 'exists' again:
list(qs2.union(qs1).order_by("num"))
@skipUnlessDBFeature("supports_select_difference", "supports_select_intersection")
def test_qs_with_subcompound_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
self.assertEqual(qs1.difference(qs2).count(), 2)
def test_order_by_same_type(self):
qs = Number.objects.all()
union = qs.union(qs)
numbers = list(range(10))
self.assertNumbersEqual(union.order_by("num"), numbers)
self.assertNumbersEqual(union.order_by("other_num"), reversed(numbers))
def test_unsupported_operations_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.%s() after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
for operation in (
"alias",
"annotate",
"defer",
"delete",
"distinct",
"exclude",
"extra",
"filter",
"only",
"prefetch_related",
"select_related",
"update",
):
with self.subTest(combinator=combinator, operation=operation):
with self.assertRaisesMessage(
NotSupportedError,
msg % (operation, combinator),
):
getattr(getattr(qs, combinator)(qs), operation)()
with self.assertRaisesMessage(
NotSupportedError,
msg % ("contains", combinator),
):
obj = Number.objects.first()
getattr(qs, combinator)(qs).contains(obj)
def test_get_with_filters_unsupported_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.get(...) with filters after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(NotSupportedError, msg % combinator):
getattr(qs, combinator)(qs).get(num=2)
def test_operator_on_combined_qs_error(self):
qs = Number.objects.all()
msg = "Cannot use %s operator with combined queryset."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
operators = [
("|", operator.or_),
("&", operator.and_),
("^", operator.xor),
]
for combinator in combinators:
combined_qs = getattr(qs, combinator)(qs)
for operator_, operator_func in operators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(qs, combined_qs)
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(combined_qs, qs)
| QuerySetSetOperationTests |
python | ray-project__ray | python/ray/tests/test_failure_3.py | {
"start": 10785,
"end": 13810
} | class ____:
def create_leaked_child_process(self, num_to_leak):
print("Creating leaked process", os.getpid())
pids = []
for _ in range(num_to_leak):
proc = multiprocessing.Process(
target=time.sleep,
args=(1000,),
daemon=True,
)
proc.start()
pids.append(proc.pid)
return pids
@ray.remote
def task():
print("Creating leaked process", os.getpid())
proc = multiprocessing.Process(
target=time.sleep,
args=(1000,),
daemon=True,
)
proc.start()
return proc.pid
num_to_leak_per_type = 10
actor = Actor.remote()
actor_leaked_pids = ray.get(actor.create_leaked_child_process.remote(
num_to_leak=num_to_leak_per_type,
))
task_leaked_pids = ray.get([task.remote() for _ in range(num_to_leak_per_type)])
leaked_pids = actor_leaked_pids + task_leaked_pids
final_file = "{output_file_path}"
tmp_file = final_file + ".tmp"
with open(tmp_file, "w") as f:
json.dump(leaked_pids, f)
shutil.move(tmp_file, final_file)
while True:
print(os.getpid())
time.sleep(1)
"""
driver_proc = run_string_as_driver_nonblocking(driver_script)
# Wait for the json file containing the child PIDS
# to be present.
wait_for_condition(
condition_predictor=lambda: Path(output_file_path).exists(),
timeout=30,
)
# Load the PIDs of the child processes.
with open(output_file_path, "r") as f:
pids = json.load(f)
# Validate all children of the worker processes are in a sleeping state.
processes = [psutil.Process(pid) for pid in pids]
assert all([proc.status() == psutil.STATUS_SLEEPING for proc in processes])
# Valdiate children of worker process die after SIGINT.
driver_proc.send_signal(signal.SIGINT)
wait_for_condition(
condition_predictor=lambda: all([not proc.is_running() for proc in processes]),
timeout=30,
)
@pytest.mark.skipif(sys.platform != "linux", reason="Only works on linux.")
def test_worker_cleans_up_child_procs_on_raylet_death(ray_start_cluster, tmp_path):
"""
CoreWorker kills its child processes if the raylet dies.
This test creates 20 leaked processes; 10 from a single actor task, and
10 from distinct non-actor tasks.
Once the raylet dies, the test verifies all leaked processes are cleaned up.
"""
output_file_path = tmp_path / "leaked_pids.json"
ray_start_cluster.add_node()
driver_script = f"""
import ray
import json
import multiprocessing
import shutil
import time
import os
def change_name_and_sleep(label: str, index: int) -> None:
proctitle = "child_proc_name_prefix_" + label + "_" + str(index)
ray._raylet.setproctitle(proctitle)
time.sleep(1000)
def create_child_proc(label, index):
proc = multiprocessing.Process(
target=change_name_and_sleep,
args=(label, index,),
daemon=True,
)
proc.start()
return proc.pid
@ray.remote
| Actor |
python | streamlit__streamlit | lib/tests/streamlit/elements/file_uploader_test.py | {
"start": 15697,
"end": 17324
} | class ____(DeltaGeneratorTestCase):
def test_file_uploader_with_width_pixels(self):
"""Test that file_uploader can be displayed with a specific width in pixels."""
st.file_uploader("Label", width=500)
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert c.width_config.pixel_width == 500
def test_file_uploader_with_width_stretch(self):
"""Test that file_uploader can be displayed with a width of 'stretch'."""
st.file_uploader("Label", width="stretch")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
def test_file_uploader_with_default_width(self):
"""Test that the default width is used when not specified."""
st.file_uploader("Label")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
@parameterized.expand(
[
"invalid",
-1,
0,
100.5,
]
)
def test_width_config_invalid(self, invalid_width):
"""Test width config with various invalid values."""
with pytest.raises(StreamlitInvalidWidthError):
st.file_uploader("the label", width=invalid_width)
| FileUploaderWidthTest |
python | great-expectations__great_expectations | great_expectations/render/renderer_configuration.py | {
"start": 2299,
"end": 2416
} | class ____(TypedDict):
"""Json schema for values found in renderers."""
type: RendererValueType
| RendererSchema |
python | scipy__scipy | scipy/integrate/_ivp/rk.py | {
"start": 2356,
"end": 5846
} | class ____(OdeSolver):
"""Base class for explicit Runge-Kutta methods."""
C: np.ndarray = NotImplemented
A: np.ndarray = NotImplemented
B: np.ndarray = NotImplemented
E: np.ndarray = NotImplemented
P: np.ndarray = NotImplemented
order: int = NotImplemented
error_estimator_order: int = NotImplemented
n_stages: int = NotImplemented
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, vectorized=False,
first_step=None, **extraneous):
warn_extraneous(extraneous)
super().__init__(fun, t0, y0, t_bound, vectorized,
support_complex=True)
self.y_old = None
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
self.f = self.fun(self.t, self.y)
if first_step is None:
self.h_abs = select_initial_step(
self.fun, self.t, self.y, t_bound, max_step, self.f, self.direction,
self.error_estimator_order, self.rtol, self.atol)
else:
self.h_abs = validate_first_step(first_step, t0, t_bound)
self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
self.error_exponent = -1 / (self.error_estimator_order + 1)
self.h_previous = None
def _estimate_error(self, K, h):
return np.dot(K.T, self.E) * h
def _estimate_error_norm(self, K, h, scale):
return norm(self._estimate_error(K, h) / scale)
def _step_impl(self):
t = self.t
y = self.y
max_step = self.max_step
rtol = self.rtol
atol = self.atol
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
elif self.h_abs < min_step:
h_abs = min_step
else:
h_abs = self.h_abs
step_accepted = False
step_rejected = False
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
h = t_new - t
h_abs = np.abs(h)
y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
self.B, self.C, self.K)
scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
error_norm = self._estimate_error_norm(self.K, h, scale)
if error_norm < 1:
if error_norm == 0:
factor = MAX_FACTOR
else:
factor = min(MAX_FACTOR,
SAFETY * error_norm ** self.error_exponent)
if step_rejected:
factor = min(1, factor)
h_abs *= factor
step_accepted = True
else:
h_abs *= max(MIN_FACTOR,
SAFETY * error_norm ** self.error_exponent)
step_rejected = True
self.h_previous = h
self.y_old = y
self.t = t_new
self.y = y_new
self.h_abs = h_abs
self.f = f_new
return True, None
def _dense_output_impl(self):
Q = self.K.T.dot(self.P)
return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
| RungeKutta |
python | walkccc__LeetCode | solutions/1254. Number of Closed Islands/1254.py | {
"start": 0,
"end": 733
} | class ____:
def closedIsland(self, grid: list[list[int]]) -> int:
m = len(grid)
n = len(grid[0])
def dfs(i: int, j: int) -> None:
if i < 0 or i == m or j < 0 or j == n:
return
if grid[i][j] == 1:
return
grid[i][j] = 1
dfs(i + 1, j)
dfs(i - 1, j)
dfs(i, j + 1)
dfs(i, j - 1)
# Remove the lands connected to the edge.
for i in range(m):
for j in range(n):
if i * j == 0 or i == m - 1 or j == n - 1:
if grid[i][j] == 0:
dfs(i, j)
ans = 0
# Reduce to 200. Number of Islands
for i in range(m):
for j in range(n):
if grid[i][j] == 0:
dfs(i, j)
ans += 1
return ans
| Solution |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 72191,
"end": 72977
} | class ____(WebTestCase):
class Handler(RequestHandler):
def prepare(self):
self.write(dict(args=self.path_args, kwargs=self.path_kwargs))
def get(self, path):
assert path == "foo"
self.finish()
def get_handlers(self):
return [("/pos/(.*)", self.Handler), ("/kw/(?P<path>.*)", self.Handler)]
def test_pos(self):
response = self.fetch("/pos/foo")
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {"args": ["foo"], "kwargs": {}})
def test_kw(self):
response = self.fetch("/kw/foo")
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {"args": [], "kwargs": {"path": "foo"}})
| PathArgsInPrepareTest |
python | psf__black | tests/data/cases/line_ranges_fmt_off_decorator.py | {
"start": 774,
"end": 998
} | class ____:
# fmt: off
@decorator ( )
# fmt: on
def method():
print("str")
@decor(
a=1,
# fmt: off
b=(2, 3),
# fmt: on
)
def func():
pass
| MyClass |
python | pypa__warehouse | warehouse/i18n/extensions.py | {
"start": 307,
"end": 1939
} | class ____(Extension):
"""
This extension ensures all {% trans %} tags are trimmed by default.
"""
def __init__(self, environment):
environment.policies["ext.i18n.trimmed"] = True
def _make_newer_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
"""
Wraps upstream _make_new_gettext with the try/except for KeyError to
fallback to untranslated strings when translations have not been updated
with new named variables.
"""
_old_gettext = _make_new_gettext(func)
@pass_context
def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
try:
return _old_gettext(__context, __string, **variables)
except (KeyError, ValueError):
return __string % variables
return gettext
def _make_newer_ngettext(
func: t.Callable[[str, str, int], str],
) -> t.Callable[..., str]:
"""
Wraps upstream _make_new_ngettext with the try/except for KeyError to
fallback to untranslated strings when translations have not been updated
with new named variables.
"""
_old_ngettext = pass_context(_make_new_ngettext(func))
@pass_context
def ngettext(
__context: Context,
__singular: str,
__plural: str,
__num: int,
**variables: t.Any,
) -> str:
try:
return _old_ngettext(__context, __singular, __plural, __num, **variables)
except (KeyError, ValueError):
if __num > 1:
return __plural % variables
return __singular % variables
return ngettext
| TrimmedTranslatableTagsExtension |
python | tensorflow__tensorflow | tensorflow/python/keras/initializers/initializers_v2.py | {
"start": 3572,
"end": 4868
} | class ____(Initializer):
"""Initializer that generates tensors initialized to 0.
Also available via the shortcut function `tf.keras.initializers.zeros`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Zeros()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Zeros()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _get_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.zeros(shape, dtype)
| Zeros |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 97696,
"end": 98963
} | class ____(Response):
"""
Response of events.get_plot_sample endpoint.
"""
_service = "events"
_action = "get_plot_sample"
_version = "2.23"
_schema = {
"$ref": "#/definitions/plot_sample_response",
"definitions": {
"plot_sample_response": {
"properties": {
"events": {
"description": "Plot events",
"items": {"type": "object"},
"type": ["array", "null"],
},
"max_iteration": {
"description": "maximal valid iteration for the metric",
"type": ["integer", "null"],
},
"min_iteration": {
"description": "minimal valid iteration for the metric",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID to pass to the next calls to get_plot_sample or next_plot_sample",
"type": ["string", "null"],
},
},
"type": "object",
}
},
}
| GetPlotSampleResponse |
python | jd__tenacity | tenacity/tornadoweb.py | {
"start": 863,
"end": 2125
} | class ____(BaseRetrying):
def __init__(
self,
sleep: "typing.Callable[[float], Future[None]]" = gen.sleep,
**kwargs: typing.Any,
) -> None:
super().__init__(**kwargs)
self.sleep = sleep
@gen.coroutine # type: ignore[misc]
def __call__(
self,
fn: "typing.Callable[..., typing.Union[typing.Generator[typing.Any, typing.Any, _RetValT], Future[_RetValT]]]",
*args: typing.Any,
**kwargs: typing.Any,
) -> "typing.Generator[typing.Any, typing.Any, _RetValT]":
self.begin()
retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
while True:
do = self.iter(retry_state=retry_state)
if isinstance(do, DoAttempt):
try:
result = yield fn(*args, **kwargs)
except BaseException: # noqa: B902
retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
else:
retry_state.set_result(result)
elif isinstance(do, DoSleep):
retry_state.prepare_for_next_attempt()
yield self.sleep(do)
else:
raise gen.Return(do)
| TornadoRetrying |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/property_transformation.py | {
"start": 150,
"end": 1505
} | class ____(NamedTuple):
source_name: str
transformed_name: str
def transform_property_names(property_names: Iterable[str]) -> Iterator[TransformationResult]:
"""
Transform property names using this rules:
1. Remove leading "$" from property_name
2. Resolve naming conflicts, like `userName` and `username`,
that will break normalization in the future, by adding `_userName`to property name
"""
lowercase_collision_count = defaultdict(int)
lowercase_properties = set()
# Sort property names for consistent result
for property_name in sorted(property_names):
property_name_transformed = property_name
if property_name_transformed.startswith("$"):
property_name_transformed = property_name_transformed[1:]
lowercase_property_name = property_name_transformed.lower()
if lowercase_property_name in lowercase_properties:
lowercase_collision_count[lowercase_property_name] += 1
# Add prefix to property name
prefix = "_" * lowercase_collision_count[lowercase_property_name]
property_name_transformed = prefix + property_name_transformed
lowercase_properties.add(lowercase_property_name)
yield TransformationResult(source_name=property_name, transformed_name=property_name_transformed)
| TransformationResult |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_generators.py | {
"start": 53533,
"end": 74615
} | class ____:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
# Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 through m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 through m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print(sep)
for i in range(m):
row = squares[i]
print("|" + "|".join(row) + "|")
print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
>>> from test.support import gc_collect
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
Yield is allowed only in the outermost iterable in generator expression:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> f=lambda: (yield from (1,2)), (yield from (3,4))
Traceback (most recent call last):
...
SyntaxError: 'yield from' outside function
>>> yield from [1,2]
Traceback (most recent call last):
...
SyntaxError: 'yield from' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: cannot assign to yield expression here. Maybe you meant '==' instead of '='?
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: 'yield expression' is an illegal expression for augmented assignment
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> import warnings
>>> old_filters = warnings.filters.copy()
>>> warnings.filterwarnings("ignore", category=DeprecationWarning)
# Filter DeprecationWarning: regarding the (type, val, tb) signature of throw().
# Deprecation warnings are re-enabled below.
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
>>> warnings.filters[:] = old_filters
# Re-enable DeprecationWarning: the (type, val, tb) exception representation is deprecated,
# and may be removed in a future version of Python.
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g; gc_collect() # For PyPy or other GCs.
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g; gc_collect() # For PyPy or other GCs.
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> with support.catch_unraisable_exception() as cm:
... g = f()
... next(g)
... del g
...
... cm.unraisable.exc_type == RuntimeError
... "generator ignored GeneratorExit" in str(cm.unraisable.exc_value)
... cm.unraisable.exc_traceback is not None
True
True
True
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> from test import support
>>> class Leaker:
... def __del__(self):
... def invoke(message):
... raise RuntimeError(message)
... invoke("del failed")
...
>>> with support.catch_unraisable_exception() as cm:
... l = Leaker()
... del l
...
... cm.unraisable.object == Leaker.__del__
... cm.unraisable.exc_type == RuntimeError
... str(cm.unraisable.exc_value) == "del failed"
... cm.unraisable.exc_traceback is not None
True
True
True
True
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
# __test__ = {"tut": tutorial_tests,
# "pep": pep_tests,
# "email": email_tests,
# "fun": fun_tests,
# "syntax": syntax_tests,
# "conjoin": conjoin_tests,
# "weakref": weakref_tests,
# "coroutine": coroutine_tests,
# "refleaks": refleaks_tests,
# }
# def load_tests(loader, tests, pattern):
# # ======= BEGIN Dynamo patch =======
# suite = doctest.DocTestSuite()
# for test in suite:
# # Dynamically change base class
# test.__class__ = type(test.__class__.__name__, (__TestCase, test.__class__), {})
# tests.addTests(suite)
# # ======= END DYNAMO PATCH =======
# return tests
if __name__ == "__main__":
run_tests()
| Knights |
python | huggingface__transformers | src/transformers/models/glm4_moe/modular_glm4_moe.py | {
"start": 11882,
"end": 12553
} | class ____(DeepseekV3TopkRouter):
def __init__(self, config: Glm4MoeConfig):
nn.Module.__init__(self)
self.config = config
self.top_k = config.num_experts_per_tok
self.n_routed_experts = config.n_routed_experts
self.routed_scaling_factor = config.routed_scaling_factor
self.n_group = config.n_group
self.topk_group = config.topk_group
self.norm_topk_prob = config.norm_topk_prob
self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
self.register_buffer("e_score_correction_bias", torch.zeros((self.n_routed_experts), dtype=torch.float32))
| Glm4MoeTopkRouter |
python | pytorch__pytorch | tools/linter/adapters/_linter/__init__.py | {
"start": 741,
"end": 1024
} | class ____(ValueError):
def __init__(self, token: TokenInfo, *args: str) -> None:
super().__init__(*args)
self.token = token
from .block import Block
from .file_linter import FileLinter
from .messages import LintResult
from .python_file import PythonFile
| ParseError |
python | python__mypy | mypy/util.py | {
"start": 10023,
"end": 18865
} | class ____:
"""Generate integer ids for objects.
Unlike id(), these start from 0 and increment by 1, and ids won't
get reused across the life-time of IdMapper.
Assume objects don't redefine __eq__ or __hash__.
"""
def __init__(self) -> None:
self.id_map: dict[object, int] = {}
self.next_id = 0
def id(self, o: object) -> int:
if o not in self.id_map:
self.id_map[o] = self.next_id
self.next_id += 1
return self.id_map[o]
def get_prefix(fullname: str) -> str:
"""Drop the final component of a qualified name (e.g. ('x.y' -> 'x')."""
return fullname.rsplit(".", 1)[0]
def correct_relative_import(
cur_mod_id: str, relative: int, target: str, is_cur_package_init_file: bool
) -> tuple[str, bool]:
if relative == 0:
return target, True
parts = cur_mod_id.split(".")
rel = relative
if is_cur_package_init_file:
rel -= 1
ok = len(parts) >= rel
if rel != 0:
cur_mod_id = ".".join(parts[:-rel])
return cur_mod_id + (("." + target) if target else ""), ok
fields_cache: Final[dict[type[object], list[str]]] = {}
def get_class_descriptors(cls: type[object]) -> Sequence[str]:
import inspect # Lazy import for minor startup speed win
# Maintain a cache of type -> attributes defined by descriptors in the class
# (that is, attributes from __slots__ and C extension classes)
if cls not in fields_cache:
members = inspect.getmembers(
cls, lambda o: inspect.isgetsetdescriptor(o) or inspect.ismemberdescriptor(o)
)
fields_cache[cls] = [x for x, y in members if x != "__weakref__" and x != "__dict__"]
return fields_cache[cls]
def replace_object_state(
new: object, old: object, copy_dict: bool = False, skip_slots: tuple[str, ...] = ()
) -> None:
"""Copy state of old node to the new node.
This handles cases where there is __dict__ and/or attribute descriptors
(either from slots or because the type is defined in a C extension module).
Assume that both objects have the same __class__.
"""
if hasattr(old, "__dict__"):
if copy_dict:
new.__dict__ = dict(old.__dict__)
else:
new.__dict__ = old.__dict__
for attr in get_class_descriptors(old.__class__):
if attr in skip_slots:
continue
try:
if hasattr(old, attr):
setattr(new, attr, getattr(old, attr))
elif hasattr(new, attr):
delattr(new, attr)
# There is no way to distinguish getsetdescriptors that allow
# writes from ones that don't (I think?), so we just ignore
# AttributeErrors if we need to.
# TODO: What about getsetdescriptors that act like properties???
except AttributeError:
pass
def is_sub_path_normabs(path: str, dir: str) -> bool:
"""Given two paths, return if path is a sub-path of dir.
Moral equivalent of: Path(dir) in Path(path).parents
Similar to the pathlib version:
- Treats paths case-sensitively
- Does not fully handle unnormalised paths (e.g. paths with "..")
- Does not handle a mix of absolute and relative paths
Unlike the pathlib version:
- Fast
- On Windows, assumes input has been slash normalised
- Handles even fewer unnormalised paths (e.g. paths with "." and "//")
As a result, callers should ensure that inputs have had os.path.abspath called on them
(note that os.path.abspath will normalise)
"""
if not dir.endswith(os.sep):
dir += os.sep
return path.startswith(dir)
if sys.platform == "linux" or sys.platform == "darwin":
def os_path_join(path: str, b: str) -> str:
# Based off of os.path.join, but simplified to str-only, 2 args and mypyc can compile it.
if b.startswith("/") or not path:
return b
elif path.endswith("/"):
return path + b
else:
return path + "/" + b
else:
def os_path_join(a: str, p: str) -> str:
return os.path.join(a, p)
def hard_exit(status: int = 0) -> None:
"""Kill the current process without fully cleaning up.
This can be quite a bit faster than a normal exit() since objects are not freed.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(status)
def unmangle(name: str) -> str:
"""Remove internal suffixes from a short name."""
return name.rstrip("'")
def get_unique_redefinition_name(name: str, existing: Container[str]) -> str:
"""Get a simple redefinition name not present among existing.
For example, for name 'foo' we try 'foo-redefinition', 'foo-redefinition2',
'foo-redefinition3', etc. until we find one that is not in existing.
"""
r_name = name + "-redefinition"
if r_name not in existing:
return r_name
i = 2
while r_name + str(i) in existing:
i += 1
return r_name + str(i)
def check_python_version(program: str) -> None:
"""Report issues with the Python used to run mypy, dmypy, or stubgen"""
# Check for known bad Python versions.
if sys.version_info[:2] < (3, 10): # noqa: UP036, RUF100
sys.exit(
"Running {name} with Python 3.9 or lower is not supported; "
"please upgrade to 3.10 or newer".format(name=program)
)
def count_stats(messages: list[str]) -> tuple[int, int, int]:
"""Count total number of errors, notes and error_files in message list."""
errors = [e for e in messages if ": error:" in e]
error_files = {e.split(":")[0] for e in errors}
notes = [e for e in messages if ": note:" in e]
return len(errors), len(notes), len(error_files)
def split_words(msg: str) -> list[str]:
"""Split line of text into words (but not within quoted groups)."""
next_word = ""
res: list[str] = []
allow_break = True
for c in msg:
if c == " " and allow_break:
res.append(next_word)
next_word = ""
continue
if c == '"':
allow_break = not allow_break
next_word += c
res.append(next_word)
return res
def get_terminal_width() -> int:
"""Get current terminal width if possible, otherwise return the default one."""
return (
int(os.getenv("MYPY_FORCE_TERMINAL_WIDTH", "0"))
or shutil.get_terminal_size().columns
or DEFAULT_COLUMNS
)
def soft_wrap(msg: str, max_len: int, first_offset: int, num_indent: int = 0) -> str:
"""Wrap a long error message into few lines.
Breaks will only happen between words, and never inside a quoted group
(to avoid breaking types such as "Union[int, str]"). The 'first_offset' is
the width before the start of first line.
Pad every next line with 'num_indent' spaces. Every line will be at most 'max_len'
characters, except if it is a single word or quoted group.
For example:
first_offset
------------------------
path/to/file: error: 58: Some very long error message
that needs to be split in separate lines.
"Long[Type, Names]" are never split.
^^^^--------------------------------------------------
num_indent max_len
"""
words = split_words(msg)
next_line = words.pop(0)
lines: list[str] = []
while words:
next_word = words.pop(0)
max_line_len = max_len - num_indent if lines else max_len - first_offset
# Add 1 to account for space between words.
if len(next_line) + len(next_word) + 1 <= max_line_len:
next_line += " " + next_word
else:
lines.append(next_line)
next_line = next_word
lines.append(next_line)
padding = "\n" + " " * num_indent
return padding.join(lines)
def hash_digest(data: bytes) -> str:
"""Compute a hash digest of some data.
We use a cryptographic hash because we want a low probability of
accidental collision, but we don't really care about any of the
cryptographic properties.
"""
return hashlib.sha1(data).hexdigest()
def hash_digest_bytes(data: bytes) -> bytes:
"""Compute a hash digest of some data.
Similar to above but returns a bytes object.
"""
return hashlib.sha1(data).digest()
def parse_gray_color(cup: bytes) -> str:
"""Reproduce a gray color in ANSI escape sequence"""
assert sys.platform != "win32", "curses is not available on Windows"
set_color = "".join([cup[:-1].decode(), "m"])
gray = curses.tparm(set_color.encode("utf-8"), 1, 9).decode()
return gray
def should_force_color() -> bool:
env_var = os.getenv("MYPY_FORCE_COLOR", os.getenv("FORCE_COLOR", "0"))
try:
return bool(int(env_var))
except ValueError:
return bool(env_var)
| IdMapper |
python | dagster-io__dagster | python_modules/libraries/dagster-k8s/dagster_k8s/client.py | {
"start": 8627,
"end": 8969
} | class ____:
PodInitializing = "PodInitializing"
ContainerCreating = "ContainerCreating"
ErrImagePull = "ErrImagePull"
ImagePullBackOff = "ImagePullBackOff"
CrashLoopBackOff = "CrashLoopBackOff"
RunContainerError = "RunContainerError"
CreateContainerConfigError = "CreateContainerConfigError"
| KubernetesWaitingReasons |
python | keon__algorithms | tests/test_strings.py | {
"start": 18159,
"end": 18402
} | class ____(unittest.TestCase):
def test_repeat_substring(self):
self.assertTrue(repeat_substring("abab"))
self.assertFalse(repeat_substring("aba"))
self.assertTrue(repeat_substring("abcabcabcabc"))
| TestRepeatSubstring |
python | ray-project__ray | python/ray/serve/tests/unit/test_autoscaling_policy.py | {
"start": 9646,
"end": 33629
} | class ____:
@pytest.mark.parametrize(
"use_upscale_smoothing_factor,use_upscaling_factor",
[(True, True), (True, False), (False, True)],
)
def test_scaling_factor_scale_up_from_0_replicas(
self, use_upscale_smoothing_factor, use_upscaling_factor
):
"""Test that the scaling factor is respected when scaling up
from 0 replicas.
"""
min_replicas = 0
max_replicas = 2
config = AutoscalingConfig(
min_replicas=min_replicas,
max_replicas=max_replicas,
upscale_smoothing_factor=10 if use_upscale_smoothing_factor else None,
upscaling_factor=10 if use_upscaling_factor else None,
)
ctx = AutoscalingContext(
target_num_replicas=0,
total_num_requests=1,
current_num_replicas=0,
config=config,
capacity_adjusted_min_replicas=min_replicas,
capacity_adjusted_max_replicas=max_replicas,
policy_state={},
deployment_id=None,
deployment_name=None,
app_name=None,
running_replicas=None,
current_time=None,
total_queued_requests=None,
aggregated_metrics=None,
raw_metrics=None,
last_scale_up_time=None,
last_scale_down_time=None,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
# 1 * 10
assert new_num_replicas == 10
if use_upscale_smoothing_factor:
config.upscale_smoothing_factor = 0.5
if use_upscaling_factor:
config.upscaling_factor = 0.5
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
# math.ceil(1 * 0.5)
assert new_num_replicas == 1
@pytest.mark.parametrize(
"use_downscale_smoothing_factor,use_downscaling_factor",
[(True, True), (True, False), (False, True)],
)
def test_scaling_factor_scale_down_to_0_replicas(
self, use_downscale_smoothing_factor, use_downscaling_factor
):
"""Test that a deployment scales down to 0 for non-default smoothing factors."""
# With smoothing factor > 1, the desired number of replicas should
# immediately drop to 0 (while respecting upscale and downscale delay)
min_replicas = 0
max_replicas = 5
policy_state = {}
config = AutoscalingConfig(
min_replicas=min_replicas,
max_replicas=max_replicas,
downscale_smoothing_factor=10 if use_downscale_smoothing_factor else None,
downscaling_factor=10 if use_downscaling_factor else None,
upscale_delay_s=0,
downscale_delay_s=0,
)
ctx = AutoscalingContext(
config=config,
total_num_requests=0,
current_num_replicas=5,
target_num_replicas=5,
capacity_adjusted_min_replicas=min_replicas,
capacity_adjusted_max_replicas=max_replicas,
policy_state=policy_state,
deployment_id=None,
deployment_name=None,
app_name=None,
running_replicas=None,
current_time=None,
total_queued_requests=None,
aggregated_metrics=None,
raw_metrics=None,
last_scale_up_time=None,
last_scale_down_time=None,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
# Downscaling to 0 first stops at 1
assert new_num_replicas == 1
# Need to trigger this the second time to go to zero
ctx.target_num_replicas = 1
ctx.current_num_replicas = 1
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 0
# With smoothing factor < 1, the desired number of replicas shouldn't
# get stuck at a positive number, and instead should eventually drop
# to zero
if use_downscale_smoothing_factor:
config.downscale_smoothing_factor = 0.2
if use_downscaling_factor:
config.downscaling_factor = 0.2
# policy_manager = AutoscalingPolicyManager(config)
num_replicas = 5
for _ in range(5):
ctx = create_context_with_overrides(
ctx,
total_num_requests=0,
current_num_replicas=num_replicas,
target_num_replicas=num_replicas,
)
num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert num_replicas == 0
@pytest.mark.parametrize("downscale_to_zero_delay_s", [None, 300])
def test_upscale_downscale_delay(self, downscale_to_zero_delay_s):
"""Unit test for upscale_delay_s, downscale_delay_s and downscale_to_zero_delay_s"""
upscale_delay_s = 30.0
downscale_delay_s = 600.0
min_replicas = 0
max_replicas = 2
policy_state = {}
config = AutoscalingConfig(
min_replicas=min_replicas,
max_replicas=max_replicas,
target_ongoing_requests=1,
upscale_delay_s=30.0,
downscale_delay_s=600.0,
downscale_to_zero_delay_s=downscale_to_zero_delay_s,
)
upscale_wait_periods = int(upscale_delay_s / CONTROL_LOOP_INTERVAL_S)
downscale_wait_periods = int(downscale_delay_s / CONTROL_LOOP_INTERVAL_S)
# Check if downscale_to_zero_delay_s is set
if downscale_to_zero_delay_s:
downscale_to_zero_wait_periods = int(
downscale_to_zero_delay_s / CONTROL_LOOP_INTERVAL_S
)
else:
downscale_to_zero_wait_periods = int(
downscale_delay_s / CONTROL_LOOP_INTERVAL_S
)
overload_requests = 100
ctx = AutoscalingContext(
config=config,
total_num_requests=1,
current_num_replicas=0,
target_num_replicas=0,
capacity_adjusted_min_replicas=min_replicas,
capacity_adjusted_max_replicas=max_replicas,
policy_state=policy_state,
deployment_id=None,
deployment_name=None,
app_name=None,
running_replicas=None,
current_time=None,
total_queued_requests=None,
aggregated_metrics=None,
raw_metrics=None,
last_scale_up_time=None,
last_scale_down_time=None,
)
# Scale up when there are 0 replicas and current_handle_queued_queries > 0
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1
# We should scale up only after enough consecutive scale-up decisions.
for i in range(upscale_wait_periods):
ctx = create_context_with_overrides(
ctx,
total_num_requests=overload_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1, i
ctx = create_context_with_overrides(
ctx,
total_num_requests=overload_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 2
no_requests = 0
# We should scale down only after enough consecutive scale-down decisions.
# Downscaling to zero follows current_num_replicas->1->0
for i in range(downscale_wait_periods):
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=2,
target_num_replicas=2,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 2, i
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=2,
target_num_replicas=2,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1
# We should scale down to zero only after enough consecutive downscale-to-zero decisions.
for i in range(downscale_to_zero_wait_periods):
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1, i
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 0
# Get some scale-up decisions, but not enough to trigger a scale up.
for i in range(int(upscale_wait_periods / 2)):
ctx = create_context_with_overrides(
ctx,
total_num_requests=overload_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1, i
# Interrupt with a scale-down decision.
ctx = create_context_with_overrides(
ctx,
total_num_requests=0,
current_num_replicas=1,
target_num_replicas=1,
)
replica_queue_length_autoscaling_policy(ctx=ctx)
# The counter should be reset, so it should require `upscale_wait_periods`
# more periods before we actually scale up.
for i in range(upscale_wait_periods):
ctx = create_context_with_overrides(
ctx,
total_num_requests=overload_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1, i
ctx = create_context_with_overrides(
ctx,
total_num_requests=overload_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 2
# Get some scale-down decisions, but not enough to trigger a scale down.
for i in range(int(downscale_wait_periods / 2)):
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=2,
target_num_replicas=2,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 2, i
# Interrupt with a scale-up decision.
ctx = create_context_with_overrides(
ctx,
total_num_requests=200,
current_num_replicas=2,
target_num_replicas=2,
)
replica_queue_length_autoscaling_policy(ctx=ctx)
# The counter should be reset so it should require `downscale_wait_periods`
# more periods before we actually scale down.
# We should scale down only after enough consecutive scale-down decisions.
for i in range(downscale_wait_periods):
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=2,
target_num_replicas=2,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 2, i
# First scale down to 1 replica
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=2,
target_num_replicas=2,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1
# Scale down to 0, but not enough to trigger a complete scale down to zero.
for i in range(int(downscale_to_zero_wait_periods / 2)):
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 1, i
# Interrupt with a scale-up decision.
ctx = create_context_with_overrides(
ctx,
total_num_requests=100,
current_num_replicas=1,
target_num_replicas=1,
)
replica_queue_length_autoscaling_policy(ctx=ctx)
# The counter should be reset so it should require `downscale_to_zero_wait_periods`
# more periods before we actually scale down.
for i in range(downscale_to_zero_wait_periods):
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, v = replica_queue_length_autoscaling_policy(ctx=ctx)
# print(new_num_replicas, v)
assert new_num_replicas == 1, i
ctx = create_context_with_overrides(
ctx,
total_num_requests=no_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 0
def test_replicas_delayed_startup(self):
"""Unit test simulating replicas taking time to start up."""
min_replicas = 1
max_replicas = 200
policy_state = {}
config = {
"min_replicas": min_replicas,
"max_replicas": max_replicas,
"upscale_delay_s": 0,
"downscale_delay_s": 100000,
"target_ongoing_requests": 1,
}
config = AutoscalingConfig(**config)
ctx = AutoscalingContext(
config=config,
target_num_replicas=1,
total_num_requests=100,
current_num_replicas=1,
capacity_adjusted_min_replicas=min_replicas,
capacity_adjusted_max_replicas=max_replicas,
policy_state=policy_state,
deployment_id=None,
deployment_name=None,
app_name=None,
running_replicas=None,
current_time=None,
total_queued_requests=None,
aggregated_metrics=None,
raw_metrics=None,
last_scale_up_time=None,
last_scale_down_time=None,
)
# new_num_replicas = policy_manager.get_decision_num_replicas(1, 100, 1)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 100
# New target is 100, but no new replicas finished spinning up during this
# timestep.
ctx = create_context_with_overrides(
ctx,
total_num_requests=100,
current_num_replicas=1,
target_num_replicas=100,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 100
# Two new replicas spun up during this timestep.
ctx = create_context_with_overrides(
ctx,
total_num_requests=123,
current_num_replicas=3,
target_num_replicas=100,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 123
# A lot of queries got drained and a lot of replicas started up, but
# new_num_replicas should not decrease, because of the downscale delay.
ctx = create_context_with_overrides(
ctx,
total_num_requests=10,
current_num_replicas=4,
target_num_replicas=123,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == 123
@pytest.mark.parametrize("delay_s", [30.0, 0.0])
def test_fluctuating_ongoing_requests(self, delay_s):
"""
Simulates a workload that switches between too many and too few
ongoing requests.
"""
min_replicas = 1
max_replicas = 10
policy_state = {}
config = {
"min_replicas": min_replicas,
"max_replicas": max_replicas,
"upscale_delay_s": delay_s,
"downscale_delay_s": delay_s,
"target_ongoing_requests": 50,
}
config = AutoscalingConfig(**config)
if delay_s > 0:
wait_periods = int(delay_s / CONTROL_LOOP_INTERVAL_S)
assert wait_periods > 1
underload_requests, overload_requests = 2 * 20, 100
trials = 1000
ctx = AutoscalingContext(
config=config,
capacity_adjusted_min_replicas=min_replicas,
capacity_adjusted_max_replicas=max_replicas,
policy_state=policy_state,
target_num_replicas=None,
total_num_requests=None,
current_num_replicas=None,
deployment_id=None,
deployment_name=None,
app_name=None,
running_replicas=None,
current_time=None,
total_queued_requests=None,
aggregated_metrics=None,
raw_metrics=None,
last_scale_up_time=None,
last_scale_down_time=None,
)
new_num_replicas = None
for trial in range(trials):
if trial % 2 == 0:
ctx = create_context_with_overrides(
ctx,
total_num_requests=overload_requests,
current_num_replicas=1,
target_num_replicas=1,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
if delay_s > 0:
assert new_num_replicas == 1, trial
else:
assert new_num_replicas == 2, trial
else:
ctx = create_context_with_overrides(
ctx,
total_num_requests=underload_requests,
current_num_replicas=2,
target_num_replicas=2,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
if delay_s > 0:
assert new_num_replicas == 2, trial
else:
assert new_num_replicas == 1, trial
@pytest.mark.parametrize("ongoing_requests", [20, 100, 10])
def test_single_replica_receives_all_requests(self, ongoing_requests):
target_requests = 5
min_replicas = 1
max_replicas = 50
policy_state = {}
config = AutoscalingConfig(
min_replicas=min_replicas,
max_replicas=max_replicas,
target_ongoing_requests=target_requests,
upscale_delay_s=0.0,
downscale_delay_s=0.0,
)
ctx = AutoscalingContext(
config=config,
total_num_requests=ongoing_requests,
current_num_replicas=4,
target_num_replicas=4,
capacity_adjusted_min_replicas=min_replicas,
capacity_adjusted_max_replicas=max_replicas,
policy_state=policy_state,
deployment_id=None,
deployment_name=None,
app_name=None,
running_replicas=None,
current_time=None,
total_queued_requests=None,
aggregated_metrics=None,
raw_metrics=None,
last_scale_up_time=None,
last_scale_down_time=None,
)
new_num_replicas, _ = replica_queue_length_autoscaling_policy(ctx=ctx)
assert new_num_replicas == ongoing_requests / target_requests
def test_callable_and_direct_values(self):
config = AutoscalingConfig(min_replicas=1, max_replicas=10)
deployment_id = DeploymentID(name="test", app_name="test_app")
replica_id = ReplicaID(unique_id="r1", deployment_id=deployment_id)
# Test callables with lazy evaluation and caching
call_counts = {"requests": 0, "queued": 0, "agg": 0, "raw": 0}
ctx = AutoscalingContext(
config=config,
deployment_id=None,
deployment_name="test",
app_name=None,
current_num_replicas=5,
target_num_replicas=5,
running_replicas=[],
total_num_requests=lambda: (
call_counts.update({"requests": call_counts["requests"] + 1}),
42.0,
)[1],
total_queued_requests=lambda: (
call_counts.update({"queued": call_counts["queued"] + 1}),
10.0,
)[1],
aggregated_metrics=lambda: (
call_counts.update({"agg": call_counts["agg"] + 1}),
{"m": {replica_id: 5.0}},
)[1],
raw_metrics=lambda: (
call_counts.update({"raw": call_counts["raw"] + 1}),
{"m": {replica_id: [TimeStampedValue(1.0, 5.0)]}},
)[1],
capacity_adjusted_min_replicas=1,
capacity_adjusted_max_replicas=10,
policy_state={},
last_scale_up_time=None,
last_scale_down_time=None,
current_time=None,
)
# Callables not executed until accessed
assert all(c == 0 for c in call_counts.values())
# First access executes callables
assert ctx.total_num_requests == 42.0
assert ctx.total_queued_requests == 10.0
assert ctx.aggregated_metrics == {"m": {replica_id: 5.0}}
assert ctx.raw_metrics["m"][replica_id][0].value == 5.0
assert all(c == 1 for c in call_counts.values())
# Second access uses cached values
_ = ctx.total_num_requests
_ = ctx.total_queued_requests
_ = ctx.aggregated_metrics
_ = ctx.raw_metrics
assert all(c == 1 for c in call_counts.values())
# Test direct values (non-callable)
ctx2 = AutoscalingContext(
config=config,
deployment_id=None,
deployment_name="test",
app_name=None,
current_num_replicas=5,
target_num_replicas=5,
running_replicas=[],
total_num_requests=100.0,
total_queued_requests=20.0,
aggregated_metrics={"m2": {replica_id: 15.0}},
raw_metrics={"m2": {replica_id: [TimeStampedValue(2.0, 25.0)]}},
capacity_adjusted_min_replicas=1,
capacity_adjusted_max_replicas=10,
policy_state={},
last_scale_up_time=None,
last_scale_down_time=None,
current_time=None,
)
assert ctx2.total_num_requests == 100.0
assert ctx2.total_queued_requests == 20.0
assert ctx2.aggregated_metrics == {"m2": {replica_id: 15.0}}
assert ctx2.raw_metrics["m2"][replica_id][0].value == 25.0
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestReplicaQueueLengthPolicy |
python | PrefectHQ__prefect | tests/utilities/schema_tools/test_hydration.py | {
"start": 9199,
"end": 11819
} | class ____:
@pytest.mark.parametrize(
"input_object, expected_output, ctx",
[
# Cases where __prefect_kind is "workspace_variable",
# but "variable_name"" is missing
(
{"param": {"__prefect_kind": "workspace_variable"}},
{},
HydrationContext(),
),
({"__prefect_kind": "workspace_variable"}, {}, HydrationContext()),
# variable not found in context and we don't render it
# we just assume it's fine
(
{
"param": {
"__prefect_kind": "workspace_variable",
"variable_name": "my-var",
}
},
{"param": WorkspaceVariable("my-var")},
HydrationContext(render_workspace_variables=False),
),
# variable exists in context and we don't render it
#
(
{
"param": {
"__prefect_kind": "workspace_variable",
"variable_name": "my-var",
}
},
{"param": WorkspaceVariable("my-var")},
HydrationContext(
workspace_variables={"my-var": "my-value"},
render_workspace_variables=False,
),
),
# variable not found in context and we render it
(
{
"param": {
"__prefect_kind": "workspace_variable",
"variable_name": "my-var",
}
},
{"param": WorkspaceVariableNotFound("my-var")},
HydrationContext(render_workspace_variables=True),
),
# variable exists in context and we render it
(
{
"param": {
"__prefect_kind": "workspace_variable",
"variable_name": "my-var",
}
},
{"param": "my-value"},
HydrationContext(
workspace_variables={"my-var": "my-value"},
render_workspace_variables=True,
),
),
],
)
def test_hydrate_with_null_prefect_kind(self, input_object, expected_output, ctx):
hydrated_value = hydrate(input_object, ctx)
assert hydrated_value == expected_output
| TestHydrateWithWorkspaceVariablePrefectKind |
python | apache__airflow | task-sdk/src/airflow/sdk/api/client.py | {
"start": 24328,
"end": 27298
} | class ____:
__slots__ = ("client",)
def __init__(self, client: Client):
self.client = client
def trigger(
self,
dag_id: str,
run_id: str,
conf: dict | None = None,
logical_date: datetime | None = None,
reset_dag_run: bool = False,
) -> OKResponse | ErrorResponse:
"""Trigger a Dag run via the API server."""
body = TriggerDAGRunPayload(logical_date=logical_date, conf=conf or {}, reset_dag_run=reset_dag_run)
try:
self.client.post(
f"dag-runs/{dag_id}/{run_id}", content=body.model_dump_json(exclude_defaults=True)
)
except ServerResponseError as e:
if e.response.status_code == HTTPStatus.CONFLICT:
if reset_dag_run:
log.info("Dag Run already exists; Resetting Dag Run.", dag_id=dag_id, run_id=run_id)
return self.clear(run_id=run_id, dag_id=dag_id)
log.info("Dag Run already exists!", detail=e.detail, dag_id=dag_id, run_id=run_id)
return ErrorResponse(error=ErrorType.DAGRUN_ALREADY_EXISTS)
raise
return OKResponse(ok=True)
def clear(self, dag_id: str, run_id: str) -> OKResponse:
"""Clear a Dag run via the API server."""
self.client.post(f"dag-runs/{dag_id}/{run_id}/clear")
# TODO: Error handling
return OKResponse(ok=True)
def get_state(self, dag_id: str, run_id: str) -> DagRunStateResponse:
"""Get the state of a Dag run via the API server."""
resp = self.client.get(f"dag-runs/{dag_id}/{run_id}/state")
return DagRunStateResponse.model_validate_json(resp.read())
def get_count(
self,
dag_id: str,
logical_dates: list[datetime] | None = None,
run_ids: list[str] | None = None,
states: list[str] | None = None,
) -> DRCount:
"""Get count of Dag runs matching the given criteria."""
params = {
"dag_id": dag_id,
"logical_dates": [d.isoformat() for d in logical_dates] if logical_dates is not None else None,
"run_ids": run_ids,
"states": states,
}
# Remove None values from params
params = {k: v for k, v in params.items() if v is not None}
resp = self.client.get("dag-runs/count", params=params)
return DRCount(count=resp.json())
def get_previous(
self,
dag_id: str,
logical_date: datetime,
state: str | None = None,
) -> PreviousDagRunResult:
"""Get the previous DAG run before the given logical date, optionally filtered by state."""
params = {
"logical_date": logical_date.isoformat(),
}
if state:
params["state"] = state
resp = self.client.get(f"dag-runs/{dag_id}/previous", params=params)
return PreviousDagRunResult(dag_run=resp.json())
| DagRunOperations |
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_no_doc_rgx_check_init.py | {
"start": 369,
"end": 609
} | class ____:
def __init__(self, my_param: int) -> None: # [missing-param-doc]
"""
My init docstring
"""
def _private_method(self, my_param: int) -> None:
"""
My private method
"""
| MyClass |
python | numpy__numpy | numpy/_core/tests/test_function_base.py | {
"start": 667,
"end": 1471
} | class ____(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
def __truediv__(self, x):
return PhysicalQuantity(float(self) / float(x))
def __rtruediv__(self, x):
return PhysicalQuantity(float(x) / float(self))
| PhysicalQuantity |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_finetuning_callback.py | {
"start": 1108,
"end": 2900
} | class ____(BackboneFinetuning):
def on_train_epoch_start(self, trainer, pl_module):
super().on_train_epoch_start(trainer, pl_module)
epoch = trainer.current_epoch
if self.unfreeze_backbone_at_epoch <= epoch:
optimizer = trainer.optimizers[0]
current_lr = optimizer.param_groups[0]["lr"]
backbone_lr = self.previous_backbone_lr
if epoch < 6:
assert backbone_lr <= current_lr
else:
assert backbone_lr == current_lr
def test_finetuning_callback(tmp_path):
"""Test finetuning callbacks works as expected."""
seed_everything(42)
class FinetuningBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.backbone = nn.Sequential(nn.Linear(32, 32, bias=False), nn.BatchNorm1d(32), nn.ReLU())
self.layer = torch.nn.Linear(32, 2)
self.backbone.has_been_used = False
def forward(self, x):
self.backbone.has_been_used = True
x = self.backbone(x)
return self.layer(x)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.7)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
model = FinetuningBoringModel()
callback = TestBackboneFinetuningCallback(unfreeze_backbone_at_epoch=3, verbose=False)
trainer = Trainer(limit_train_batches=4, default_root_dir=tmp_path, callbacks=[callback], max_epochs=8)
trainer.fit(model)
assert model.backbone.has_been_used
| TestBackboneFinetuningCallback |
python | prabhupant__python-ds | data_structures/binary_trees/diameter.py | {
"start": 184,
"end": 694
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def height(root, ans):
if not root:
return 0
lheight = height(root.left, ans)
rheight = height(root.right, ans)
ans[0] = max(ans[0], 1 + lheight + rheight) # This is for diameter
return 1 + max(lheight, rheight) # This is for height
def diameter(root):
if not root:
return 0
ans = [-9999999999]
h = height(root, ans)
return ans[0]
| Node |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 246879,
"end": 247564
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of LinkProjectV2ToRepository"""
__schema__ = github_schema
__field_names__ = ("project_id", "repository_id", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the project to link to the repository."""
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The ID of the repository to link to the project."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| LinkProjectV2ToRepositoryInput |
python | walkccc__LeetCode | solutions/376. Wiggle Subsequence/376.py | {
"start": 0,
"end": 296
} | class ____:
def wiggleMaxLength(self, nums: list[int]) -> int:
increasing = 1
decreasing = 1
for a, b in itertools.pairwise(nums):
if b > a:
increasing = decreasing + 1
elif b < a:
decreasing = increasing + 1
return max(increasing, decreasing)
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 412709,
"end": 412886
} | class ____(Field):
"""FieldName schema wrapper."""
_schema = {"$ref": "#/definitions/FieldName"}
def __init__(self, *args):
super().__init__(*args)
| FieldName |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_to_csv.py | {
"start": 410,
"end": 52055
} | class ____:
def read_csv(self, path, **kwargs):
params = {"index_col": 0}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, temp_file, float_frame):
path = str(temp_file)
float_frame.iloc[:5, float_frame.columns.get_loc("A")] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
def test_to_csv_from_csv1_datetime(self, temp_file, datetime_frame):
path = str(temp_file)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path, parse_dates=True)
expected = datetime_frame.copy()
expected.index = expected.index.as_unit("us")
tm.assert_frame_equal(expected, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None, parse_dates=True)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None, parse_dates=True)
tm.assert_almost_equal(datetime_frame.values, recons.values)
def test_to_csv_from_csv1_corner_case(self, temp_file):
path = str(temp_file)
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3, dtype=np.int64)),
"s2": Series(range(2), index=np.arange(2, dtype=np.int64)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, temp_file, float_frame):
path = str(temp_file)
# duplicate index
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
index=["a", "a", "b"],
columns=["x", "y", "z"],
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
index=midx,
columns=["x", "y", "z"],
)
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self, temp_file):
path = str(temp_file)
df1 = DataFrame(np.random.default_rng(2).standard_normal((3, 1)))
df2 = DataFrame(np.random.default_rng(2).standard_normal((3, 1)))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self, temp_file):
path = str(temp_file)
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, temp_file, timezone_frame):
# tz, 8260
path = str(temp_file)
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
.dt.as_unit("ns")
)
result["B"] = converter("B")
result["C"] = converter("C")
result["A"] = result["A"].dt.as_unit("ns")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self, temp_file):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = DataFrame(
np.ones((N, 3)),
index=Index([f"i-{i}" for i in range(N)], name="a"),
columns=Index([f"i-{i}" for i in range(3)], name="a"),
)
cs = df.columns
cols = [cs[2], cs[0]]
path = str(temp_file)
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
@pytest.mark.parametrize("cols", [None, ["b", "a"]])
def test_to_csv_new_dupe_cols(self, temp_file, cols):
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = DataFrame(
np.ones((N, 3)),
index=Index([f"i-{i}" for i in range(N)], name="a"),
columns=["a", "a", "b"],
)
path = str(temp_file)
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
@pytest.mark.slow
def test_to_csv_dtnat(self, temp_file):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.default_rng(2).integers(0, len(s), nnat):
s[i] = NaT
i = np.random.default_rng(2).integers(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
path = str(temp_file)
df = DataFrame({"a": s1, "b": s2})
df.to_csv(path, chunksize=chunksize)
result = self.read_csv(path).apply(to_datetime)
expected = df[:]
expected["a"] = expected["a"].astype("M8[us]")
expected["b"] = expected["b"].astype("M8[us]")
tm.assert_frame_equal(result, expected, check_names=False)
def _return_result_expected(
self,
df,
chunksize,
temp_file,
r_dtype=None,
c_dtype=None,
rnlvl=None,
cnlvl=None,
dupe_col=False,
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
df.to_csv(temp_file, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(temp_file, **kwargs)
else:
kwargs["header"] = 0
df.to_csv(temp_file, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(temp_file, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_Csv disambiguates the columns by
# labeling them dupe.1,dupe.2, etc'. monkey patch columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
return df, recons
@pytest.mark.slow
@pytest.mark.parametrize(
"nrows", [2, 10, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251]
)
def test_to_csv_nrows(self, nrows, temp_file):
df = DataFrame(
np.ones((nrows, 4)),
index=date_range("2020-01-01", periods=nrows),
columns=Index(list("abcd"), dtype=object),
)
result, expected = self._return_result_expected(df, 1000, temp_file, "dt", "s")
expected.index = expected.index.astype("M8[us]")
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.slow
@pytest.mark.parametrize(
"nrows", [2, 10, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251]
)
@pytest.mark.parametrize(
"r_idx_type, c_idx_type", [("i", "i"), ("s", "s"), ("s", "dt"), ("p", "p")]
)
@pytest.mark.parametrize("ncols", [1, 2, 3, 4])
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_to_csv_idx_types(self, nrows, r_idx_type, c_idx_type, ncols, temp_file):
axes = {
"i": lambda n: Index(np.arange(n), dtype=np.int64),
"s": lambda n: Index([f"{i}_{chr(i)}" for i in range(97, 97 + n)]),
"dt": lambda n: date_range("2020-01-01", periods=n),
"p": lambda n: period_range("2020-01-01", periods=n, freq="D"),
}
df = DataFrame(
np.ones((nrows, ncols)),
index=axes[r_idx_type](nrows),
columns=axes[c_idx_type](ncols),
)
result, expected = self._return_result_expected(
df,
1000,
temp_file,
r_idx_type,
c_idx_type,
)
if r_idx_type == "dt":
expected.index = expected.index.astype("M8[us]")
elif r_idx_type == "p":
expected.index = expected.index.astype("M8[ns]")
if c_idx_type == "dt":
expected.columns = expected.columns.astype("M8[us]")
elif c_idx_type == "p":
expected.columns = expected.columns.astype("M8[ns]")
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.slow
@pytest.mark.parametrize(
"nrows", [10, 98, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251]
)
@pytest.mark.parametrize("ncols", [1, 2, 3, 4])
def test_to_csv_idx_ncols(self, nrows, ncols, temp_file):
df = DataFrame(
np.ones((nrows, ncols)),
index=Index([f"i-{i}" for i in range(nrows)], name="a"),
columns=Index([f"i-{i}" for i in range(ncols)], name="a"),
)
result, expected = self._return_result_expected(df, 1000, temp_file)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.slow
@pytest.mark.parametrize("nrows", [10, 98, 99, 100, 101, 102])
def test_to_csv_dup_cols(self, nrows, temp_file):
df = DataFrame(
np.ones((nrows, 3)),
index=Index([f"i-{i}" for i in range(nrows)], name="a"),
columns=Index([f"i-{i}" for i in range(3)], name="a"),
)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
result, expected = self._return_result_expected(
df, 1000, temp_file, dupe_col=True
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.slow
def test_to_csv_empty(self, temp_file):
df = DataFrame(index=np.arange(10, dtype=np.int64))
result, expected = self._return_result_expected(df, 1000, temp_file)
tm.assert_frame_equal(result, expected, check_column_type=False)
@pytest.mark.slow
def test_to_csv_chunksize(self, temp_file):
chunksize = 1000
rows = chunksize // 2 + 1
df = DataFrame(
np.ones((rows, 2)),
columns=Index(list("ab")),
index=MultiIndex.from_arrays([range(rows) for _ in range(2)]),
)
result, expected = self._return_result_expected(
df, chunksize, temp_file, rnlvl=2
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.slow
@pytest.mark.parametrize(
"nrows", [2, 10, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251]
)
@pytest.mark.parametrize("ncols", [2, 3, 4])
@pytest.mark.parametrize(
"df_params, func_params",
[
[{"r_idx_nlevels": 2}, {"rnlvl": 2}],
[{"c_idx_nlevels": 2}, {"cnlvl": 2}],
[{"r_idx_nlevels": 2, "c_idx_nlevels": 2}, {"rnlvl": 2, "cnlvl": 2}],
],
)
def test_to_csv_params(self, nrows, df_params, func_params, ncols, temp_file):
if df_params.get("r_idx_nlevels"):
index = MultiIndex.from_arrays(
[f"i-{i}" for i in range(nrows)]
for _ in range(df_params["r_idx_nlevels"])
)
else:
index = None
if df_params.get("c_idx_nlevels"):
columns = MultiIndex.from_arrays(
[f"i-{i}" for i in range(ncols)]
for _ in range(df_params["c_idx_nlevels"])
)
else:
columns = Index([f"i-{i}" for i in range(ncols)])
df = DataFrame(np.ones((nrows, ncols)), index=index, columns=columns)
result, expected = self._return_result_expected(
df, 1000, temp_file, **func_params
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_to_csv_from_csv_w_some_infs(self, temp_file, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.default_rng(2).random() < 0.5]
float_frame["h"] = float_frame.index.map(f)
path = str(temp_file)
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, temp_file, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
path = str(temp_file)
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self, temp_file):
# GH 3624, after appending columns, to_csv fails
path = str(temp_file)
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self, temp_file):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
path = str(temp_file)
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, temp_file, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2, dtype=np.int64).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
path = str(temp_file)
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index), dtype=np.int64)]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
with tm.assert_produces_warning(UserWarning, match="Could not infer format"):
recons = self.read_csv(path, index_col=[0, 1], parse_dates=True)
# TODO to_csv drops column name
expected = tsframe.copy()
expected.index = MultiIndex.from_arrays([old_index.as_unit("us"), new_index[1]])
tm.assert_frame_equal(recons, expected, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.default_rng(2).integers(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = DataFrame(
np.ones((5, 3)),
columns=MultiIndex.from_arrays(
[[f"i-{i}" for i in range(3)] for _ in range(4)], names=list("abcd")
),
index=MultiIndex.from_arrays(
[[f"i-{i}" for i in range(5)] for _ in range(2)], names=list("ab")
),
)
df.to_csv(temp_file)
result = read_csv(temp_file, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = DataFrame(
np.ones((5, 3)),
columns=MultiIndex.from_arrays(
[[f"i-{i}" for i in range(3)] for _ in range(4)], names=list("abcd")
),
)
df.to_csv(temp_file)
result = read_csv(temp_file, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = DataFrame(
np.ones((5, 3)),
columns=MultiIndex.from_arrays(
[[f"i-{i}" for i in range(3)] for _ in range(4)], names=list("abcd")
),
index=MultiIndex.from_arrays(
[[f"i-{i}" for i in range(5)] for _ in range(3)], names=list("abc")
),
)
df.to_csv(temp_file)
result = read_csv(temp_file, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(temp_file, index=False)
result = read_csv(temp_file, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(temp_file, index=False)
result = read_csv(temp_file, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(temp_file)
result = read_csv(temp_file, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(temp_file)
result = read_csv(temp_file, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(temp_file)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(temp_file, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(temp_file, columns=["foo", "bar"])
# empty
tsframe[:0].to_csv(temp_file)
recons = self.read_csv(temp_file)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self, temp_file, using_infer_string):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
path = str(temp_file)
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype("str")
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self, temp_file):
df = DataFrame(
np.random.default_rng(2).standard_normal((1, 4)).astype(np.float32)
)
df[1] = np.nan
path = str(temp_file)
df.to_csv(path, na_rep=999)
with open(path, encoding="utf-8") as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self, temp_file):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
path = str(temp_file)
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
    def test_to_csv_mixed(self, temp_file):
        """Round-trip a frame mixing float/int/bool/object/datetime columns."""

        def create_cols(name):
            # Column labels like "float000" .. "float004" for dtype `name`.
            return [f"{name}{i:03d}" for i in range(5)]

        df_float = DataFrame(
            np.random.default_rng(2).standard_normal((100, 5)),
            dtype="float64",
            columns=create_cols("float"),
        )
        df_int = DataFrame(
            np.random.default_rng(2).standard_normal((100, 5)).astype("int64"),
            dtype="int64",
            columns=create_cols("int"),
        )
        df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
        df_object = DataFrame(
            "foo", index=df_float.index, columns=create_cols("object"), dtype="object"
        )
        df_dt = DataFrame(
            Timestamp("20010101"),
            index=df_float.index,
            columns=create_cols("date"),
        )

        # add in some nans
        df_float.iloc[30:50, 1:3] = np.nan
        df_dt.iloc[30:50, 1:3] = np.nan

        df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)

        # dtype
        # Explicit dtype map so read_csv restores each non-date column with
        # the dtype it was written with (dates are handled via parse_dates).
        dtypes = {}
        for n, dtype in [
            ("float", np.float64),
            ("int", np.int64),
            ("bool", np.bool_),
            ("object", object),
        ]:
            for c in create_cols(n):
                dtypes[c] = dtype

        path = str(temp_file)
        df.to_csv(path)
        rs = read_csv(path, index_col=0, dtype=dtypes, parse_dates=create_cols("date"))
        tm.assert_frame_equal(rs, df)
    def test_to_csv_dups_cols(self, temp_file):
        """Round-trip frames whose column labels are duplicated."""
        # Single dtype with duplicated labels: read_csv mangles duplicate
        # names on the way back, so restore them before comparing.
        df = DataFrame(
            np.random.default_rng(2).standard_normal((1000, 30)),
            columns=list(range(15)) + list(range(15)),
            dtype="float64",
        )

        path = str(temp_file)
        df.to_csv(path)  # single dtype, fine
        result = read_csv(path, index_col=0)
        result.columns = df.columns
        tm.assert_frame_equal(result, df)

        # Mixed dtypes sharing the duplicated labels 0, 1, 2.
        df_float = DataFrame(
            np.random.default_rng(2).standard_normal((1000, 3)), dtype="float64"
        )
        df_int = DataFrame(np.random.default_rng(2).standard_normal((1000, 3))).astype(
            "int64"
        )
        df_bool = DataFrame(True, index=df_float.index, columns=range(3))
        df_object = DataFrame("foo", index=df_float.index, columns=range(3))
        df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
        df = pd.concat(
            [df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
        )

        df.columns = [0, 1, 2] * 5
        df.to_csv(temp_file)
        result = read_csv(temp_file, index_col=0)

        # date cols
        # The datetime columns come back as strings under mangled names
        # ("0.4" etc.); convert them back before comparing.
        for i in ["0.4", "1.4", "2.4"]:
            result[i] = to_datetime(result[i])

        result.columns = df.columns
        tm.assert_frame_equal(result, df)
def test_to_csv_dups_cols2(self, temp_file):
# GH3457
df = DataFrame(
np.ones((5, 3)),
index=Index([f"i-{i}" for i in range(5)], name="foo"),
columns=Index(["a", "a", "b"]),
)
path = str(temp_file)
df.to_csv(path)
# read_csv will rename the dups columns
result = read_csv(path, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize("chunksize", [1, 5, 10])
def test_to_csv_chunking(self, chunksize, temp_file):
aa = DataFrame({"A": range(10)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
path = str(temp_file)
aa.to_csv(path, chunksize=chunksize)
rs = read_csv(path, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self, temp_file, monkeypatch):
# Issue #8621
chunksize = 100
df = DataFrame(
np.random.default_rng(2).standard_normal((1, chunksize + 10)),
columns=None,
index=None,
)
path = str(temp_file)
with monkeypatch.context() as m:
m.setattr("pandas.io.formats.csvs._DEFAULT_CHUNKSIZE_CELLS", chunksize)
df.to_csv(path, header=False, index=False)
rs = read_csv(path, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self, temp_file):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
path = str(temp_file)
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self, temp_file):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
path = str(temp_file)
df.to_csv(path, encoding="UTF-8")
df2 = read_csv(path, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
df.to_csv(path, encoding="UTF-8", index=False)
df2 = read_csv(path, index_col=None, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO("")
df = DataFrame(
[["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"],
index=["\u05d0", "\u05d1"],
)
df.to_csv(buf, encoding="UTF-8")
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_stringio(self, float_frame):
buf = StringIO()
float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
tm.assert_frame_equal(recons, float_frame)
def test_to_csv_float_format(self, temp_file):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
path = str(temp_file)
df.to_csv(path, float_format="%.2f")
rs = read_csv(path, index_col=0)
xp = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(rs, xp)
def test_to_csv_float_format_over_decimal(self):
# GH#47436
df = DataFrame({"a": [0.5, 1.0]})
result = df.to_csv(
decimal=",",
float_format=lambda x: np.format_float_positional(x, trim="-"),
index=False,
)
expected_rows = ["a", "0.5", "1"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8")
result = buf.getvalue()
expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_to_csv_quote_none(self, encoding):
# GH4328
df = DataFrame({"A": ["hello", '{"hello"}']})
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False)
result = buf.getvalue()
expected_rows = ["A", "hello", '{"hello"}']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_lineterminators(self, temp_file):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
path = str(temp_file)
# case 1: CRLF as line terminator
df.to_csv(path, lineterminator="\r\n")
expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n"
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_lineterminators2(self, temp_file):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
path = str(temp_file)
# case 2: LF as line terminator
df.to_csv(path, lineterminator="\n")
expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n"
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_lineterminators3(self, temp_file):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
path = str(temp_file)
# case 3: The default line terminator(=os.linesep)(gh-21406)
df.to_csv(path)
os_linesep = os.linesep.encode("utf-8")
expected = (
b",A,B"
+ os_linesep
+ b"one,1,4"
+ os_linesep
+ b"two,2,5"
+ os_linesep
+ b"three,3,6"
+ os_linesep
)
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output
# as when one would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
res = StringIO()
s.to_csv(res, header=False)
exp = StringIO()
s2.to_csv(exp, header=False)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self, float_frame):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = read_csv(StringIO(csv_str), index_col=0)
tm.assert_frame_equal(float_frame, recons)
    @pytest.mark.parametrize(
        "df,encoding",
        [
            (
                DataFrame(
                    [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
                    index=["A", "B"],
                    columns=["X", "Y", "Z"],
                ),
                None,
            ),
            # GH 21241, 21118
            (DataFrame([["abc", "def", "ghi"]], columns=["X", "Y", "Z"]), "ascii"),
            (DataFrame(5 * [[123, "你好", "世界"]], columns=["X", "Y", "Z"]), "gb2312"),
            (
                DataFrame(
                    5 * [[123, "Γειά σου", "Κόσμε"]],  # noqa: RUF001
                    columns=["X", "Y", "Z"],
                ),
                "cp737",
            ),
        ],
    )
    def test_to_csv_compression(self, temp_file, df, encoding, compression):
        """Round-trip through every supported compression, by path and handle."""
        path = str(temp_file)
        df.to_csv(path, compression=compression, encoding=encoding)

        # test the round trip - to_csv -> read_csv
        result = read_csv(path, compression=compression, index_col=0, encoding=encoding)
        tm.assert_frame_equal(df, result)

        # test the round trip using file handle - to_csv -> read_csv
        with get_handle(
            path, "w", compression=compression, encoding=encoding
        ) as handles:
            df.to_csv(handles.handle, encoding=encoding)
            assert not handles.handle.closed

        result = read_csv(
            path,
            compression=compression,
            encoding=encoding,
            index_col=0,
        ).squeeze("columns")
        tm.assert_frame_equal(df, result)

        # explicitly make sure file is compressed
        with tm.decompress_file(path, compression) as fh:
            text = fh.read().decode(encoding or "utf8")
            for col in df.columns:
                assert col in text

        with tm.decompress_file(path, compression) as fh:
            tm.assert_frame_equal(df, read_csv(fh, index_col=0, encoding=encoding))
    def test_to_csv_date_format(self, temp_file, datetime_frame):
        """date_format must apply to datetime values, the index and columns."""
        path = str(temp_file)
        dt_index = datetime_frame.index
        datetime_frame = DataFrame(
            {"A": dt_index, "B": dt_index.shift(1)}, index=dt_index
        )
        datetime_frame.to_csv(path, date_format="%Y%m%d")

        # Check that the data was put in the specified format
        test = read_csv(path, index_col=0)

        # read_csv parses %Y%m%d-formatted cells as plain ints, so build the
        # expected frame with int-formatted values and index.
        datetime_frame_int = datetime_frame.map(lambda x: int(x.strftime("%Y%m%d")))
        datetime_frame_int.index = datetime_frame_int.index.map(
            lambda x: int(x.strftime("%Y%m%d"))
        )

        tm.assert_frame_equal(test, datetime_frame_int)

        datetime_frame.to_csv(path, date_format="%Y-%m-%d")

        # Check that the data was put in the specified format
        test = read_csv(path, index_col=0)
        datetime_frame_str = datetime_frame.map(lambda x: x.strftime("%Y-%m-%d"))
        datetime_frame_str.index = datetime_frame_str.index.map(
            lambda x: x.strftime("%Y-%m-%d")
        )

        tm.assert_frame_equal(test, datetime_frame_str)

        # Check that columns get converted
        datetime_frame_columns = datetime_frame.T
        datetime_frame_columns.to_csv(path, date_format="%Y%m%d")

        test = read_csv(path, index_col=0)

        datetime_frame_columns = datetime_frame_columns.map(
            lambda x: int(x.strftime("%Y%m%d"))
        )
        # Columns don't get converted to ints by read_csv
        datetime_frame_columns.columns = datetime_frame_columns.columns.map(
            lambda x: x.strftime("%Y%m%d")
        )

        tm.assert_frame_equal(test, datetime_frame_columns)

        # test NaTs
        nat_index = to_datetime(
            ["NaT"] * 10 + ["2000-01-01", "2000-01-01", "2000-01-01"]
        )
        nat_frame = DataFrame({"A": nat_index}, index=nat_index)
        nat_frame.to_csv(path, date_format="%Y-%m-%d")

        test = read_csv(path, parse_dates=[0, 1], index_col=0)
        tm.assert_frame_equal(test, nat_frame)
    @pytest.mark.parametrize("td", [pd.Timedelta(0), pd.Timedelta("10s")])
    def test_to_csv_with_dst_transitions(self, td, temp_file):
        """Round-trip timestamps that straddle a DST transition."""
        path = str(temp_file)
        # make sure we are not failing on transitions
        times = date_range(
            "2013-10-26 23:00",
            "2013-10-27 01:00",
            tz="Europe/London",
            freq="h",
            ambiguous="infer",
        )
        i = times + td
        i = i._with_freq(None)  # freq is not preserved by read_csv
        time_range = np.array(range(len(i)), dtype="int64")
        df = DataFrame({"A": time_range}, index=i)
        df.to_csv(path, index=True)

        # we have to reconvert the index as we
        # don't parse the tz's
        result = read_csv(path, index_col=0)
        result.index = (
            to_datetime(result.index, utc=True)
            .tz_convert("Europe/London")
            .as_unit("ns")
        )
        tm.assert_frame_equal(result, df)
    @pytest.mark.parametrize(
        "start,end",
        [
            ["2015-03-29", "2015-03-30"],
            ["2015-10-25", "2015-10-26"],
        ],
    )
    def test_to_csv_with_dst_transitions_with_pickle(self, start, end, temp_file):
        # GH11619
        idx = date_range(start, end, freq="h", tz="Europe/Paris", unit="ns")
        idx = idx._with_freq(None)  # freq does not round-trip
        idx._data._freq = None  # otherwise there is trouble on unpickle
        df = DataFrame({"values": 1, "idx": idx}, index=idx)
        df.to_csv(temp_file, index=True)

        result = read_csv(temp_file, index_col=0)
        # Timezone information is not written to CSV, so rebuild the
        # tz-aware index and column from the naive values that come back.
        result.index = (
            to_datetime(result.index, utc=True).tz_convert("Europe/Paris").as_unit("ns")
        )
        result["idx"] = to_datetime(result["idx"], utc=True).astype(
            "datetime64[ns, Europe/Paris]"
        )
        tm.assert_frame_equal(result, df)

        # assert working
        df.astype(str)

        # The same frame must also survive a pickle round trip.
        path = str(temp_file)
        df.to_pickle(path)
        result = pd.read_pickle(path)
        tm.assert_frame_equal(result, df)
    def test_to_csv_quoting(self):
        """Exercise every csv quoting mode, escapechar handling and quoted
        round trips, including MultiIndex frames."""
        df = DataFrame(
            {
                "c_bool": [True, False],
                "c_float": [1.0, 3.2],
                "c_int": [42, np.nan],
                "c_string": ["a", "b,c"],
            }
        )

        # Default quoting (None) behaves like QUOTE_MINIMAL.
        expected_rows = [
            ",c_bool,c_float,c_int,c_string",
            "0,True,1.0,42.0,a",
            '1,False,3.2,,"b,c"',
        ]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)

        result = df.to_csv()
        assert result == expected

        result = df.to_csv(quoting=None)
        assert result == expected

        expected_rows = [
            ",c_bool,c_float,c_int,c_string",
            "0,True,1.0,42.0,a",
            '1,False,3.2,,"b,c"',
        ]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)

        result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
        assert result == expected

        expected_rows = [
            '"","c_bool","c_float","c_int","c_string"',
            '"0","True","1.0","42.0","a"',
            '"1","False","3.2","","b,c"',
        ]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)

        result = df.to_csv(quoting=csv.QUOTE_ALL)
        assert result == expected

        # see gh-12922, gh-13259: make sure changes to
        # the formatters do not break this behaviour
        expected_rows = [
            '"","c_bool","c_float","c_int","c_string"',
            '0,True,1.0,42.0,"a"',
            '1,False,3.2,"","b,c"',
        ]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)
        result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
        assert result == expected

        # QUOTE_NONE with a delimiter in the data requires an escapechar.
        msg = "need to escape, but no escapechar set"
        with pytest.raises(csv.Error, match=msg):
            df.to_csv(quoting=csv.QUOTE_NONE)

        with pytest.raises(csv.Error, match=msg):
            df.to_csv(quoting=csv.QUOTE_NONE, escapechar=None)

        expected_rows = [
            ",c_bool,c_float,c_int,c_string",
            "0,True,1.0,42.0,a",
            "1,False,3.2,,b!,c",
        ]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)
        result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="!")
        assert result == expected

        # escapechar "f" also escapes the literal "f" in the header, giving
        # "c_ffloat" and "bf,c" below.
        expected_rows = [
            ",c_bool,c_ffloat,c_int,c_string",
            "0,True,1.0,42.0,a",
            "1,False,3.2,,bf,c",
        ]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)
        result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="f")
        assert result == expected

        # see gh-3503: quoting Windows line terminators
        # presents with encoding?
        text_rows = ["a,b,c", '1,"test \r\n",3']
        text = tm.convert_rows_list_to_csv_str(text_rows)
        df = read_csv(StringIO(text))

        buf = StringIO()
        df.to_csv(buf, encoding="utf-8", index=False)
        assert buf.getvalue() == text

        # xref gh-7791: make sure the quoting parameter is passed through
        # with multi-indexes
        df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
        df = df.set_index(["a", "b"])

        expected_rows = ['"a","b","c"', '"1","3","5"', '"2","4","6"']
        expected = tm.convert_rows_list_to_csv_str(expected_rows)
        assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
# see gh-15982
dates = ["1990-01-01", "2000-01-01", "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected_rows = [",0", "1990-01-01,4", "2000-01-01,5", "3005-01-01,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
expected_rows = [",0", "01-01-1990,4", "01-01-2000,5", "01-01-3005,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
# Overflow with pd.NaT
dates = ["1990-01-01", NaT, "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected_rows = [",0", "1990-01-01,4", ",5", "3005-01-01,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_multi_index_header(self):
# see gh-5539
columns = MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)])
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
df.columns = columns
header = ["a", "b", "c", "d"]
result = df.to_csv(header=header)
expected_rows = [",a,b,c,d", "0,1,2,3,4", "1,5,6,7,8"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_single_level_multi_index(self):
# see gh-26303
index = Index([(1,), (2,), (3,)])
df = DataFrame([[1, 2, 3]], columns=index)
df = df.reindex(columns=[(1,), (3,)])
expected = ",1,3\n0,1,3\n"
result = df.to_csv(lineterminator="\n")
tm.assert_almost_equal(result, expected)
def test_gz_lineend(self, tmp_path):
# GH 25311
df = DataFrame({"a": [1, 2]})
expected_rows = ["a", "1", "2"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
file_path = tmp_path / "__test_gz_lineend.csv.gz"
file_path.touch()
path = str(file_path)
df.to_csv(path, index=False)
with tm.decompress_file(path, compression="gzip") as f:
result = f.read().decode("utf-8")
assert result == expected
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({"a": date_range("1/1/2000", periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
assert "2000-01-01" in result
def test_to_csv_na_quoting(self):
# GH 15891
# Normalize carriage return for Windows OS
result = (
DataFrame([None, None])
.to_csv(None, header=False, index=False, na_rep="")
.replace("\r\n", "\n")
)
expected = '""\n""\n'
assert result == expected
def test_to_csv_categorical_and_ea(self):
# GH#46812
df = DataFrame({"a": "x", "b": [1, pd.NA]})
df["b"] = df["b"].astype("Int16")
df["b"] = df["b"].astype("category")
result = df.to_csv()
expected_rows = [",a,b", "0,x,1", "1,x,"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_categorical_and_interval(self):
# GH#46297
df = DataFrame(
{
"a": [
pd.Interval(
Timestamp("2020-01-01"),
Timestamp("2020-01-02"),
closed="both",
)
]
}
)
df["a"] = df["a"].astype("category")
result = df.to_csv()
expected_rows = [",a", '0,"[2020-01-01 00:00:00, 2020-01-02 00:00:00]"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_warn_when_zip_tar_and_append_mode(self, tmp_path):
# GH57875
df = DataFrame({"a": [1, 2, 3]})
msg = (
"zip and tar do not support mode 'a' properly. This combination will "
"result in multiple files with same name being added to the archive"
)
zip_path = tmp_path / "test.zip"
tar_path = tmp_path / "test.tar"
with tm.assert_produces_warning(
RuntimeWarning, match=msg, raise_on_extra_warnings=False
):
df.to_csv(zip_path, mode="a")
with tm.assert_produces_warning(
RuntimeWarning, match=msg, raise_on_extra_warnings=False
):
df.to_csv(tar_path, mode="a")
def test_to_csv_escape_quotechar(self):
# GH61514
df = DataFrame(
{
"col_a": ["a", "a2"],
"col_b": ['b"c', None],
"col_c": ['de,f"', '"c'],
}
)
result = df.to_csv(quotechar='"', escapechar="\\", quoting=csv.QUOTE_NONE)
expected_rows = [
",col_a,col_b,col_c",
'0,a,b\\"c,de\\,f\\"',
'1,a2,,\\"c',
]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
| TestDataFrameToCSV |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/libtool_installation/package.py | {
"start": 326,
"end": 472
} | class ____(LibtoolDeletion, AutotoolsPackage):
"""Mock AutotoolsPackage to check proper installation of libtool archives."""
| LibtoolInstallation |
python | getsentry__sentry | src/sentry/overwatch/endpoints/overwatch_rpc.py | {
"start": 2570,
"end": 3749
} | class ____(StandardAuthentication):
"""Authentication for Overwatch-style HMAC signed requests."""
token_name = b"rpcsignature"
def accepts_auth(self, auth: list[bytes]) -> bool:
if not auth or len(auth) < 2:
return False
return auth[0].lower() == self.token_name
def authenticate_token(self, request: Request, token: str) -> tuple[Any, Any]:
compare_signature(request, token)
sentry_sdk.get_isolation_scope().set_tag("overwatch_rpc_auth", True)
return (AnonymousUser(), token)
def _can_use_prevent_ai_features(org: Organization) -> bool:
"""Check if organization has opted in to Prevent AI features."""
if not features.has("organizations:gen-ai-features", org):
return False
hide_ai_features = org.get_option("sentry:hide_ai_features", HIDE_AI_FEATURES_DEFAULT)
pr_review_test_generation_enabled = bool(
org.get_option(
"sentry:enable_pr_review_test_generation",
ENABLE_PR_REVIEW_TEST_GENERATION_DEFAULT,
)
)
return not hide_ai_features and pr_review_test_generation_enabled
@region_silo_endpoint
| OverwatchRpcSignatureAuthentication |
python | django__django | tests/generic_relations_regress/models.py | {
"start": 3174,
"end": 3264
} | class ____(models.Model):
name = models.CharField(primary_key=True, max_length=25)
| Board |
python | paramiko__paramiko | tests/test_channelfile.py | {
"start": 929,
"end": 995
} | class ____(ChannelFileBase):
klass = ChannelFile
| TestChannelFile |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 378623,
"end": 379505
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateTeamsRepository"""
__schema__ = github_schema
__field_names__ = ("repository_id", "team_ids", "permission", "client_mutation_id")
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""Repository ID being granted access to."""
team_ids = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(ID))), graphql_name="teamIds")
"""A list of teams being granted access. Limit: 10"""
permission = sgqlc.types.Field(sgqlc.types.non_null(RepositoryPermission), graphql_name="permission")
"""Permission that should be granted to the teams."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateTeamsRepositoryInput |
python | walkccc__LeetCode | solutions/422. Valid Word Square/422.py | {
"start": 0,
"end": 301
} | class ____:
def validWordSquare(self, words: list[str]) -> bool:
for i, word in enumerate(words):
for j, c in enumerate(word):
if len(words) <= j or len(words[j]) <= i: # out-of-bounds
return False
if c != words[j][i]:
return False
return True
| Solution |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/preserve_defaults_special_constructs.py | {
"start": 615,
"end": 710
} | class ____(TypedDict):
"""docstring"""
a: int
b: object
c: list[int]
| MyTypedDict |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/containers.py | {
"start": 6316,
"end": 14705
} | class ____(_Split):
"""
Several layouts, one stacked above/under the other. ::
+--------------------+
| |
+--------------------+
| |
+--------------------+
By default, this doesn't display a horizontal line between the children,
but if this is something you need, then create a HSplit as follows::
HSplit(children=[ ... ], padding_char='-',
padding=1, padding_style='#ffff00')
:param children: List of child :class:`.Container` objects.
:param window_too_small: A :class:`.Container` object that is displayed if
there is not enough space for all the children. By default, this is a
"Window too small" message.
:param align: `VerticalAlign` value.
:param width: When given, use this width instead of looking at the children.
:param height: When given, use this height instead of looking at the children.
:param z_index: (int or None) When specified, this can be used to bring
element in front of floating elements. `None` means: inherit from parent.
:param style: A style string.
:param modal: ``True`` or ``False``.
:param key_bindings: ``None`` or a :class:`.KeyBindings` object.
:param padding: (`Dimension` or int), size to be used for the padding.
:param padding_char: Character to be used for filling in the padding.
:param padding_style: Style to applied to the padding.
"""
def __init__(
self,
children: Sequence[AnyContainer],
window_too_small: Container | None = None,
align: VerticalAlign = VerticalAlign.JUSTIFY,
padding: AnyDimension = 0,
padding_char: str | None = None,
padding_style: str = "",
width: AnyDimension = None,
height: AnyDimension = None,
z_index: int | None = None,
modal: bool = False,
key_bindings: KeyBindingsBase | None = None,
style: str | Callable[[], str] = "",
) -> None:
super().__init__(
children=children,
window_too_small=window_too_small,
padding=padding,
padding_char=padding_char,
padding_style=padding_style,
width=width,
height=height,
z_index=z_index,
modal=modal,
key_bindings=key_bindings,
style=style,
)
self.align = align
self._children_cache: SimpleCache[tuple[Container, ...], list[Container]] = (
SimpleCache(maxsize=1)
)
self._remaining_space_window = Window() # Dummy window.
def preferred_width(self, max_available_width: int) -> Dimension:
if self.width is not None:
return to_dimension(self.width)
if self.children:
dimensions = [c.preferred_width(max_available_width) for c in self.children]
return max_layout_dimensions(dimensions)
else:
return Dimension()
def preferred_height(self, width: int, max_available_height: int) -> Dimension:
if self.height is not None:
return to_dimension(self.height)
dimensions = [
c.preferred_height(width, max_available_height) for c in self._all_children
]
return sum_layout_dimensions(dimensions)
def reset(self) -> None:
for c in self.children:
c.reset()
@property
def _all_children(self) -> list[Container]:
"""
List of child objects, including padding.
"""
def get() -> list[Container]:
result: list[Container] = []
# Padding Top.
if self.align in (VerticalAlign.CENTER, VerticalAlign.BOTTOM):
result.append(Window(width=Dimension(preferred=0)))
# The children with padding.
for child in self.children:
result.append(child)
result.append(
Window(
height=self.padding,
char=self.padding_char,
style=self.padding_style,
)
)
if result:
result.pop()
# Padding right.
if self.align in (VerticalAlign.CENTER, VerticalAlign.TOP):
result.append(Window(width=Dimension(preferred=0)))
return result
return self._children_cache.get(tuple(self.children), get)
def write_to_screen(
self,
screen: Screen,
mouse_handlers: MouseHandlers,
write_position: WritePosition,
parent_style: str,
erase_bg: bool,
z_index: int | None,
) -> None:
"""
Render the prompt to a `Screen` instance.
:param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class
to which the output has to be written.
"""
sizes = self._divide_heights(write_position)
style = parent_style + " " + to_str(self.style)
z_index = z_index if self.z_index is None else self.z_index
if sizes is None:
self.window_too_small.write_to_screen(
screen, mouse_handlers, write_position, style, erase_bg, z_index
)
else:
#
ypos = write_position.ypos
xpos = write_position.xpos
width = write_position.width
# Draw child panes.
for s, c in zip(sizes, self._all_children):
c.write_to_screen(
screen,
mouse_handlers,
WritePosition(xpos, ypos, width, s),
style,
erase_bg,
z_index,
)
ypos += s
# Fill in the remaining space. This happens when a child control
# refuses to take more space and we don't have any padding. Adding a
# dummy child control for this (in `self._all_children`) is not
# desired, because in some situations, it would take more space, even
# when it's not required. This is required to apply the styling.
remaining_height = write_position.ypos + write_position.height - ypos
if remaining_height > 0:
self._remaining_space_window.write_to_screen(
screen,
mouse_handlers,
WritePosition(xpos, ypos, width, remaining_height),
style,
erase_bg,
z_index,
)
def _divide_heights(self, write_position: WritePosition) -> list[int] | None:
"""
Return the heights for all rows.
Or None when there is not enough space.
"""
if not self.children:
return []
width = write_position.width
height = write_position.height
# Calculate heights.
dimensions = [c.preferred_height(width, height) for c in self._all_children]
# Sum dimensions
sum_dimensions = sum_layout_dimensions(dimensions)
# If there is not enough space for both.
# Don't do anything.
if sum_dimensions.min > height:
return None
# Find optimal sizes. (Start with minimal size, increase until we cover
# the whole height.)
sizes = [d.min for d in dimensions]
child_generator = take_using_weights(
items=list(range(len(dimensions))), weights=[d.weight for d in dimensions]
)
i = next(child_generator)
# Increase until we meet at least the 'preferred' size.
preferred_stop = min(height, sum_dimensions.preferred)
preferred_dimensions = [d.preferred for d in dimensions]
while sum(sizes) < preferred_stop:
if sizes[i] < preferred_dimensions[i]:
sizes[i] += 1
i = next(child_generator)
# Increase until we use all the available space. (or until "max")
if not get_app().is_done:
max_stop = min(height, sum_dimensions.max)
max_dimensions = [d.max for d in dimensions]
while sum(sizes) < max_stop:
if sizes[i] < max_dimensions[i]:
sizes[i] += 1
i = next(child_generator)
return sizes
| HSplit |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-reorder-array-to-get-same-bst.py | {
"start": 1350,
"end": 1869
} | class ____(object):
def numOfWays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def dfs(nums):
if len(nums) <= 2:
return 1
left = [v for v in nums if v < nums[0]]
right = [v for v in nums if v > nums[0]]
result = dp[len(left)+len(right)][len(left)]
result = result*dfs(left) % MOD
result = result*dfs(right) % MOD
return result
return (dfs(nums)-1)%MOD
| Solution |
python | kamyu104__LeetCode-Solutions | Python/maximize-sum-of-array-after-k-negations.py | {
"start": 1581,
"end": 1950
} | class ____(object):
def largestSumAfterKNegations(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
A.sort()
remain = K
for i in xrange(K):
if A[i] >= 0:
break
A[i] = -A[i]
remain -= 1
return sum(A) - (remain%2)*min(A)*2
| Solution2 |
python | keras-team__keras | keras/src/quantizers/quantizers_test.py | {
"start": 23369,
"end": 26751
} | class ____(testing.TestCase):
@parameterized.named_parameters(
("bits_2_sym_False", 2, False),
("bits_4_sym_False", 4, False),
("bits_8_sym_False", 8, False),
("bits_2_sym_True", 2, True),
("bits_4_sym_True", 4, True),
("bits_8_sym_True", 8, True),
)
def test_quantize_dequantize_roundtrip_error_bound_per_tensor(
self, bits, symmetric
):
"""
For finite inputs and positive scales, the reconstruction error
|x_hat - clip(x)| is bounded by 0.5 * scale elementwise.
"""
rng = np.random.default_rng(0)
x = ops.array(rng.standard_normal((64, 32)), "float32")
scale = ops.array(0.05) # per-tensor scale
maxq = ops.array(ops.subtract(ops.power(2, bits), 1), "float32")
zero = ops.array(maxq / 2.0 if symmetric else 3.0, "float32")
quantized = quantize_with_zero_point(x, scale, zero, maxq)
dequantized = dequantize_with_zero_point(quantized, scale, zero)
# Representable dequantization range:
# [scale*(0 - zero), scale*(maxq - zero)]
lo = ops.multiply(scale, ops.subtract(ops.array(0.0), zero))
hi = ops.multiply(scale, ops.subtract(maxq, zero))
x_clipped = ops.clip(x, lo, hi)
err = ops.abs(dequantized - x_clipped)
self.assertTrue(
ops.all(err <= (ops.add(ops.multiply(0.5, scale), 1e-7)))
)
def test_quantize_clipping_behavior_extremes(self):
"""
Very negative q == 0 ; very positive q == maxq.
"""
maxq = ops.array(15.0)
scale = ops.array(0.1)
zero = ops.array(7.0)
x = ops.array([[-1e6, 1e6]], "float32")
quantized = quantize_with_zero_point(x, scale, zero, maxq)
self.assertEqual(quantized.shape, (1, 2))
self.assertEqual(quantized[0, 0], 0.0)
self.assertEqual(quantized[0, 1], maxq)
def test_zero_scale_guard_no_nans_for_finite_inputs(self):
"""
If scale == 0, quantize should not produce NaNs (uses epsilon
replacement).
"""
x = ops.array([[0.0, 1.0, -2.0]])
scale = ops.array(0.0) # triggers epsilon path
zero = ops.array(5.0)
maxq = ops.array(15.0)
q = quantize_with_zero_point(x, scale, zero, maxq)
self.assertFalse(ops.any(ops.isnan(q)))
# Dequantize should also be finite
x_hat = dequantize_with_zero_point(q, scale, zero)
self.assertTrue(ops.all(ops.isfinite(x_hat)))
@parameterized.parameters(4, 8)
def test_idempotent_quantize_when_input_is_already_levels(self, bits):
"""
If input is already exactly on representable dequantized grid,
quantize→dequantize should return the same values (within float eps).
"""
scale = ops.array(0.125)
maxq = ops.array(ops.subtract(ops.power(2, bits), 1), "float32")
zero = ops.array(ops.divide(maxq, 2.0))
# Build dequantized grid points: x = scale * (k - zero), k in [0..maxq]
ks = ops.arange(0, ops.add(maxq, 1))
x_vals = ops.multiply(scale, ops.subtract(ks, zero))
x = ops.reshape(x_vals, (1, -1))
q = quantize_with_zero_point(x, scale, zero, maxq)
x_hat = dequantize_with_zero_point(q, scale, zero)
self.assertAllClose(x_hat, x, rtol=0, atol=1e-6)
| GPTQQuantizerTest |
python | wandb__wandb | wandb/_pydantic/v1_compat.py | {
"start": 9772,
"end": 14342
} | class ____:
pass
# Pick the mixin type based on the detected Pydantic version.
PydanticCompatMixin: type = V2Mixin if IS_PYDANTIC_V2 else V1Mixin
# ----------------------------------------------------------------------------
# Decorators and other pydantic helpers
# ----------------------------------------------------------------------------
if IS_PYDANTIC_V2:
from pydantic import alias_generators
# https://docs.pydantic.dev/latest/api/config/#pydantic.alias_generators.to_camel
to_camel = alias_generators.to_camel # e.g. "foo_bar" -> "fooBar"
# https://docs.pydantic.dev/latest/api/functional_validators/#pydantic.functional_validators.field_validator
field_validator = pydantic.field_validator
# https://docs.pydantic.dev/latest/api/functional_validators/#pydantic.functional_validators.model_validator
model_validator = pydantic.model_validator
# https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.computed_field
computed_field = pydantic.computed_field
# https://docs.pydantic.dev/latest/api/aliases/#pydantic.aliases.AliasChoices
AliasChoices = pydantic.AliasChoices
else:
from pydantic.utils import to_lower_camel
V2ValidatorMode = Literal["before", "after", "wrap", "plain"]
# NOTE:
# - `to_lower_camel` in v1 equals `to_camel` in v2 (lowerCamelCase).
# - `to_camel` in v1 equals `to_pascal` in v2 (UpperCamelCase).
to_camel = to_lower_camel
# Ensures we can use v2's `@field_validator` by invoking v1's `@validator`
# if v1 is detected.
def field_validator(
*fields: str,
mode: V2ValidatorMode = "after",
check_fields: bool | None = None,
**_: Any,
) -> Callable:
return pydantic.validator( # type: ignore[deprecated]
*fields,
pre=(mode == "before"),
always=True,
check_fields=bool(check_fields),
allow_reuse=True,
)
# Ensures we can use v2's `@model_validator` by invoking v1's `@root_validator`
# if v1 is detected.
def model_validator(*, mode: V2ValidatorMode, **_: Any) -> Callable:
if mode == "after":
def _decorator(v2_method: Callable) -> Any:
# Patch the behavior for `@model_validator(mode="after")` in
# v1. This is complicated because:
# - In v2 it decorates an instance method, so the function takes
# `self` as the first argument.
# - In v1 `@root_validator(pre=False)` decorates a classmethod,
# so the function takes `cls` as the first argument.
def v1_method(
cls: type[V1Model], values: dict[str, Any]
) -> dict[str, Any]:
# Values should already be validated in an "after"
# validator, so use `construct()` to instantiate without
# revalidating.
v_self = v2_method(cls.construct(**values))
# Pydantic v1 expects the validator to return a
# `{field_name -> value}` mapping.
return {f: getattr(v_self, f) for f in v_self.__fields__}
return pydantic.root_validator(pre=False, allow_reuse=True)( # type: ignore[call-overload]
classmethod(v1_method)
)
return _decorator
else:
return pydantic.root_validator(pre=(mode == "before"), allow_reuse=True) # type: ignore[call-overload]
@overload # type: ignore[no-redef]
def computed_field(func: Callable | property, /) -> property: ...
@overload
def computed_field(
func: None, /, **_: Any
) -> Callable[[Callable | property], property]: ...
def computed_field(
func: Callable | property | None = None, /, **_: Any
) -> property | Callable[[Callable | property], property]:
"""Compatibility wrapper for Pydantic v2's `computed_field` in v1."""
def always_property(f: Callable | property) -> property:
# Convert the method to a property only if needed
return f if isinstance(f, property) else property(f)
# Handle both decorator styles
return always_property if (func is None) else always_property(func)
class AliasChoices: # type: ignore [no-redef]
"""Placeholder for Pydantic v2's AliasChoices to retain partial v1 support."""
aliases: list[str]
def __init__(self, *aliases: str):
self.aliases = list(aliases)
| V2Mixin |
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {
"start": 13261,
"end": 14647
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = BertCrossAttention if is_cross_attention else BertSelfAttention
self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx)
self.output = BertSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, hidden_states)
return attention_output, attn_weights
| BertAttention |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 10158,
"end": 18202
} | class ____(ObservedException):
# A TypeError exception to be raised from inside Dynamo tracing. This can happen on generator.send(..) method
pass
observed_exception_map = {
StopIteration: ObservedUserStopIteration,
LookupError: ObservedLookupError,
IndexError: ObservedIndexError,
GeneratorExit: ObservedGeneratorExit,
KeyError: ObservedKeyError,
AttributeError: ObservedAttributeError,
RuntimeError: ObservedRuntimeError,
NotImplementedError: ObservedNotImplementedError,
TypeError: ObservedTypeError,
}
def get_dynamo_observed_exception(exc_type: type[Exception]) -> type[ObservedException]:
if exc_type not in observed_exception_map:
name = getattr(exc_type, "__name__", str(exc_type))
observed_exception_map[exc_type] = type( # type: ignore[assignment]
f"Observed{name}Error", (ObservedException,), {}
)
# pyrefly: ignore [index-error]
return observed_exception_map[exc_type]
def raise_observed_exception(
exc_type: type[Exception],
tx: InstructionTranslatorBase,
*,
args: Optional[list[Any]] = None,
kwargs: Optional[dict[str, Any]] = None,
msg: Optional[str] = None,
) -> NoReturn:
from .variables import BuiltinVariable
# CPython here raises an exception. Since there is no python code, we have to manually setup the exception
# stack and raise the exception.
# If a message is provided but no args, use the message as the first argument
if msg is not None and (args is None or len(args) == 0):
args = [msg]
exception_vt = BuiltinVariable(exc_type).call_function(tx, args or [], kwargs or {}) # type: ignore[arg-type]
tx.exn_vt_stack.set_current_exception(exception_vt) # type: ignore[arg-type]
raised_exc = get_dynamo_observed_exception(exc_type)
# Store the original exception arguments for better error messages
if args:
raise raised_exc(*args)
raise raised_exc
def handle_observed_exception(tx: Any) -> None:
# This is essentially exception handling code, equivalent of this pseudo code
#
# try:
# ... somebody raising StopIteration
# except StopIteration
# pass
#
# If this was going through the python code, we would have called exception_handler method, but FOR_ITER
# handles the exception completely in CPython. For example for 3.11, the resulting bytecode is
#
#
# 6 46 LOAD_GLOBAL 2 (StopIteration)
# 58 RAISE_VARARGS 1
# >> 60 PUSH_EXC_INFO
# 7 62 LOAD_GLOBAL 2 (StopIteration)
# 74 CHECK_EXC_MATCH
# 76 POP_JUMP_FORWARD_IF_FALSE 3 (to 84)
# 78 POP_TOP
# 8 80 POP_EXCEPT
#
# Fortunately this translates to a simple pop from the exn_vt_stack
tx.exn_vt_stack.clear_current_exception()
# These exceptions are ok to fallback to eager/graph_break.
exceptions_allowed_to_be_fallback = (
torch._subclasses.fake_tensor.DataDependentOutputException,
torch._subclasses.fake_tensor.DynamicOutputShapeException,
torch._subclasses.fake_tensor.UnsupportedOperatorException,
torch._subclasses.fake_tensor.UnsupportedFakeTensorException,
torch._subclasses.fake_tensor.UnsupportedMutationAliasingException,
)
def unimplemented_with_warning(
e: Exception,
code: types.CodeType,
*,
gb_type: str,
context: str,
explanation: str,
hints: list[str],
) -> NoReturn:
# This function calls unimplemented internally and eventually graph breaks
# or falls to eager. unimplemented itself does not print any user warnings,
# i.e., its very silent. This helper function is intended when an error is
# encountered in the torch.compile stack which is worth showing as warning
# to the user. For example, if AOT Autograd backend fails with a fake tensor
# exception, its ok to fallback to eager but not silently. Here, we can use
# this function to log the message and the stack trace.
graph_break_msg = format_error_msg_verbose(e, code)
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "dynamo_graph_break_reason",
"encoding": "string",
},
payload_fn=lambda: graph_break_msg,
)
graph_breaks_log.debug("%s", graph_break_msg)
_unimplemented = unimplemented
# to prevent a graph break registry entry
_unimplemented(
gb_type=gb_type,
context=context,
explanation=explanation,
hints=hints,
from_exc=e,
log_warning=True,
)
def format_graph_break_message(
gb_type: str,
context: str,
explanation: str,
hints: list[str],
) -> str:
explanation = textwrap.indent(explanation, " ").lstrip()
hints_str = "\n".join(
" Hint: " + textwrap.indent(hint, " ").lstrip() for hint in hints
)
context = textwrap.indent(context, " ").lstrip()
msg = f"""\
{gb_type}
Explanation: {explanation}
{hints_str}
Developer debug context: {context}
"""
return msg
@lru_cache(maxsize=1)
def _load_gb_type_to_gb_id_map() -> dict[str, Any]:
"""
Loads the gb_type to gb_id map from the graph break registry from JSON file with caching.
Includes historical gb_type (mapping behavior of duplicate gb_types with different gb_ids is undefined).
"""
try:
script_dir = Path(__file__).resolve().parent
registry_path = get_file_path_2(
"", str(script_dir), "graph_break_registry.json"
)
with open(registry_path) as f:
registry = json.load(f)
except Exception:
log.exception("Error accessing the registry file")
registry = {}
mapping = {}
for k, v in registry.items():
for entry in v:
mapping[entry["Gb_type"]] = k
return mapping
def get_gbid_documentation_link(gb_type: str) -> Optional[str]:
"""
Retrieves the GBID documentation link for a given graph break type.
Args:
gb_type: The graph break type to look up.
Returns:
A string containing the documentation URL if found, otherwise None.
"""
GRAPH_BREAK_SITE_URL = (
"https://meta-pytorch.github.io/compile-graph-break-site/gb/" # @lint-ignore
)
gb_type_to_gb_id_map = _load_gb_type_to_gb_id_map()
if gb_type in gb_type_to_gb_id_map:
return (
f"{GRAPH_BREAK_SITE_URL}gb{gb_type_to_gb_id_map[gb_type].lstrip('GB')}.html"
)
return None
_NOTHING = object()
def unimplemented(
*,
gb_type: str,
context: str,
explanation: str,
hints: list[str],
from_exc: Any = _NOTHING,
log_warning: bool = False,
) -> NoReturn:
"""
Called within dynamo to cause a graph break.
Args:
gb_type: Context-free graph break type. It should be a short string without any
information specific to the tracing context (i.e. no dynamically-generated strings)
context: Developer context for the graph break. It can contain tracing context/dynamic strings.
explanation: User-facing context-dependent explanation for the graph break. Can be dynamic.
hints: List of user-facing hints for the graph break.
"""
msg = format_graph_break_message(gb_type, context, explanation, hints)
documentation_link = get_gbid_documentation_link(gb_type)
if documentation_link:
msg += f"\n For more details about this graph break, please visit: {documentation_link}"
if log_warning:
log.warning(msg)
if from_exc is not _NOTHING:
past_real_stack = None
if hasattr(from_exc, "real_stack"):
past_real_stack = from_exc.real_stack
raise Unsupported(msg, real_stack=past_real_stack) from from_exc
raise Unsupported(msg)
# KeyError has special handling for its args
# see https://github.com/python/cpython/blob/3.11/Objects/exceptions.c#L2534 for details
| ObservedTypeError |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 42442,
"end": 44917
} | class ____(Response):
"""
Response of models.delete_many endpoint.
:param deleted: Number of models deleted
:type deleted: int
:param urls:
:type urls: Sequence[str]
"""
_service = "models"
_action = "delete_many"
_version = "2.13"
_schema = {
"definitions": {},
"failures": {
"item": {
"error": {
"description": "Error info",
"properties": {
"codes": {"item": {"type": "integer"}, "type": "array"},
"data": {"additionalProperties": True, "type": "object"},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {"description": "ID of the failed entity", "type": "string"},
"type": "object",
},
"type": "array",
},
"properties": {
"deleted": {
"description": "Number of models deleted",
"type": ["integer", "null"],
},
"urls": {
"descrition": "The urls of the deleted model files",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
}
def __init__(self, deleted: Optional[int] = None, urls: Optional[List[str]] = None, **kwargs: Any) -> None:
super(DeleteManyResponse, self).__init__(**kwargs)
self.deleted = deleted
self.urls = urls
@schema_property("deleted")
def deleted(self) -> Optional[int]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
@schema_property("urls")
def urls(self) -> Optional[List[str]]:
return self._property_urls
@urls.setter
def urls(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_urls = None
return
self.assert_isinstance(value, "urls", (list, tuple))
self.assert_isinstance(value, "urls", six.string_types, is_array=True)
self._property_urls = value
| DeleteManyResponse |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/executor/init.py | {
"start": 274,
"end": 1522
} | class ____(
NamedTuple(
"InitExecutorContext",
[
("job", PublicAttr[IJob]),
("executor_def", PublicAttr[ExecutorDefinition]),
("executor_config", PublicAttr[Mapping[str, object]]),
("instance", PublicAttr[DagsterInstance]),
],
)
):
"""Executor-specific initialization context.
Args:
job (IJob): The job to be executed.
executor_def (ExecutorDefinition): The definition of the executor currently being
constructed.
executor_config (dict): The parsed config passed to the executor.
instance (DagsterInstance): The current instance.
"""
def __new__(
cls,
job: IJob,
executor_def: ExecutorDefinition,
executor_config: Mapping[str, object],
instance: DagsterInstance,
):
return super().__new__(
cls,
job=check.inst_param(job, "job", IJob),
executor_def=check.inst_param(executor_def, "executor_def", ExecutorDefinition),
executor_config=check.mapping_param(executor_config, "executor_config", key_type=str),
instance=check.inst_param(instance, "instance", DagsterInstance),
)
| InitExecutorContext |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/tests/core/test_tracker.py | {
"start": 11662,
"end": 15839
} | class ____:
"""Test thread execution functionality."""
@pytest.fixture
def mock_thread_execution_setup(self, monkeypatch):
"""Set up common mocking for thread execution tests."""
# Mock run_task_sync
mock_run_task = Mock(return_value=Mock(spec=State))
monkeypatch.setattr("prefect_dbt.core._tracker.run_task_sync", mock_run_task)
# Mock hydrated_context
mock_context_manager = Mock()
mock_context_manager.__enter__ = Mock()
mock_context_manager.__exit__ = Mock()
monkeypatch.setattr(
"prefect_dbt.core._tracker.hydrated_context",
Mock(return_value=mock_context_manager),
)
return mock_run_task
@pytest.mark.parametrize(
"return_value,expected_result",
[
(Mock(spec=State), Mock(spec=State)),
(None, None),
],
)
def test_run_task_in_thread_stores_result(
self,
sample_node_id,
mock_task,
sample_task_run_id,
mock_thread_execution_setup,
return_value,
expected_result,
):
"""Test that run_task_in_thread stores the correct result."""
tracker = NodeTaskTracker()
parameters = {"param": "value"}
context = {"context": "data"}
# Configure mock to return specified value
mock_thread_execution_setup.return_value = return_value
tracker.run_task_in_thread(
sample_node_id, mock_task, sample_task_run_id, parameters, context
)
# Wait for thread to complete
time.sleep(0.2)
# Verify result was stored
result = tracker.get_task_result(sample_node_id)
if return_value is not None:
# For mock objects, just verify it's a mock with the same spec
assert isinstance(result, Mock)
assert result._spec_class == return_value._spec_class
else:
assert result == expected_result
@pytest.mark.parametrize(
"dependencies_setup,expected_wait_count",
[
({"dep1": Mock(spec=State), "dep2": Mock(spec=State)}, 2),
({"dep1": Mock(spec=State)}, 1),
({}, 0),
],
)
def test_run_task_in_thread_with_dependencies(
self,
sample_node_id,
mock_task,
sample_task_run_id,
mock_state,
mock_thread_execution_setup,
dependencies_setup,
expected_wait_count,
):
"""Test that run_task_in_thread handles dependencies correctly."""
tracker = NodeTaskTracker()
parameters = {"param": "value"}
context = {"context": "data"}
# Set up dependencies
dependencies = list(dependencies_setup.keys())
tracker.set_node_dependencies(sample_node_id, dependencies)
# Set up dependency results
for dep_id, result in dependencies_setup.items():
tracker.set_task_result(dep_id, result)
# Configure mock
mock_thread_execution_setup.return_value = mock_state
tracker.run_task_in_thread(
sample_node_id, mock_task, sample_task_run_id, parameters, context
)
# Wait for thread to complete
time.sleep(0.2)
# Verify run_task_sync was called with correct dependencies
mock_thread_execution_setup.assert_called_once()
call_args = mock_thread_execution_setup.call_args
assert len(call_args[1]["wait_for"]) == expected_wait_count
def test_run_task_in_thread_starts_daemon_thread(
self, sample_node_id, mock_task, sample_task_run_id, mock_thread_execution_setup
):
"""Test that run_task_in_thread starts a daemon thread."""
tracker = NodeTaskTracker()
parameters = {"param": "value"}
context = {"context": "data"}
tracker.run_task_in_thread(
sample_node_id, mock_task, sample_task_run_id, parameters, context
)
# Wait for thread to start and potentially complete
time.sleep(0.1)
# Verify run_task_sync was called
mock_thread_execution_setup.assert_called_once()
| TestNodeTaskTrackerThreadExecution |
python | gevent__gevent | src/gevent/tests/test__doctests.py | {
"start": 208,
"end": 825
} | class ____(doctest.OutputChecker):
"""
Pattern-normalizing output checker. Inspired by one used in zope.testing.
"""
def __init__(self, patterns):
self.transformers = [functools.partial(re.sub, replacement) for re, replacement in patterns]
def check_output(self, want, got, optionflags):
if got == want:
return True
for transformer in self.transformers:
want = transformer(want)
got = transformer(got)
return doctest.OutputChecker.check_output(self, want, got, optionflags)
FORBIDDEN_MODULES = set()
| RENormalizingOutputChecker |
python | wandb__wandb | wandb/sdk/artifacts/storage_handlers/wb_local_artifact_handler.py | {
"start": 599,
"end": 2672
} | class ____(StorageHandler):
"""Handles loading and storing Artifact reference-type files."""
_scheme: Literal["wandb-client-artifact"]
def __init__(self) -> None:
self._scheme = "wandb-client-artifact"
def can_handle(self, parsed_url: ParseResult) -> bool:
return parsed_url.scheme == self._scheme
def load_path(
self,
manifest_entry: ArtifactManifestEntry,
local: bool = False,
) -> URIStr | FilePathStr:
raise NotImplementedError(
"Should not be loading a path for an artifact entry with unresolved client id."
)
def store_path(
self,
artifact: Artifact,
path: URIStr | FilePathStr,
name: StrPath | None = None,
checksum: bool = True,
max_objects: int | None = None,
) -> list[ArtifactManifestEntry]:
"""Store the file or directory at the given path within the specified artifact.
Args:
artifact: The artifact doing the storing
path (str): The path to store
name (str): If specified, the logical name that should map to `path`
Returns:
list[ArtifactManifestEntry]: Manifest entries to store in the
artifact.
"""
parsed = urlparse(path)
client_id = parsed.netloc
target_path = parsed.path.lstrip("/")
target_artifact = artifact_instance_cache_by_client_id.get(client_id)
if not isinstance(target_artifact, wandb.Artifact):
raise TypeError("Artifact passed to store_path() must be a wandb.Artifact.")
target_entry = target_artifact.manifest.entries[target_path] # type: ignore
if target_entry is None:
raise RuntimeError("Local entry not found - invalid reference")
# Return the new entry
return [
ArtifactManifestEntry(
path=name or os.path.basename(path),
ref=path,
size=0,
digest=target_entry.digest,
)
]
| WBLocalArtifactHandler |
python | catalyst-team__catalyst | examples/reinforcement_learning/reinforce.py | {
"start": 515,
"end": 1200
} | class ____:
def __init__(self, capacity: int):
self.capacity = capacity
self.buffer = deque(maxlen=capacity)
def append(self, rollout: Rollout):
self.buffer.append(rollout)
def sample(self, idx: int) -> Sequence[np.array]:
states, actions, rewards = self.buffer[idx]
states = np.array(states, dtype=np.float32)
actions = np.array(actions, dtype=np.int64)
rewards = np.array(rewards, dtype=np.float32)
return states, actions, rewards
def __len__(self) -> int:
return len(self.buffer)
# as far as RL does not have some predefined dataset,
# we need to specify epoch length by ourselfs
| RolloutBuffer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/visitors.py | {
"start": 21290,
"end": 22320
} | class ____(ExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.cloned_traverse` function.
Direct usage of the :func:`.visitors.cloned_traverse` function is usually
preferred.
"""
__slots__ = ()
def copy_and_process(
self, list_: List[ExternallyTraversible]
) -> List[ExternallyTraversible]:
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
@overload
def traverse(self, obj: Literal[None]) -> None: ...
@overload
def traverse(
self, obj: ExternallyTraversible
) -> ExternallyTraversible: ...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict
)
| CloningExternalTraversal |
python | scipy__scipy | scipy/optimize/tests/test_minimize_constrained.py | {
"start": 1655,
"end": 3070
} | class ____:
"""Problem 15.4 from Nocedal and Wright
The following optimization problem:
minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
Subject to: x[0]**2 + x[1]**2 - 1 = 0
"""
def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None):
rads = degrees/180*np.pi
self.x0 = [np.cos(rads), np.sin(rads)]
self.x_opt = np.array([1.0, 0.0])
self.constr_jac = constr_jac
self.constr_hess = constr_hess
self.a = a
self.b = b
self.bounds = None
def _test_args(self, a, b):
if self.a != a or self.b != b:
raise ValueError()
def fun(self, x, a, b):
self._test_args(a, b)
return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
def grad(self, x, a, b):
self._test_args(a, b)
return np.array([4*x[0]-1, 4*x[1]])
def hess(self, x, a, b):
self._test_args(a, b)
return 4*np.eye(2)
@property
def constr(self):
def fun(x):
return x[0]**2 + x[1]**2
if self.constr_jac is None:
def jac(x):
return [[4*x[0], 4*x[1]]]
else:
jac = self.constr_jac
if self.constr_hess is None:
def hess(x, v):
return 2*v[0]*np.eye(2)
else:
hess = self.constr_hess
return NonlinearConstraint(fun, 1, 1, jac, hess)
| MaratosTestArgs |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 11221,
"end": 13150
} | class ____(BaseModel):
type: Literal["JwtAuthenticator"]
secret_key: str = Field(
...,
description="Secret used to sign the JSON web token.",
examples=["{{ config['secret_key'] }}"],
)
base64_encode_secret_key: Optional[bool] = Field(
False,
description='When set to true, the secret key will be base64 encoded prior to being encoded as part of the JWT. Only set to "true" when required by the API.',
)
algorithm: Algorithm = Field(
...,
description="Algorithm used to sign the JSON web token.",
examples=["ES256", "HS256", "RS256", "{{ config['algorithm'] }}"],
)
token_duration: Optional[int] = Field(
1200,
description="The amount of time in seconds a JWT token can be valid after being issued.",
examples=[1200, 3600],
title="Token Duration",
)
header_prefix: Optional[str] = Field(
None,
description="The prefix to be used within the Authentication header.",
examples=["Bearer", "Basic"],
title="Header Prefix",
)
jwt_headers: Optional[JwtHeaders] = Field(
None,
description="JWT headers used when signing JSON web token.",
title="JWT Headers",
)
additional_jwt_headers: Optional[Dict[str, Any]] = Field(
None,
description="Additional headers to be included with the JWT headers object.",
title="Additional JWT Headers",
)
jwt_payload: Optional[JwtPayload] = Field(
None,
description="JWT Payload used when signing JSON web token.",
title="JWT Payload",
)
additional_jwt_payload: Optional[Dict[str, Any]] = Field(
None,
description="Additional properties to be added to the JWT payload.",
title="Additional JWT Payload Properties",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| JwtAuthenticator |
python | django__django | tests/test_utils/tests.py | {
"start": 68277,
"end": 68364
} | class ____:
urlpatterns = [path("second/", empty_response, name="second")]
| SecondUrls |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_templates_index.py | {
"start": 2420,
"end": 4681
} | class ____(ProjectTemplateAPIBase):
method = "POST"
def test_post__no_feature(self) -> None:
response = self.get_error_response(self.organization.id, status_code=404)
assert response.status_code == 404
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_post(self) -> None:
response = self.get_success_response(self.organization.id, name="Test Project Template")
assert response.status_code == 201
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_post__as_member_without_permission(self) -> None:
"""
Test that a member is part of the organization, but does not have the required
permissions to create a project template.
The user is a member of the organization, but does not have write access to the org.
"""
org_two = self.create_organization()
self.create_team(organization=org_two, members=[self.user])
self.create_project_template(organization=org_two)
response = self.get_error_response(org_two.id, status_code=403)
assert response.status_code == 403
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_post__with_options(self) -> None:
test_options = {"test-key": "value"}
response = self.get_success_response(
self.organization.id,
name="Test Project Template",
options=test_options,
)
assert response.status_code == 201
new_template = ProjectTemplate.objects.get(id=response.data["id"])
created_options = {opt.key: opt.value for opt in new_template.options.all()}
assert created_options == test_options
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_post__no_name(self) -> None:
response = self.get_error_response(self.organization.id, status_code=400)
assert response.status_code == 400
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
@patch("sentry.api.base.create_audit_entry")
def test_post__audit_log(self, mock_audit: MagicMock) -> None:
self.get_success_response(
self.organization.id,
name="Test Project Template",
)
mock_audit.assert_called()
mock_audit.reset_mock()
| ProjectTemplateIndexPostTest |
python | django__django | tests/middleware/test_csp.py | {
"start": 4173,
"end": 7036
} | class ____(SimpleTestCase):
def test_no_decorators(self):
response = self.client.get("/csp-base/")
self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy)
self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy)
def test_csp_disabled_enforced(self):
"""
`csp_override({})` only disables the enforced CSP header.
"""
response = self.client.get("/csp-disabled-enforced/")
self.assertNotIn(CSP.HEADER_ENFORCE, response)
self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy)
def test_csp_report_only_disabled(self):
"""
`csp_report_only_override({})` only disables the report-only header.
"""
response = self.client.get("/csp-disabled-report-only/")
self.assertNotIn(CSP.HEADER_REPORT_ONLY, response)
self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy)
def test_csp_disabled_both(self):
"""
Using both CSP decorators with empty mappings will clear both headers.
"""
response = self.client.get("/csp-disabled-both/")
self.assertNotIn(CSP.HEADER_ENFORCE, response)
self.assertNotIn(CSP.HEADER_REPORT_ONLY, response)
def test_csp_override_enforced(self):
"""
`csp_override` only overrides the enforced header.
"""
response = self.client.get("/csp-override-enforced/")
self.assertEqual(
response[CSP.HEADER_ENFORCE], "default-src 'self'; img-src 'self' data:"
)
self.assertEqual(response[CSP.HEADER_REPORT_ONLY], basic_policy)
def test_csp_report_only_override(self):
"""
`csp_report_only_override` only overrides the report-only header.
"""
response = self.client.get("/csp-override-report-only/")
self.assertEqual(
response[CSP.HEADER_REPORT_ONLY], "default-src 'self'; img-src 'self' data:"
)
self.assertEqual(response[CSP.HEADER_ENFORCE], basic_policy)
def test_csp_override_both_decorator(self):
"""
Using both CSP decorators overrides both CSP Django settings.
"""
response = self.client.get("/csp-override-both/")
self.assertEqual(
response[CSP.HEADER_ENFORCE], "default-src 'self'; img-src 'self' data:"
)
self.assertEqual(
response[CSP.HEADER_REPORT_ONLY], "default-src 'self'; img-src 'self' data:"
)
@override_settings(
ROOT_URLCONF="middleware.urls",
SECURE_CSP_REPORT_ONLY={
"default-src": [CSP.NONE],
"img-src": [CSP.SELF],
"script-src": [CSP.SELF],
"style-src": [CSP.SELF],
"report-uri": "/csp-report/",
},
)
@modify_settings(
MIDDLEWARE={"append": "django.middleware.csp.ContentSecurityPolicyMiddleware"}
)
| CSPMiddlewareWithDecoratedViewsTest |
python | pytorch__pytorch | torch/distributions/kumaraswamy.py | {
"start": 692,
"end": 3735
} | class ____(TransformedDistribution):
r"""
Samples from a Kumaraswamy distribution.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Kumaraswamy(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Kumaraswamy distribution with concentration alpha=1 and beta=1
tensor([ 0.1729])
Args:
concentration1 (float or Tensor): 1st concentration parameter of the distribution
(often referred to as alpha)
concentration0 (float or Tensor): 2nd concentration parameter of the distribution
(often referred to as beta)
"""
arg_constraints = {
"concentration1": constraints.positive,
"concentration0": constraints.positive,
}
# pyrefly: ignore [bad-override]
support = constraints.unit_interval
has_rsample = True
def __init__(
self,
concentration1: Union[Tensor, float],
concentration0: Union[Tensor, float],
validate_args: Optional[bool] = None,
) -> None:
self.concentration1, self.concentration0 = broadcast_all(
concentration1, concentration0
)
base_dist = Uniform(
torch.full_like(self.concentration0, 0),
torch.full_like(self.concentration0, 1),
validate_args=validate_args,
)
transforms = [
PowerTransform(exponent=self.concentration0.reciprocal()),
AffineTransform(loc=1.0, scale=-1.0),
PowerTransform(exponent=self.concentration1.reciprocal()),
]
# pyrefly: ignore [bad-argument-type]
super().__init__(base_dist, transforms, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Kumaraswamy, _instance)
new.concentration1 = self.concentration1.expand(batch_shape)
new.concentration0 = self.concentration0.expand(batch_shape)
return super().expand(batch_shape, _instance=new)
@property
def mean(self) -> Tensor:
return _moments(self.concentration1, self.concentration0, 1)
@property
def mode(self) -> Tensor:
# Evaluate in log-space for numerical stability.
log_mode = (
self.concentration0.reciprocal() * (-self.concentration0).log1p()
- (-self.concentration0 * self.concentration1).log1p()
)
log_mode[(self.concentration0 < 1) | (self.concentration1 < 1)] = nan
return log_mode.exp()
@property
def variance(self) -> Tensor:
return _moments(self.concentration1, self.concentration0, 2) - torch.pow(
self.mean, 2
)
def entropy(self):
t1 = 1 - self.concentration1.reciprocal()
t0 = 1 - self.concentration0.reciprocal()
H0 = torch.digamma(self.concentration0 + 1) + euler_constant
return (
t0
+ t1 * H0
- torch.log(self.concentration1)
- torch.log(self.concentration0)
)
| Kumaraswamy |
python | getsentry__sentry | src/sentry/ingest/transaction_clusterer/rules.py | {
"start": 2941,
"end": 4252
} | class ____:
def __init__(self, namespace: ClustererNamespace):
self._storage = namespace.value.persistent_storage
self._tracker = namespace.value.tracker
def read_sorted(self, project: Project) -> list[tuple[ReplacementRule, int]]:
ret = project.get_option(self._storage, default=[])
# normalize tuple vs. list for json writing
return [tuple(lst) for lst in ret]
def read(self, project: Project) -> RuleSet:
rules = {rule: last_seen for rule, last_seen in self.read_sorted(project)}
self.last_read = rules
return rules
def _sort(self, rules: RuleSet) -> list[tuple[ReplacementRule, int]]:
"""Sort rules by number of slashes, i.e. depth of the rule"""
return sorted(rules.items(), key=lambda p: p[0].count("/"), reverse=True)
def write(self, project: Project, rules: RuleSet) -> None:
"""Writes the rules to project options, sorted by depth."""
# we make sure the database stores lists such that they are json round trippable
converted_rules = [list(tup) for tup in self._sort(rules)]
# Track the number of rules per project.
metrics.distribution(self._tracker, len(converted_rules))
project.update_option(self._storage, converted_rules)
| ProjectOptionRuleStore |
python | getsentry__sentry | src/sentry/newsletter/dummy.py | {
"start": 271,
"end": 2021
} | class ____:
def __init__(
self,
user,
list_id,
list_name=None,
list_description=None,
email=None,
verified=None,
subscribed=False,
subscribed_date=None,
unsubscribed_date=None,
**kwargs,
):
from sentry.users.models.useremail import UserEmail
self.email = user.email or email
self.list_id = list_id
self.list_description = list_description
self.list_name = list_name
# is the email address verified?
self.verified = (
UserEmail.objects.get_primary_email(user).is_verified if verified is None else verified
)
# are they subscribed to ``list_id``
self.subscribed = subscribed
if subscribed:
self.subscribed_date = subscribed_date or timezone.now()
elif subscribed is False:
self.unsubscribed_date = unsubscribed_date or timezone.now()
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
def update(
self, verified=None, subscribed=None, subscribed_date=None, unsubscribed_date=None, **kwargs
):
if verified is not None:
self.verified = verified
if subscribed is not None:
self.subscribed = subscribed
if subscribed_date is not None:
self.subscribed_date = subscribed_date
elif subscribed:
self.subscribed_date = timezone.now()
if unsubscribed_date is not None:
self.unsubscribed_date = unsubscribed_date
elif subscribed is False:
self.unsubscribed_date = timezone.now()
| NewsletterSubscription |
python | keon__algorithms | tests/test_tree.py | {
"start": 336,
"end": 478
} | class ____:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
| Node |
python | anthropics__anthropic-sdk-python | src/anthropic/_streaming.py | {
"start": 5024,
"end": 8048
} | class ____(Generic[_T], metaclass=_AsyncStreamMeta):
"""Provides the core interface to iterate over an asynchronous stream response."""
response: httpx.Response
_decoder: SSEDecoder | SSEBytesDecoder
def __init__(
self,
*,
cast_to: type[_T],
response: httpx.Response,
client: AsyncAnthropic,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
async def __anext__(self) -> _T:
return await self._iterator.__anext__()
async def __aiter__(self) -> AsyncIterator[_T]:
async for item in self._iterator:
yield item
async def _iter_events(self) -> AsyncIterator[ServerSentEvent]:
async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()):
yield sse
async def __stream__(self) -> AsyncIterator[_T]:
cast_to = cast(Any, self._cast_to)
response = self.response
process_data = self._client._process_response_data
iterator = self._iter_events()
async for sse in iterator:
if sse.event == "completion":
yield process_data(data=sse.json(), cast_to=cast_to, response=response)
if (
sse.event == "message_start"
or sse.event == "message_delta"
or sse.event == "message_stop"
or sse.event == "content_block_start"
or sse.event == "content_block_delta"
or sse.event == "content_block_stop"
):
data = sse.json()
if is_dict(data) and "type" not in data:
data["type"] = sse.event
yield process_data(data=data, cast_to=cast_to, response=response)
if sse.event == "ping":
continue
if sse.event == "error":
body = sse.data
try:
body = sse.json()
err_msg = f"{body}"
except Exception:
err_msg = sse.data or f"Error code: {response.status_code}"
raise self._client._make_status_error(
err_msg,
body=body,
response=self.response,
)
# As we might not fully consume the response stream, we need to close it explicitly
await response.aclose()
async def __aenter__(self) -> Self:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.close()
async def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
await self.response.aclose()
| AsyncStream |
python | getsentry__sentry | tests/sentry/sentry_apps/tasks/test_sentry_apps.py | {
"start": 74332,
"end": 76973
} | class ____(TestCase):
def setUp(self) -> None:
self.sentry_app = self.create_sentry_app(
name="Test App",
webhook_url="https://example.com",
organization=self.organization,
events=["issue.created", "issue.resolved", "error.created"],
)
self.install = self.create_sentry_app_installation(
organization=self.organization, slug=self.sentry_app.slug
)
def test_regenerate_service_hook_for_installation_success(self):
with assume_test_silo_mode(SiloMode.REGION):
hook = ServiceHook.objects.get(installation_id=self.install.id)
hook.events = ["issue.resolved", "error.created"]
hook.save()
with self.tasks(), assume_test_silo_mode(SiloMode.CONTROL):
regenerate_service_hooks_for_installation(
installation_id=self.install.id,
webhook_url=self.sentry_app.webhook_url,
events=self.sentry_app.events,
)
with assume_test_silo_mode(SiloMode.REGION):
hook.refresh_from_db()
assert set(hook.events) == {"issue.created", "issue.resolved", "error.created"}
def test_regenerate_service_hook_for_installation_event_not_in_app_events(self):
with self.tasks(), assume_test_silo_mode(SiloMode.CONTROL):
regenerate_service_hooks_for_installation(
installation_id=self.install.id,
webhook_url=self.sentry_app.webhook_url,
events=self.sentry_app.events,
)
with assume_test_silo_mode(SiloMode.REGION):
hook = ServiceHook.objects.get(installation_id=self.install.id)
assert set(hook.events) == {"issue.created", "issue.resolved", "error.created"}
def test_regenerate_service_hook_for_installation_with_empty_app_events(self):
with assume_test_silo_mode(SiloMode.CONTROL):
self.sentry_app.update(events=[])
assert self.sentry_app.events == []
with assume_test_silo_mode(SiloMode.REGION):
hook = ServiceHook.objects.get(installation_id=self.install.id)
assert hook.events != []
with self.tasks(), assume_test_silo_mode(SiloMode.CONTROL):
regenerate_service_hooks_for_installation(
installation_id=self.install.id,
webhook_url=self.sentry_app.webhook_url,
events=self.sentry_app.events,
)
with assume_test_silo_mode(SiloMode.REGION):
hook.refresh_from_db()
assert hook.events == []
| TestBackfillServiceHooksEvents |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/variables/variables_test.py | {
"start": 26037,
"end": 28278
} | class ____(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variable_v1.VariableV1([1, 2], name="v")
w = variable_v1.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
self.evaluate(v.initializer)
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
self.evaluate(v.initializer) # not strictly necessary
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
| IsInitializedTest |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 713,
"end": 783
} | class ____:
group_id: str
group_type: GroupTypes
| GroupAssignment |
python | huggingface__transformers | tests/models/qwen2_5_vl/test_processing_qwen2_5_vl.py | {
"start": 1108,
"end": 11827
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Qwen2_5_VLProcessor
model_id = "Qwen/Qwen2-VL-7B-Instruct"
@classmethod
def _setup_from_pretrained(cls, model_id, **kwargs):
return super()._setup_from_pretrained(model_id, patch_size=4, max_pixels=56 * 56, min_pixels=28 * 28, **kwargs)
def test_get_num_vision_tokens(self):
"Tests general functionality of the helper used internally in vLLM"
processor = self.get_processor()
output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
self.assertTrue("num_image_tokens" in output)
self.assertEqual(len(output["num_image_tokens"]), 3)
self.assertTrue("num_image_patches" in output)
self.assertEqual(len(output["num_image_patches"]), 3)
@require_torch
@require_av
def _test_apply_chat_template(
self,
modality: str,
batch_size: int,
return_tensors: str,
input_name: str,
processor_name: str,
input_data: list[str],
):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
if processor_name not in self.processor_class.get_attributes():
self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
batch_messages = [
[
{
"role": "user",
"content": [{"type": "text", "text": "Describe this."}],
},
]
] * batch_size
# Test that jinja can be applied
formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), batch_size)
# Test that tokenizing with template and directly with `self.tokenizer` gives same output
formatted_prompt_tokenized = processor.apply_chat_template(
batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
)
add_special_tokens = True
if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
add_special_tokens = False
tok_output = processor.tokenizer(
formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
)
expected_output = tok_output.input_ids
self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
# Test that kwargs passed to processor's `__call__` are actually used
tokenized_prompt_100 = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
truncation=True,
return_tensors=return_tensors,
max_length=100,
)
self.assertEqual(len(tokenized_prompt_100[0]), 100)
# Test that `return_dict=True` returns text related inputs in the dict
out_dict_text = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
)
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
# Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
for idx, url in enumerate(input_data[:batch_size]):
batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
out_dict = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
num_frames=2, # by default no more than 2 frames, otherwise too slow
)
input_name = getattr(self, input_name)
self.assertTrue(input_name in out_dict)
self.assertEqual(len(out_dict["input_ids"]), batch_size)
self.assertEqual(len(out_dict["attention_mask"]), batch_size)
if modality == "video":
# qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw
expected_video_token_count = 0
for thw in out_dict["video_grid_thw"]:
expected_video_token_count += thw[0] * thw[1] * thw[2]
mm_len = expected_video_token_count
else:
mm_len = batch_size * 192
self.assertEqual(len(out_dict[input_name]), mm_len)
return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
for k in out_dict:
self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])
@require_av
def test_apply_chat_template_video_frame_sampling(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
messages = [
[
{
"role": "user",
"content": [
{"type": "video"},
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), 1)
formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
self.assertListEqual(expected_output, formatted_prompt_tokenized)
out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
# Add video URL for return dict and load with `num_frames` arg
messages[0][0]["content"][0] = {
"type": "video",
"url": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
),
}
num_frames = 3
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
num_frames=num_frames,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 360)
# Load with `fps` arg
fps = 1
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 360)
# Load with `fps` and `num_frames` args, should raise an error
with self.assertRaises(ValueError):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
num_frames=num_frames,
)
# Load without any arg should load the whole video
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 1080)
# Load video as a list of frames (i.e. images). NOTE: each frame should have same size
# because we assume they come from one video
messages[0][0]["content"][0] = {
"type": "video",
"url": [
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
],
}
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 160)
# When the inputs are frame URLs/paths we expect that those are already
# sampled and will raise an error is asked to sample again.
with self.assertRaisesRegex(
ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=True,
)
def test_kwargs_overrides_custom_image_processor_kwargs(self):
processor = self.get_processor()
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input, max_pixels=56 * 56 * 4, return_tensors="pt")
self.assertEqual(inputs[self.images_input_name].shape[0], 612)
inputs = processor(text=input_str, images=image_input, return_tensors="pt")
self.assertEqual(inputs[self.images_input_name].shape[0], 100)
| Qwen2_5_VLProcessorTest |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_profile_views.py | {
"start": 453,
"end": 9180
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.user.set_password("test")
self.user.save()
self.client.login(username=self.user.username, password="test")
def test_edit_profile(self):
resp = self.client.get(
reverse("profiles_profile_edit"),
)
self.assertTrue(resp.status_code, 200)
resp = self.client.post(
reverse("profiles_profile_edit"),
data={
"first_name": "Read",
"last_name": "Docs",
"homepage": "readthedocs.org",
},
)
self.assertTrue(resp.status_code, 200)
self.assertEqual(resp["Location"], "/accounts/edit/")
self.user.refresh_from_db()
self.user.profile.refresh_from_db()
self.assertEqual(self.user.first_name, "Read")
self.assertEqual(self.user.last_name, "Docs")
self.assertEqual(self.user.profile.homepage, "readthedocs.org")
def test_edit_profile_with_invalid_values(self):
resp = self.client.get(
reverse("profiles_profile_edit"),
)
self.assertTrue(resp.status_code, 200)
resp = self.client.post(
reverse("profiles_profile_edit"),
data={
"first_name": "a" * 31,
"last_name": "b" * 31,
"homepage": "c" * 101,
},
)
FORM_ERROR_FORMAT = "Ensure this value has at most {} characters (it has {})."
self.assertFormError(
resp.context.get('form'),
field="first_name",
errors=FORM_ERROR_FORMAT.format(30, 31),
)
self.assertFormError(
resp.context.get('form'),
field="last_name",
errors=FORM_ERROR_FORMAT.format(30, 31),
)
self.assertFormError(
resp.context.get('form'),
field="homepage",
errors=FORM_ERROR_FORMAT.format(100, 101),
)
def test_delete_account(self):
resp = self.client.get(
reverse("delete_account"),
)
self.assertEqual(resp.status_code, 200)
resp = self.client.post(
reverse("delete_account"),
data={
"username": self.user.username,
},
)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp["Location"], reverse("homepage"))
self.assertFalse(
User.objects.filter(username=self.user.username).exists(),
)
def test_profile_detail(self):
resp = self.client.get(
reverse("profiles_profile_detail", args=(self.user.username,)),
)
self.assertTrue(resp.status_code, 200)
def test_profile_detail_logout(self):
self.client.logout()
resp = self.client.get(
reverse("profiles_profile_detail", args=(self.user.username,)),
)
self.assertTrue(resp.status_code, 200)
def test_profile_detail_not_found(self):
resp = self.client.get(
reverse("profiles_profile_detail", args=("not-found",)),
)
self.assertTrue(resp.status_code, 404)
def test_account_advertising(self):
resp = self.client.get(
reverse("account_advertising"),
)
self.assertEqual(resp.status_code, 200)
self.assertTrue(self.user.profile.allow_ads)
resp = self.client.post(
reverse("account_advertising"),
data={"allow_ads": False},
)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp["Location"], reverse("account_advertising"))
self.user.profile.refresh_from_db()
self.assertFalse(self.user.profile.allow_ads)
def test_list_api_tokens(self):
resp = self.client.get(reverse("profiles_tokens"))
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, "You currently have no API tokens.")
token = Token.objects.create(user=self.user)
resp = self.client.get(reverse("profiles_tokens"))
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, token.key)
def test_create_api_token(self):
self.assertEqual(Token.objects.filter(user=self.user).count(), 0)
resp = self.client.get(reverse("profiles_tokens_create"))
self.assertEqual(resp.status_code, 405) # GET not allowed
resp = self.client.post(reverse("profiles_tokens_create"))
self.assertEqual(resp.status_code, 302)
self.assertEqual(Token.objects.filter(user=self.user).count(), 1)
def test_delete_api_token(self):
Token.objects.create(user=self.user)
self.assertEqual(Token.objects.filter(user=self.user).count(), 1)
resp = self.client.post(reverse("profiles_tokens_delete"))
self.assertEqual(resp.status_code, 302)
self.assertEqual(Token.objects.filter(user=self.user).count(), 0)
def test_list_security_logs(self):
project = get(Project, users=[self.user], slug="project")
another_project = get(Project, users=[self.user], slug="another-project")
another_user = get(User)
actions = [
AuditLog.AUTHN,
AuditLog.AUTHN_FAILURE,
AuditLog.LOGOUT,
AuditLog.PAGEVIEW,
]
ips = [
"10.10.10.1",
"10.10.10.2",
]
users = [self.user, another_user]
AuditLog.objects.all().delete()
for action, ip, user in itertools.product(actions, ips, users):
get(
AuditLog,
user=user,
action=action,
ip=ip,
)
get(
AuditLog,
user=user,
action=action,
project=project,
ip=ip,
)
get(
AuditLog,
user=user,
action=action,
project=another_project,
ip=ip,
)
self.assertEqual(AuditLog.objects.count(), 48)
queryset = AuditLog.objects.filter(
log_user_id=self.user.pk,
action__in=[AuditLog.AUTHN, AuditLog.AUTHN_FAILURE, AuditLog.LOGOUT],
)
# Show logs from the current user
# and for authn/authn_failure/logout events only.
resp = self.client.get(reverse("profiles_security_log"))
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, queryset)
# Show logs filtered by project.
resp = self.client.get(reverse("profiles_security_log") + "?project=project")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, queryset.filter(log_project_slug="project"))
# Show logs filtered by IP.
ip = "10.10.10.2"
resp = self.client.get(reverse("profiles_security_log") + f"?ip={ip}")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, queryset.filter(ip=ip))
# Show logs filtered by action.
resp = self.client.get(
reverse("profiles_security_log") + "?action=authentication"
)
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, queryset.filter(action=AuditLog.AUTHN))
# Show logs filtered by action.
resp = self.client.get(
reverse("profiles_security_log") + "?action=authentication-failure"
)
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(
auditlogs, queryset.filter(action=AuditLog.AUTHN_FAILURE)
)
# Show logs filtered by invalid values.
for filter in ["ip", "project"]:
resp = self.client.get(
reverse("profiles_security_log") + f"?{filter}=invalid"
)
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertEqual(auditlogs.count(), 0, filter)
# If action isn't a valid value, the filter is just ignored.
resp = self.client.get(reverse("profiles_security_log") + "?action=invalid")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, queryset)
@override_settings(
RTD_ALLOW_ORGANIZATIONS=True,
)
| ProfileViewsTest |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 77886,
"end": 79058
} | class ____:
@pytest.mark.parametrize("arr, ind", [
(np.ones((4, 6, 8, 2)), 2),
(np.ones((3, 3, 2)), 1),
])
def test_non_square_handling(self, arr, ind):
with assert_raises(LinAlgError):
linalg.tensorinv(arr, ind=ind)
@pytest.mark.parametrize("shape, ind", [
# examples from docstring
((4, 6, 8, 3), 2),
((24, 8, 3), 1),
])
def test_tensorinv_shape(self, shape, ind):
a = np.eye(24).reshape(shape)
ainv = linalg.tensorinv(a=a, ind=ind)
expected = a.shape[ind:] + a.shape[:ind]
actual = ainv.shape
assert_equal(actual, expected)
@pytest.mark.parametrize("ind", [
0, -2,
])
def test_tensorinv_ind_limit(self, ind):
a = np.eye(24).reshape((4, 6, 8, 3))
with assert_raises(ValueError):
linalg.tensorinv(a=a, ind=ind)
def test_tensorinv_result(self):
# mimic a docstring example
a = np.eye(24).reshape((24, 8, 3))
ainv = linalg.tensorinv(a, ind=1)
b = np.ones(24)
assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
| TestTensorinv |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 426347,
"end": 432440
} | class ____(sgqlc.types.Interface):
"""A subset of repository info."""
__schema__ = github_schema
__field_names__ = (
"archived_at",
"created_at",
"description",
"description_html",
"fork_count",
"has_discussions_enabled",
"has_issues_enabled",
"has_projects_enabled",
"has_wiki_enabled",
"homepage_url",
"is_archived",
"is_fork",
"is_in_organization",
"is_locked",
"is_mirror",
"is_private",
"is_template",
"license_info",
"lock_reason",
"mirror_url",
"name",
"name_with_owner",
"open_graph_image_url",
"owner",
"pushed_at",
"resource_path",
"short_description_html",
"updated_at",
"url",
"uses_custom_open_graph_image",
"visibility",
)
archived_at = sgqlc.types.Field(DateTime, graphql_name="archivedAt")
"""Identifies the date and time when the repository was archived."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
description = sgqlc.types.Field(String, graphql_name="description")
"""The description of the repository."""
description_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="descriptionHTML")
"""The description of the repository rendered to HTML."""
fork_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="forkCount")
"""Returns how many forks there are of this repository in the whole
network.
"""
has_discussions_enabled = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasDiscussionsEnabled")
"""Indicates if the repository has the Discussions feature enabled."""
has_issues_enabled = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasIssuesEnabled")
"""Indicates if the repository has issues feature enabled."""
has_projects_enabled = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasProjectsEnabled")
"""Indicates if the repository has the Projects feature enabled."""
has_wiki_enabled = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasWikiEnabled")
"""Indicates if the repository has wiki feature enabled."""
homepage_url = sgqlc.types.Field(URI, graphql_name="homepageUrl")
"""The repository's URL."""
is_archived = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isArchived")
"""Indicates if the repository is unmaintained."""
is_fork = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isFork")
"""Identifies if the repository is a fork."""
is_in_organization = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isInOrganization")
"""Indicates if a repository is either owned by an organization, or
is a private fork of an organization repository.
"""
is_locked = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isLocked")
"""Indicates if the repository has been locked or not."""
is_mirror = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isMirror")
"""Identifies if the repository is a mirror."""
is_private = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPrivate")
"""Identifies if the repository is private or internal."""
is_template = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isTemplate")
"""Identifies if the repository is a template that can be used to
generate new repositories.
"""
license_info = sgqlc.types.Field("License", graphql_name="licenseInfo")
"""The license associated with the repository"""
lock_reason = sgqlc.types.Field(RepositoryLockReason, graphql_name="lockReason")
"""The reason the repository has been locked."""
mirror_url = sgqlc.types.Field(URI, graphql_name="mirrorUrl")
"""The repository's original mirror URL."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the repository."""
name_with_owner = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="nameWithOwner")
"""The repository's name with owner."""
open_graph_image_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="openGraphImageUrl")
"""The image used to represent this repository in Open Graph data."""
owner = sgqlc.types.Field(sgqlc.types.non_null("RepositoryOwner"), graphql_name="owner")
"""The User owner of the repository."""
pushed_at = sgqlc.types.Field(DateTime, graphql_name="pushedAt")
"""Identifies the date and time when the repository was last pushed
to.
"""
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTTP path for this repository"""
short_description_html = sgqlc.types.Field(
sgqlc.types.non_null(HTML),
graphql_name="shortDescriptionHTML",
args=sgqlc.types.ArgDict((("limit", sgqlc.types.Arg(Int, graphql_name="limit", default=200)),)),
)
"""A description of the repository, rendered to HTML without any
links in it.
Arguments:
* `limit` (`Int`): How many characters to return. (default: `200`)
"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The HTTP URL for this repository"""
uses_custom_open_graph_image = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="usesCustomOpenGraphImage")
"""Whether this repository has a custom image to use with Open Graph
as opposed to being represented by the owner's avatar.
"""
visibility = sgqlc.types.Field(sgqlc.types.non_null(RepositoryVisibility), graphql_name="visibility")
"""Indicates the repository's visibility level."""
| RepositoryInfo |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_styles09.py | {
"start": 380,
"end": 3914
} | class ____(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for simple font styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
format1 = workbook.add_format(
{
"font_color": "#9C0006",
"bg_color": "#FFC7CE",
"font_condense": 1,
"font_extend": 1,
"has_fill": 1,
"has_font": 1,
}
)
# Get (and set) the DXF format index.
format1._get_dxf_index()
workbook._prepare_format_properties()
style._set_style_properties(
[
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_formats,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
]
)
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="1">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="1">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="1">
<dxf>
<font>
<condense val="0"/>
<extend val="0"/>
<color rgb="FF9C0006"/>
</font>
<fill>
<patternFill>
<bgColor rgb="FFFFC7CE"/>
</patternFill>
</fill>
</dxf>
</dxfs>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleStyles |
python | tensorflow__tensorflow | tensorflow/tools/compatibility/testdata/test_file_v1_12.py | {
"start": 880,
"end": 2750
} | class ____(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0."""
@classmethod
def setUpClass(cls):
cls._tf_api_version = 1 if hasattr(tf, 'contrib') else 2
def setUp(self):
tf.compat.v1.enable_v2_behavior()
def testRenames(self):
self.assertAllClose(1.04719755, tf.acos(0.5))
self.assertAllClose(0.5, tf.rsqrt(4.0))
def testSerializeSparseTensor(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[2])
with self.cached_session():
serialized_sp = tf.serialize_sparse(sp_input, 'serialize_name', tf.string)
self.assertEqual((3,), serialized_sp.shape)
self.assertTrue(serialized_sp[0].numpy()) # check non-empty
def testSerializeManySparse(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[0, 1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[1, 2])
with self.cached_session():
serialized_sp = tf.serialize_many_sparse(
sp_input, 'serialize_name', tf.string)
self.assertEqual((1, 3), serialized_sp.shape)
def testArgMaxMin(self):
self.assertAllClose(
[1],
tf.argmax([[1, 3, 2]], name='abc', dimension=1))
self.assertAllClose(
[0, 0, 0],
tf.argmax([[1, 3, 2]], dimension=0))
self.assertAllClose(
[0],
tf.argmin([[1, 3, 2]], name='abc', dimension=1))
def testSoftmaxCrossEntropyWithLogits(self):
out = tf.nn.softmax_cross_entropy_with_logits(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
out = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
if __name__ == "__main__":
test_lib.main()
| TestUpgrade |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_kubernetes_engine.py | {
"start": 48314,
"end": 48567
} | class ____:
def test_template_fields(self):
assert set(GKEStartKueueJobOperator.template_fields) == set(GKEOperatorMixin.template_fields) | set(
KubernetesStartKueueJobOperator.template_fields
)
| TestGKEStartKueueJobOperator |
python | facebook__pyre-check | client/commands/subscription.py | {
"start": 2048,
"end": 2193
} | class ____:
pass
Body = Union[TypeErrors, StatusUpdate, Error, IncrementalTelemetry]
@dataclasses.dataclass(frozen=True)
| IncrementalTelemetry |
python | google__jax | jax/_src/state/discharge.py | {
"start": 4461,
"end": 5542
} | class ____(Protocol):
def __call__(
self,
in_avals: Sequence[core.AbstractValue],
out_avals: Sequence[core.AbstractValue],
*args: Any,
**params: Any,
) -> tuple[Sequence[Any | None], Any | Sequence[Any]]:
"""Discharge rule for a primitive.
See :func:`discharge_state` for an explanation of what discharge means.
Args:
in_avals: Input abstract values.
out_avals: Output abstract values.
*args: Input values.
**params: Primitive parameters.
Returns:
A tuple of ``(new_invals, new_outvals)`` where:
* ``new_invals`` contains updated values for discharged ``Ref`` inputs,
or ``None`` if the input is not a ``Ref`` or was not updated.
* ``new_outvals`` is the primitive's output. A sequence if the primitive
has multiple results, otherwise a single value.
"""
_discharge_rules: dict[core.Primitive, DischargeRule] = {}
def register_discharge_rule(prim: core.Primitive):
def register(f: DischargeRule):
_discharge_rules[prim] = f
return register
| DischargeRule |
python | django__django | django/db/models/sql/subqueries.py | {
"start": 1394,
"end": 5145
} | class ____(Query):
"""An UPDATE SQL query."""
compiler = "SQLUpdateCompiler"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Run on initialization and at the end of chaining. Any attributes that
would normally be set in __init__() should go here instead.
"""
self.values = []
self.related_ids = None
self.related_updates = {}
def clone(self):
obj = super().clone()
obj.related_updates = self.related_updates.copy()
return obj
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.clear_where()
self.add_filter(
"pk__in", pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]
)
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in values.items():
field = self.get_meta().get_field(name)
model = field.model._meta.concrete_model
if field.name == "pk" and model._meta.is_composite_pk:
raise FieldError(
"Composite primary key fields must be updated individually."
)
if not field.concrete:
raise FieldError(
"Cannot update model field %r (only concrete fields are permitted)."
% field
)
if model is not self.get_meta().concrete_model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Append a sequence of (field, model, value) triples to the internal list
that will be used to generate the UPDATE query. Might be more usefully
called add_update_targets() to hint at the extra information here.
"""
for field, model, val in values_seq:
# Omit generated fields.
if field.generated:
continue
if hasattr(val, "resolve_expression"):
# Resolve expressions here so that annotations are no longer
# needed
val = val.resolve_expression(self, allow_joins=False, for_save=True)
self.values.append((field, model, val))
def add_related_update(self, model, field, value):
"""
Add (name, value) to an update query for an ancestor model.
Update are coalesced so that only one update query per ancestor is run.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Return a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in self.related_updates.items():
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter("pk__in", self.related_ids[model])
result.append(query)
return result
| UpdateQuery |
python | redis__redis-py | redis/commands/search/hybrid_query.py | {
"start": 6555,
"end": 8335
} | class ____:
def __init__(self, method: CombinationMethods, **kwargs) -> None:
"""
Create a new combine results method object.
Args:
method: The combine method to use - RRF or LINEAR.
kwargs: Additional combine parameters.
For RRF, the following parameters are supported(at least one should be provided):
WINDOW: Limits fusion scopeLimits fusion scope.
CONSTANT: Controls decay of rank influence.
YIELD_SCORE_AS: The name of the field to yield the calculated score as.
For LINEAR, supported parameters (at least one should be provided):
ALPHA: The weight of the first query.
BETA: The weight of the second query.
YIELD_SCORE_AS: The name of the field to yield the calculated score as.
The additional parameters are not validated and are passed as is to the server.
The supported format is to provide the parameter names and values like the following:
CombineResultsMethod(CombinationMethods.RRF, WINDOW=3, CONSTANT=0.5)
CombineResultsMethod(CombinationMethods.LINEAR, ALPHA=0.5, BETA=0.5)
"""
self._method = method
self._kwargs = kwargs
def get_args(self) -> List[Union[str, int]]:
args: List[Union[str, int]] = ["COMBINE", self._method.value]
if self._kwargs:
args.append(len(self._kwargs.items()) * 2)
for key, value in self._kwargs.items():
args.extend((key, value))
return args
@experimental
| CombineResultsMethod |
python | networkx__networkx | networkx/algorithms/traversal/tests/test_bfs.py | {
"start": 70,
"end": 4078
} | class ____:
@classmethod
def setup_class(cls):
# simple graph
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)])
cls.G = G
def test_successor(self):
assert dict(nx.bfs_successors(self.G, source=0)) == {0: [1], 1: [2, 3], 2: [4]}
def test_predecessor(self):
assert dict(nx.bfs_predecessors(self.G, source=0)) == {1: 0, 2: 1, 3: 1, 4: 2}
def test_bfs_tree(self):
T = nx.bfs_tree(self.G, source=0)
assert sorted(T.nodes()) == sorted(self.G.nodes())
assert sorted(T.edges()) == [(0, 1), (1, 2), (1, 3), (2, 4)]
def test_bfs_edges(self):
edges = nx.bfs_edges(self.G, source=0)
assert list(edges) == [(0, 1), (1, 2), (1, 3), (2, 4)]
def test_bfs_edges_reverse(self):
D = nx.DiGraph()
D.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)])
edges = nx.bfs_edges(D, source=4, reverse=True)
assert list(edges) == [(4, 2), (4, 3), (2, 1), (1, 0)]
def test_bfs_edges_sorting(self):
D = nx.DiGraph()
D.add_edges_from([(0, 1), (0, 2), (1, 4), (1, 3), (2, 5)])
sort_desc = partial(sorted, reverse=True)
edges_asc = nx.bfs_edges(D, source=0, sort_neighbors=sorted)
edges_desc = nx.bfs_edges(D, source=0, sort_neighbors=sort_desc)
assert list(edges_asc) == [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5)]
assert list(edges_desc) == [(0, 2), (0, 1), (2, 5), (1, 4), (1, 3)]
def test_bfs_tree_isolates(self):
G = nx.Graph()
G.add_node(1)
G.add_node(2)
T = nx.bfs_tree(G, source=1)
assert sorted(T.nodes()) == [1]
assert sorted(T.edges()) == []
def test_bfs_layers(self):
expected = {
0: [0],
1: [1],
2: [2, 3],
3: [4],
}
for sources in [0, [0], (i for i in [0]), [0, 0]]:
assert dict(enumerate(nx.bfs_layers(self.G, sources))) == expected
def test_bfs_layers_missing_source(self):
with pytest.raises(nx.NetworkXError):
next(nx.bfs_layers(self.G, sources="abc"))
with pytest.raises(nx.NetworkXError):
next(nx.bfs_layers(self.G, sources=["abc"]))
def test_descendants_at_distance(self):
for distance, descendants in enumerate([{0}, {1}, {2, 3}, {4}]):
assert nx.descendants_at_distance(self.G, 0, distance) == descendants
def test_descendants_at_distance_missing_source(self):
with pytest.raises(nx.NetworkXError):
nx.descendants_at_distance(self.G, "abc", 0)
def test_bfs_labeled_edges_directed(self):
D = nx.cycle_graph(5, create_using=nx.DiGraph)
expected = [
(0, 1, "tree"),
(1, 2, "tree"),
(2, 3, "tree"),
(3, 4, "tree"),
(4, 0, "reverse"),
]
answer = list(nx.bfs_labeled_edges(D, 0))
assert expected == answer
D.add_edge(4, 4)
expected.append((4, 4, "level"))
answer = list(nx.bfs_labeled_edges(D, 0))
assert expected == answer
D.add_edge(0, 2)
D.add_edge(1, 5)
D.add_edge(2, 5)
D.remove_edge(4, 4)
expected = [
(0, 1, "tree"),
(0, 2, "tree"),
(1, 2, "level"),
(1, 5, "tree"),
(2, 3, "tree"),
(2, 5, "forward"),
(3, 4, "tree"),
(4, 0, "reverse"),
]
answer = list(nx.bfs_labeled_edges(D, 0))
assert expected == answer
G = D.to_undirected()
G.add_edge(4, 4)
expected = [
(0, 1, "tree"),
(0, 2, "tree"),
(0, 4, "tree"),
(1, 2, "level"),
(1, 5, "tree"),
(2, 3, "tree"),
(2, 5, "forward"),
(4, 3, "forward"),
(4, 4, "level"),
]
answer = list(nx.bfs_labeled_edges(G, 0))
assert expected == answer
| TestBFS |
python | numba__numba | numba/core/funcdesc.py | {
"start": 7545,
"end": 8273
} | class ____(FunctionDescriptor):
"""
A FunctionDescriptor subclass for opaque external functions
(e.g. raw C functions).
"""
__slots__ = ()
def __init__(self, name, restype, argtypes):
args = ["arg%d" % i for i in range(len(argtypes))]
def mangler(a, x, abi_tags, uid=None):
return a
super(ExternalFunctionDescriptor, self
).__init__(native=True, modname=None, qualname=name,
unique_name=name, doc='', typemap=None,
restype=restype, calltypes=None, args=args,
kws=None,
mangler=mangler,
argtypes=argtypes)
| ExternalFunctionDescriptor |
python | pypa__virtualenv | src/virtualenv/seed/embed/via_app_data/pip_install/copy.py | {
"start": 142,
"end": 1226
} | class ____(PipInstall):
def _sync(self, src, dst):
copy(src, dst)
def _generate_new_files(self):
# create the pyc files
new_files = super()._generate_new_files()
new_files.update(self._cache_files())
return new_files
def _cache_files(self):
version = self._creator.interpreter.version_info
py_c_ext = f".{self._creator.interpreter.implementation.lower()}-{version.major}{version.minor}.pyc"
for root, dirs, files in os.walk(str(self._image_dir), topdown=True):
root_path = Path(root)
for name in files:
if name.endswith(".py"):
yield root_path / f"{name[:-3]}{py_c_ext}"
for name in dirs:
yield root_path / name / "__pycache__"
def _fix_records(self, new_files):
extra_record_data_str = self._records_text(new_files)
with (self._dist_info / "RECORD").open("ab") as file_handler:
file_handler.write(extra_record_data_str.encode("utf-8"))
__all__ = [
"CopyPipInstall",
]
| CopyPipInstall |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 55626,
"end": 58338
} | class ____(Request):
"""
Get 'plot' events for the given tasks
:param tasks: List of task IDs
:type tasks: Sequence[str]
:param iters: Max number of latest iterations for which to return debug images
:type iters: int
:param scroll_id: Scroll ID of previous call (used for getting more results)
:type scroll_id: str
"""
_service = "events"
_action = "get_multi_task_plots"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"iters": {
"description": "Max number of latest iterations for which to return debug images",
"type": "integer",
},
"scroll_id": {
"description": "Scroll ID of previous call (used for getting more results)",
"type": "string",
},
"tasks": {
"description": "List of task IDs",
"items": {"description": "Task ID", "type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(
self, tasks: List[str], iters: Optional[int] = None, scroll_id: Optional[str] = None, **kwargs: Any
) -> None:
super(GetMultiTaskPlotsRequest, self).__init__(**kwargs)
self.tasks = tasks
self.iters = iters
self.scroll_id = scroll_id
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("iters")
def iters(self) -> Optional[int]:
return self._property_iters
@iters.setter
def iters(self, value: Optional[int]) -> None:
if value is None:
self._property_iters = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iters", six.integer_types)
self._property_iters = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetMultiTaskPlotsRequest |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | {
"start": 10833,
"end": 16173
} | class ____(BaseSphinx):
"""Builder to generate PDF documentation."""
relative_output_dir = "pdf"
sphinx_builder = "latex"
pdf_file_name = None
def build(self):
language = self.get_language(self.project)
self.run(
*self.get_sphinx_cmd(),
"-T",
"-b",
self.sphinx_builder,
"-d",
self.sphinx_doctrees_dir,
"-D",
f"language={language}",
# Sphinx's source directory (SOURCEDIR).
# We are executing this command at the location of the `conf.py` file (CWD).
# TODO: ideally we should execute it from where the repository was clonned,
# but that could lead unexpected behavior to some users:
# https://github.com/readthedocs/readthedocs.org/pull/9888#issuecomment-1384649346
".",
# Sphinx's output build directory (OUTPUTDIR)
self.absolute_container_output_dir,
cwd=os.path.dirname(self.config_file),
bin_path=self.python_env.venv_bin(),
)
tex_files = glob(os.path.join(self.absolute_host_output_dir, "*.tex"))
if not tex_files:
raise BuildUserError(message_id=BuildUserError.TEX_FILE_NOT_FOUND)
# Run LaTeX -> PDF conversions
success = self._build_latexmk(self.project_path)
self._post_build()
return success
def _build_latexmk(self, cwd):
# These steps are copied from the Makefile generated by Sphinx >= 1.6
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/texinputs/Makefile_t
images = []
for extension in ("png", "gif", "jpg", "jpeg"):
images.extend(Path(self.absolute_host_output_dir).glob(f"*.{extension}"))
# FIXME: instead of checking by language here, what we want to check if
# ``latex_engine`` is ``platex``
pdfs = []
if self.project.language == "ja":
# Japanese language is the only one that requires this extra
# step. I don't know exactly why but most of the documentation that
# I read differentiate this language from the others. I suppose
# it's because it mix kanji (Chinese) with its own symbols.
pdfs = Path(self.absolute_host_output_dir).glob("*.pdf")
for image in itertools.chain(images, pdfs):
self.run(
"extractbb",
image.name,
cwd=self.absolute_host_output_dir,
record=False,
)
rcfile = "latexmkrc"
if self.project.language == "ja":
rcfile = "latexmkjarc"
self.run(
"cat",
rcfile,
cwd=self.absolute_host_output_dir,
)
if self.build_env.command_class == DockerBuildCommand:
latex_class = DockerLatexBuildCommand
else:
latex_class = LatexBuildCommand
cmd = [
"latexmk",
"-r",
rcfile,
# FIXME: check for platex here as well
"-pdfdvi" if self.project.language == "ja" else "-pdf",
# When ``-f`` is used, latexmk will continue building if it
# encounters errors. We still receive a failure exit code in this
# case, but the correct steps should run.
"-f",
"-dvi-",
"-ps-",
f"-jobname={self.project.slug}",
"-interaction=nonstopmode",
]
cmd_ret = self.build_env.run_command_class(
cls=latex_class,
cmd=cmd,
warn_only=True,
cwd=self.absolute_host_output_dir,
)
self.pdf_file_name = f"{self.project.slug}.pdf"
return cmd_ret.successful
def _post_build(self):
"""Internal post build to cleanup PDF output directory and leave only one .pdf file."""
if not self.pdf_file_name:
raise BuildUserError(BuildUserError.PDF_NOT_FOUND)
# TODO: merge this with ePUB since it's pretty much the same
temp_pdf_file = f"/tmp/{self.project.slug}-{self.version.slug}.pdf"
target_file = os.path.join(
self.absolute_container_output_dir,
self.pdf_file_name,
)
# NOTE: we currently support only one .pdf per version
pdf_sphinx_filepath = os.path.join(self.absolute_container_output_dir, self.pdf_file_name)
pdf_sphinx_filepath_host = os.path.join(
self.absolute_host_output_dir,
self.pdf_file_name,
)
if os.path.exists(pdf_sphinx_filepath_host):
self.run(
"mv",
pdf_sphinx_filepath,
temp_pdf_file,
cwd=self.project_path,
record=False,
)
self.run(
"rm",
"-r",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run(
"mkdir",
"-p",
self.absolute_container_output_dir,
cwd=self.project_path,
record=False,
)
self.run("mv", temp_pdf_file, target_file, cwd=self.project_path, record=False)
| PdfBuilder |
python | boto__boto3 | tests/unit/docs/test_service.py | {
"start": 701,
"end": 11363
} | class ____(BaseDocsTest):
def test_document_service(self):
service_documenter = ServiceDocumenter(
'myservice', self.session, self.root_services_path
)
contents = service_documenter.document_service().decode('utf-8')
lines = [
'*********',
'MyService',
'*********',
'======',
'Client',
'======',
'.. py:class:: MyService.Client',
'These are the available methods:',
' myservice/client/sample_operation',
'==========',
'Paginators',
'==========',
'The available paginators are:',
' myservice/paginator/SampleOperation',
'=======',
'Waiters',
'=======',
'The available waiters are:',
' myservice/waiter/SampleOperationComplete',
'=========',
'Resources',
'=========',
'Resources are available in boto3 via the ',
'``resource`` method. For more detailed instructions ',
'and examples on the usage of resources, see the ',
'resources ',
'The available resources are:',
' myservice/service-resource/index',
' myservice/sample/index',
]
self.assert_contains_lines_in_order(lines, contents)
self.assert_contains_lines_in_order(
[
'================',
'Service Resource',
'================',
'.. py:class:: MyService.ServiceResource()',
' A resource representing AWS MyService::',
' import boto3',
" myservice = boto3.resource('myservice')",
'Actions',
"These are the resource's available actions:",
'.. toctree::',
' :maxdepth: 1',
' :titlesonly:',
' sample_operation',
'Sub-resources',
"These are the resource's available sub-resources:",
'.. toctree::',
' :maxdepth: 1',
' :titlesonly:',
' Sample',
'Collections',
"These are the resource's available collections:",
'.. toctree::',
' :maxdepth: 1',
' :titlesonly:',
' samples',
],
self.get_nested_service_contents(
'myservice', 'service-resource', 'index'
),
)
self.assert_contains_lines_in_order(
[
'======',
'Sample',
'======',
'.. py:class:: MyService.Sample(name)',
"These are the resource's available identifiers:",
'.. toctree::',
' :maxdepth: 1',
' :titlesonly:',
' name',
"These are the resource's available attributes:",
'.. toctree::',
' :maxdepth: 1',
' :titlesonly:',
' bar',
' foo',
"These are the resource's available actions:",
'.. toctree::',
' :maxdepth: 1',
' :titlesonly:',
' load',
' operate',
' reload',
"These are the resource's available waiters:",
'.. toctree::',
' :maxdepth: 1',
' :titlesonly:',
' wait_until_complete',
],
self.get_nested_service_contents('myservice', 'sample', 'index'),
)
self.assert_contains_lines_in_order(
[
'sample_operation',
'.. py:method:: MyService.Client.sample_operation(**kwargs)',
' **Examples**',
' Sample Description.',
' ::',
' response = client.sample_operation(',
],
self.get_nested_service_contents(
'myservice', 'client', 'sample_operation'
),
)
self.assert_contains_lines_in_order(
[
'SampleOperation',
'.. py:class:: MyService.Paginator.SampleOperation',
' .. py:method:: paginate(**kwargs)',
],
self.get_nested_service_contents(
'myservice', 'paginator', 'SampleOperation'
),
)
self.assert_contains_lines_in_order(
[
'SampleOperationComplete',
'.. py:class:: MyService.Waiter.SampleOperationComplete',
' .. py:method:: wait(**kwargs)',
],
self.get_nested_service_contents(
'myservice', 'waiter', 'SampleOperationComplete'
),
)
self.assert_contains_lines_in_order(
[
'sample_operation',
'.. py:method:: MyService.ServiceResource.sample_operation(**kwargs)',
],
self.get_nested_service_contents(
'myservice', 'service-resource', 'sample_operation'
),
)
self.assert_contains_lines_in_order(
[
'Sample',
'.. py:method:: MyService.ServiceResource.Sample(name)',
],
self.get_nested_service_contents(
'myservice', 'service-resource', 'Sample'
),
)
self.assert_contains_lines_in_order(
[
'samples',
'.. py:attribute:: MyService.ServiceResource.samples',
' .. py:method:: all()',
' .. py:method:: filter(**kwargs)',
' .. py:method:: limit(**kwargs)',
' .. py:method:: page_size(**kwargs)',
],
self.get_nested_service_contents(
'myservice', 'service-resource', 'samples'
),
)
self.assert_contains_lines_in_order(
[
'name',
'.. py:attribute:: MyService.Sample.name',
],
self.get_nested_service_contents('myservice', 'sample', 'name'),
)
self.assert_contains_lines_in_order(
[
'name',
'.. py:attribute:: MyService.Sample.name',
],
self.get_nested_service_contents('myservice', 'sample', 'name'),
)
self.assert_contains_lines_in_order(
[
'bar',
'.. py:attribute:: MyService.Sample.bar',
],
self.get_nested_service_contents('myservice', 'sample', 'bar'),
)
self.assert_contains_lines_in_order(
[
'load',
'.. py:method:: MyService.Sample.load()',
],
self.get_nested_service_contents('myservice', 'sample', 'load'),
)
self.assert_contains_lines_in_order(
[
'wait_until_complete',
'.. py:method:: MyService.Sample.wait_until_complete(**kwargs)',
],
self.get_nested_service_contents(
'myservice', 'sample', 'wait_until_complete'
),
)
def test_document_service_no_resource(self):
os.remove(self.resource_model_file)
service_documenter = ServiceDocumenter(
'myservice', self.session, self.root_services_path
)
contents = service_documenter.document_service().decode('utf-8')
assert 'Service Resource' not in contents
def test_document_service_no_paginators(self):
# Delete the resource model so that the resource is not documented
# as it may try to look at the paginator model during documentation.
os.remove(self.resource_model_file)
os.remove(self.paginator_model_file)
service_documenter = ServiceDocumenter(
'myservice', self.session, self.root_services_path
)
contents = service_documenter.document_service().decode('utf-8')
assert 'Paginators' not in contents
def test_document_service_no_waiter(self):
# Delete the resource model so that the resource is not documented
# as it may try to look at the waiter model during documentation.
os.remove(self.resource_model_file)
os.remove(self.waiter_model_file)
service_documenter = ServiceDocumenter(
'myservice', self.session, self.root_services_path
)
contents = service_documenter.document_service().decode('utf-8')
assert 'Waiters' not in contents
def test_creates_correct_path_to_examples_based_on_service_name(self):
path = os.sep.join(
[os.path.dirname(boto3.__file__), 'examples', 'myservice.rst']
)
path = os.path.realpath(path)
with mock.patch('os.path.isfile') as isfile:
isfile.return_value = False
s = ServiceDocumenter(
'myservice', self.session, self.root_services_path
)
s.document_service()
assert isfile.call_args_list[-1] == mock.call(path)
def test_injects_examples_when_found(self):
examples_path = os.sep.join(
[os.path.dirname(__file__), '..', 'data', 'examples']
)
service_documenter = ServiceDocumenter(
'myservice', self.session, self.root_services_path
)
service_documenter.EXAMPLE_PATH = examples_path
contents = service_documenter.document_service().decode('utf-8')
assert 'This is an example' in contents
assert 'This is for another service' not in contents
def test_service_with_context_params(self):
self.json_model['clientContextParams'] = {
'MyContextParam': {
'documentation': 'This is my context param',
'type': 'boolean',
}
}
self.setup_client_and_resource()
service_documenter = ServiceDocumenter(
'myservice', self.session, self.root_services_path
)
contents = service_documenter.document_service().decode('utf-8')
lines = [
"=========================",
"Client Context Parameters",
"=========================",
"* ``my_context_param`` (boolean) - This is my context param",
]
self.assert_contains_lines_in_order(lines, contents)
| TestServiceDocumenter |
python | matplotlib__matplotlib | galleries/examples/specialty_plots/skewt.py | {
"start": 3001,
"end": 3438
} | class ____(mspines.Spine):
def _adjust_location(self):
pts = self._path.vertices
if self.spine_type == 'top':
pts[:, 0] = self.axes.upper_xlim
else:
pts[:, 0] = self.axes.lower_xlim
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
| SkewSpine |
python | openai__openai-python | src/openai/types/responses/response_file_search_tool_call.py | {
"start": 260,
"end": 1062
} | class ____(BaseModel):
attributes: Optional[Dict[str, Union[str, float, bool]]] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard. Keys are
strings with a maximum length of 64 characters. Values are strings with a
maximum length of 512 characters, booleans, or numbers.
"""
file_id: Optional[str] = None
"""The unique ID of the file."""
filename: Optional[str] = None
"""The name of the file."""
score: Optional[float] = None
"""The relevance score of the file - a value between 0 and 1."""
text: Optional[str] = None
"""The text that was retrieved from the file."""
| Result |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.