language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0059_add_version_date_index.py | {
"start": 150,
"end": 511
} | class ____(migrations.Migration):
safe = Safe.always()
dependencies = [
("builds", "0058_alter_version_created_alter_version_modified"),
]
operations = [
migrations.AddIndex(
model_name="build",
index=models.Index(fields=["version", "date"], name="builds_buil_version_259bbf_idx"),
),
]
| Migration |
python | jpadilla__pyjwt | jwt/exceptions.py | {
"start": 327,
"end": 480
} | class ____(DecodeError):
"""Raised when a token's signature doesn't match the one provided as part of
the token."""
pass
| InvalidSignatureError |
python | great-expectations__great_expectations | great_expectations/render/renderer/content_block/content_block.py | {
"start": 810,
"end": 17412
} | class ____(Renderer):
_rendered_component_type: Type[RenderedComponentContent] = TextContent
_default_header = ""
_default_content_block_styling: Dict[str, JSONValues] = {"classes": ["col-12"]}
_default_element_styling = {}
@classmethod
def validate_input(cls, render_object: Any) -> None:
pass
@classmethod
def render(cls, render_object: Any, **kwargs) -> Union[_rendered_component_type, Any, None]:
cls.validate_input(render_object)
exception_list_content_block: bool = kwargs.get("exception_list_content_block", False)
data_docs_exception_message = """\
An unexpected Exception occurred during data docs rendering. Because of this error, certain parts of data docs will \
not be rendered properly and/or may not appear altogether. Please use the trace, included in this message, to \
diagnose and repair the underlying issue. Detailed information follows:
""" # noqa: E501 # FIXME CoP
runtime_configuration = {
"styling": cls._get_element_styling(),
"include_column_name": kwargs.pop("include_column_name", None),
}
# The specific way we render the render_object is contingent on the type of the object
render_fn: Callable
if isinstance(render_object, list):
render_fn = cls._render_list
else:
render_fn = cls._render_other
result = render_fn(
render_object,
exception_list_content_block,
runtime_configuration,
data_docs_exception_message,
kwargs,
)
return result
@classmethod
def _get_content_block_fn_from_render_object(
cls, obj_: ExpectationConfiguration | ExpectationValidationResult
):
expectation_type = cls._get_expectation_type(obj_)
expectation_config = (
obj_.expectation_config if isinstance(obj_, ExpectationValidationResult) else obj_
)
return cls._get_content_block_fn(
expectation_type=expectation_type, expectation_config=expectation_config
)
@classmethod
def _render_list( # noqa: C901, PLR0912 # FIXME CoP
cls,
render_object: list,
exception_list_content_block: bool,
runtime_configuration: dict,
data_docs_exception_message: str,
kwargs: dict,
) -> Optional[_rendered_component_type]:
"""Helper method to render list render_objects - refer to `render` for more context"""
blocks = []
has_failed_evr = (
False if isinstance(render_object[0], ExpectationValidationResult) else None
)
for obj_ in render_object:
content_block_fn = cls._get_content_block_fn_from_render_object(obj_)
if isinstance(obj_, ExpectationValidationResult) and not obj_.success:
has_failed_evr = True
if content_block_fn is not None and not exception_list_content_block:
try:
if isinstance(obj_, ExpectationValidationResult):
expectation_config = obj_.expectation_config
result = content_block_fn(
configuration=expectation_config,
result=obj_,
runtime_configuration=runtime_configuration,
**kwargs,
)
else:
result = content_block_fn(
configuration=obj_,
runtime_configuration=runtime_configuration,
**kwargs,
)
except Exception as e:
exception_traceback = traceback.format_exc()
exception_message = (
data_docs_exception_message
+ f'{type(e).__name__}: "{e!s}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
if isinstance(obj_, ExpectationValidationResult):
content_block_fn = cls._get_content_block_fn("_missing_content_block_fn")
expectation_config = obj_.expectation_config
result = content_block_fn(
configuration=expectation_config,
result=obj_,
runtime_configuration=runtime_configuration,
**kwargs,
)
else:
content_block_fn = cls._missing_content_block_fn
result = content_block_fn(
configuration=obj_,
runtime_configuration=runtime_configuration,
**kwargs,
)
else: # noqa: PLR5501 # FIXME CoP
if isinstance(obj_, ExpectationValidationResult):
content_block_fn = (
cls._missing_content_block_fn
if exception_list_content_block
else cls._get_content_block_fn("_missing_content_block_fn")
)
expectation_config = obj_.expectation_config
result = content_block_fn(
configuration=expectation_config,
result=obj_,
runtime_configuration=runtime_configuration,
**kwargs,
)
else:
content_block_fn = cls._missing_content_block_fn
result = content_block_fn(
configuration=obj_,
runtime_configuration=runtime_configuration,
**kwargs,
)
if result is not None:
if isinstance(obj_, ExpectationConfiguration):
expectation_notes = cls._render_expectation_notes(obj_)
if expectation_notes:
# this adds collapse content block to expectation string
result[0] = [result[0], expectation_notes]
horizontal_rule = RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "",
"tag": "hr",
"styling": {
"classes": ["mt-1", "mb-1"],
},
},
"styling": {"parent": {"styles": {"list-style-type": "none"}}},
}
)
result.append(horizontal_rule)
blocks += result
if len(blocks) > 0:
rendered_component_type_init_kwargs = {
cls._content_block_type: blocks,
"styling": cls._get_content_block_styling(),
}
rendered_component_type_default_init_kwargs = getattr(
cls, "_rendered_component_default_init_kwargs", {}
)
rendered_component_type_init_kwargs.update(rendered_component_type_default_init_kwargs)
content_block = cls._rendered_component_type(**rendered_component_type_init_kwargs)
cls._process_content_block(
content_block,
has_failed_evr=has_failed_evr,
render_object=render_object,
)
return content_block
else:
return None
@classmethod
def _render_other( # noqa: C901 # FIXME CoP
cls,
render_object: Any,
exception_list_content_block: bool,
runtime_configuration: dict,
data_docs_exception_message: str,
kwargs: dict,
) -> Any:
"""Helper method to render non-list render_objects - refer to `render` for more context"""
content_block_fn = cls._get_content_block_fn_from_render_object(render_object)
if content_block_fn is not None and not exception_list_content_block:
try:
if isinstance(render_object, ExpectationValidationResult):
result = content_block_fn(
result=render_object,
runtime_configuration=runtime_configuration,
**kwargs,
)
else:
result = content_block_fn(
configuration=render_object,
runtime_configuration=runtime_configuration,
**kwargs,
)
except Exception as e:
exception_traceback = traceback.format_exc()
exception_message = (
data_docs_exception_message
+ f'{type(e).__name__}: "{e!s}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
if isinstance(render_object, ExpectationValidationResult):
content_block_fn = cls._get_content_block_fn("_missing_content_block_fn")
result = content_block_fn(
result=render_object,
runtime_configuration=runtime_configuration,
**kwargs,
)
else:
content_block_fn = cls._missing_content_block_fn
result = content_block_fn(
configuration=render_object,
runtime_configuration=runtime_configuration,
**kwargs,
)
else: # noqa: PLR5501 # FIXME CoP
if isinstance(render_object, ExpectationValidationResult):
content_block_fn = (
cls._missing_content_block_fn
if exception_list_content_block
else cls._get_content_block_fn("_missing_content_block_fn")
)
result = content_block_fn(
result=render_object,
runtime_configuration=runtime_configuration,
**kwargs,
)
else:
content_block_fn = cls._missing_content_block_fn
result = content_block_fn(
configuration=render_object,
runtime_configuration=runtime_configuration,
**kwargs,
)
if result is not None:
if isinstance(render_object, ExpectationConfiguration):
expectation_notes = cls._render_expectation_notes(render_object)
if expectation_notes:
result.append(expectation_notes)
return result
@classmethod
def _render_expectation_description(
cls,
configuration: ExpectationConfiguration,
runtime_configuration: dict,
**kwargs,
) -> list[RenderedStringTemplateContent]:
expectation = configuration.to_domain_obj()
description = expectation.description
if not description:
raise ValueError("Cannot render an expectation with no description.") # noqa: TRY003 # FIXME CoP
return [
RenderedStringTemplateContent(
string_template={"template": description},
styling=runtime_configuration.get("styling", {}),
)
]
@classmethod
def _render_expectation_notes(
cls, expectation_config: ExpectationConfiguration
) -> CollapseContent:
notes = expectation_config.notes
if not notes:
return None
else:
collapse_link = RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$icon",
"params": {"icon": ""},
"styling": {
"params": {
"icon": {
"classes": ["fas", "fa-comment", "text-info"],
"tag": "i",
}
}
},
},
}
)
if isinstance(notes, str):
note_content = [
RenderedMarkdownContent(
**{
"content_block_type": "markdown",
"markdown": notes,
"styling": {"parent": {"styles": {"color": "red"}}},
}
)
]
elif isinstance(notes, list):
note_content = [
RenderedMarkdownContent(
**{
"content_block_type": "markdown",
"markdown": note,
"styling": {"parent": {}},
}
)
for note in notes
]
else:
note_content = None
notes_block = TextContent(
**{
"content_block_type": "text",
"subheader": "Notes:",
"text": note_content,
"styling": {
"classes": ["col-12", "mt-2", "mb-2"],
"parent": {"styles": {"list-style-type": "none"}},
},
}
)
return CollapseContent(
**{
"collapse_toggle_link": collapse_link,
"collapse": [notes_block],
"inline_link": True,
"styling": {
"body": {"classes": ["card", "card-body", "p-1"]},
"parent": {"styles": {"list-style-type": "none"}},
},
}
)
@classmethod
def _process_content_block(cls, content_block, has_failed_evr, render_object=None) -> None:
header = cls._get_header()
if header != "":
content_block.header = header
@classmethod
def _get_content_block_fn(
cls,
expectation_type: str,
expectation_config: ExpectationConfiguration | None = None,
) -> Callable | None:
# Prioritize `description` param on Expectation before falling back to renderer
if expectation_config:
content_block_fn = cls._get_content_block_fn_from_expectation_description(
expectation_config=expectation_config,
)
if content_block_fn:
return content_block_fn
content_block_fn = get_renderer_impl(
object_name=expectation_type, renderer_type=LegacyRendererType.PRESCRIPTIVE
)
return content_block_fn[1] if content_block_fn else None
@classmethod
def _get_content_block_fn_from_expectation_description(
cls, expectation_config: ExpectationConfiguration
) -> Callable | None:
expectation = expectation_config.to_domain_obj()
description = expectation.description
if description:
return cls._render_expectation_description
return None
@classmethod
def list_available_expectations(cls):
expectations = [
object_name
for object_name in _registered_renderers
if object_name.startswith("expect_")
]
return expectations
@classmethod
def _missing_content_block_fn(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
return []
@classmethod
def _get_content_block_styling(cls):
return cls._default_content_block_styling
@classmethod
def _get_element_styling(cls):
return cls._default_element_styling
@classmethod
def _get_header(cls):
return cls._default_header
| ContentBlockRenderer |
python | sphinx-doc__sphinx | sphinx/transforms/__init__.py | {
"start": 14242,
"end": 14462
} | class ____(SphinxTransform):
"""Emit :event:`doctree-read` event."""
default_priority = 880
def apply(self, **kwargs: Any) -> None:
self.env.events.emit('doctree-read', self.document)
| DoctreeReadEvent |
python | PyCQA__pydocstyle | src/tests/test_cases/nested_class.py | {
"start": 192,
"end": 774
} | class ____:
expect('PublicNestedClass',
'D106: Missing docstring in public nested class')
class PublicNestedClass:
expect('PublicNestedClassInPublicNestedClass',
'D106: Missing docstring in public nested class')
class PublicNestedClassInPublicNestedClass:
pass
class _PrivateNestedClassInPublicNestedClass:
pass
class _PrivateNestedClass:
class PublicNestedClassInPrivateNestedClass:
pass
class _PrivateNestedClassInPrivateNestedClass:
pass
| PublicClass |
python | getsentry__sentry | tests/sentry/integrations/discord/test_integration.py | {
"start": 9748,
"end": 18496
} | class ____(DiscordSetupTestCase):
def setUp(self) -> None:
super().setUp()
self.user_id = "user1234"
self.guild_id = "12345"
self.guild_name = "guild_name"
@responses.activate
def test_get_guild_name(self) -> None:
provider = self.provider()
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{GUILD_URL.format(guild_id=self.guild_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json={
"id": self.guild_id,
"name": self.guild_name,
},
)
responses.add(
responses.POST,
url=self.token_url,
json={
"access_token": "access_token",
},
)
responses.add(
responses.GET, url=f"{DiscordClient.base_url}/users/@me", json={"id": "user_1234"}
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}/users/@me/guilds/{self.guild_id}/member",
json={},
)
result = provider.build_integration({"guild_id": self.guild_id, "code": self.user_id})
assert result["name"] == self.guild_name
@responses.activate
def test_build_integration_no_code_in_state(self) -> None:
provider = self.provider()
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{GUILD_URL.format(guild_id=self.guild_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json={
"id": self.guild_id,
"name": self.guild_name,
},
)
with pytest.raises(IntegrationError):
provider.build_integration({"guild_id": "guild_id", "code": ""})
@responses.activate
def test_get_guild_name_failure(self) -> None:
provider = self.provider()
(responses.add(responses.GET, f"{DISCORD_BASE_URL}/guilds/guild_name", status=500),)
responses.add(
responses.POST,
url=self.token_url,
json={
"access_token": "access_token",
},
)
responses.add(
responses.GET, url=f"{DiscordClient.base_url}/users/@me", json={"id": self.user_id}
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}/users/@me/guilds/{self.guild_id}/member",
json={},
)
result = provider.build_integration({"guild_id": self.guild_id, "code": self.user_id})
assert result["name"] == self.guild_id
@responses.activate
def test_get_user_insufficient_permission(self) -> None:
provider = self.provider()
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{GUILD_URL.format(guild_id=self.guild_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json={
"id": self.guild_id,
"name": self.guild_name,
},
)
responses.add(
responses.POST,
url=self.token_url,
json={
"access_token": "access_token",
},
)
responses.add(
responses.GET, url=f"{DiscordClient.base_url}/users/@me", json={"id": self.user_id}
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}/users/@me/guilds/{self.guild_id}/member",
json={"code": 10004, "message": "Unknown guild"},
status=404,
)
with pytest.raises(IntegrationError):
provider.build_integration({"guild_id": self.guild_id, "code": self.user_id})
@responses.activate
def test_get_discord_user_id(self) -> None:
provider = self.provider()
responses.add(
responses.POST,
url=self.token_url,
json={
"access_token": "access_token",
},
)
responses.add(
responses.GET, url=f"{DiscordClient.base_url}/users/@me", json={"id": self.user_id}
)
result = provider._get_discord_user_id("auth_code", "1")
assert result == self.user_id
@responses.activate
def test_get_discord_user_id_oauth_failure(self) -> None:
provider = self.provider()
responses.add(responses.POST, url=self.token_url, status=500)
with pytest.raises(IntegrationError):
provider._get_discord_user_id("auth_code", "1")
@responses.activate
def test_get_discord_user_id_oauth_no_token(self) -> None:
provider = self.provider()
responses.add(
responses.POST,
url=self.token_url,
json={},
)
with pytest.raises(IntegrationError):
provider._get_discord_user_id("auth_code", "1")
@responses.activate
def test_get_discord_user_id_request_fail(self) -> None:
provider = self.provider()
responses.add(
responses.POST,
url=self.token_url,
json={
"access_token": "access_token",
},
)
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}/users/@me",
status=401,
)
with pytest.raises(IntegrationError):
provider._get_discord_user_id("auth_code", "1")
@responses.activate
@mock.patch("sentry.integrations.discord.client.DiscordClient.set_application_command")
def test_post_install(self, mock_set_application_command: mock.MagicMock) -> None:
provider = self.provider()
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json=[],
)
responses.add(
responses.POST,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
status=200,
)
provider.post_install(
integration=self.integration, organization=self.organization, extra={}
)
assert mock_set_application_command.call_count == 3 # one for each command
@mock.patch("sentry.integrations.discord.client.DiscordClient.set_application_command")
def test_post_install_missing_credentials(
self, mock_set_application_command: mock.MagicMock
) -> None:
provider = self.provider()
provider.application_id = None
provider.post_install(
integration=self.integration, organization=self.organization, extra={}
)
assert mock_set_application_command.call_count == 0
@responses.activate
def test_set_commands_failure(self) -> None:
provider = self.provider()
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
match=[header_matcher({"Authorization": f"Bot {self.bot_token}"})],
json=[],
)
responses.add(
responses.POST,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
body=ApiError("something wrong", 500),
status=500,
)
with pytest.raises(ApiError):
provider.post_install(
integration=self.integration, organization=self.organization, extra={}
)
@responses.activate
def test_get_commands_failure(self) -> None:
provider = self.provider()
responses.add(
responses.GET,
url=f"{DiscordClient.base_url}{APPLICATION_COMMANDS_URL.format(application_id=self.application_id)}",
body=ApiError("something wrong", 500),
status=500,
)
with pytest.raises(ApiError):
provider.post_install(
integration=self.integration, organization=self.organization, extra={}
)
def test_build_integration_invalid_guild_id(self) -> None:
provider = self.provider()
with pytest.raises(
IntegrationError,
match="Invalid guild ID. The Discord guild ID must be entirely numeric.",
):
provider.build_integration(
{
"guild_id": "123abc", # Invalid guild ID (contains non-numeric characters)
"code": "some_auth_code",
}
)
@control_silo_test
| DiscordIntegrationTest |
python | numba__numba | numba/tests/test_struct_ref.py | {
"start": 9967,
"end": 10426
} | class ____(types.StructRef):
def preprocess_fields(self, fields):
# temp name to allow Optional instantiation
self.name = f"numba.PolygonStructType#{id(self)}"
fields = tuple([
('value', types.Optional(types.int64)),
('parent', types.Optional(self)),
])
return fields
polygon_struct_type = PolygonStructType(fields=(
('value', types.Any),
('parent', types.Any)
))
| PolygonStructType |
python | pyqtgraph__pyqtgraph | pyqtgraph/Qt/internals.py | {
"start": 388,
"end": 1931
} | class ____(ctypes.Structure):
pass
if QtVersionInfo[0] == 5:
QArrayData._fields_ = [
("ref", ctypes.c_int),
("size", ctypes.c_int),
("alloc", ctypes.c_uint, 31),
("offset", ctypes.c_ssize_t),
]
QPainterPathPrivate._fields_ = [
("ref", ctypes.c_int),
("adata", ctypes.POINTER(QArrayData)),
]
elif QtVersionInfo[0] == 6:
QArrayData._fields_ = [
("ref", ctypes.c_int),
("flags", ctypes.c_uint),
("alloc", ctypes.c_ssize_t),
]
QPainterPathPrivate._fields_ = [
("ref", ctypes.c_int),
("adata", ctypes.POINTER(QArrayData)),
("data", ctypes.c_void_p),
("size", ctypes.c_ssize_t),
][int(QtVersionInfo >= (6, 10)):]
def get_qpainterpath_element_array(qpath, nelems=None):
resize = nelems is not None
if resize:
qpath.reserve(nelems)
ptr = ctypes.c_void_p.from_address(compat.unwrapinstance(qpath))
if not ptr:
return np.zeros(0, dtype=Element)
ppp = ctypes.cast(ptr, ctypes.POINTER(QPainterPathPrivate)).contents
if QtVersionInfo[0] == 5:
qad = ppp.adata.contents
eptr = ctypes.addressof(qad) + qad.offset
if resize:
qad.size = nelems
elif QtVersionInfo[0] == 6:
eptr = ppp.data
if resize:
ppp.size = nelems
else:
raise NotImplementedError
nelems = qpath.elementCount()
buf = (Element * nelems).from_address(eptr)
return np.frombuffer(buf, dtype=Element)
| QPainterPathPrivate |
python | joke2k__faker | faker/providers/currency/uk_UA/__init__.py | {
"start": 101,
"end": 6795
} | class ____(CurrencyProvider):
# see full list in Ukrainian @ Wiki
# https://uk.wikipedia.org/wiki/%D0%9A%D0%BB%D0%B0%D1%81%D0%B8%D1%84%D1%96%D0%BA%D0%B0%D1%86%D1%96%D1%8F_%D0%B2%D0%B0%D0%BB%D1%8E%D1%82_(ISO_4217)#%D0%9F%D0%B5%D1%80%D0%B5%D0%BB%D1%96%D0%BA_%D0%B4%D1%96%D1%8E%D1%87%D0%B8%D1%85_%D0%BA%D0%BE%D0%B4%D1%96%D0%B2
currencies: ElementsType[Tuple[str, str]] = (
("AED", "Дирхам ОАЕ"),
("AFN", "Афганістанський афгані"),
("ALL", "Албанський лек"),
("AMD", "Вірменський драм"),
("ANG", "Гульден Нідерландських Антилів"),
("AOA", "Ангольська кванза"),
("ARS", "Аргентинське песо"),
("AUD", "Австралійський долар"),
("AWG", "Арубський флорин"),
("AZN", "Азербайджанський манат"),
("BAM", "Конвертовна марка Боснії і Герцоговини"),
("BBD", "Барбадоський долар"),
("BDT", "Бангладешська така"),
("BGN", "Болгарський лев"),
("BHD", "Бахрейнський динар"),
("BIF", "Бурундійський франк"),
("BMD", "Бермудський долар"),
("BND", "Брунейський долар"),
("BOB", "Болівійський болівіано"),
("BRL", "Бразильський реал"),
("BSD", "Багамський долар"),
("BTN", "Бутанський нґултрум"),
("BWP", "Ботсванська пула"),
("BYR", "Білоруський рубль"),
("BZD", "Белізький долар"),
("CAD", "Канадський долар"),
("CDF", "Конголезький франк"),
("CHF", "Швейцарський франк"),
("CLP", "Чилійське песо"),
("CNY", "Китайський юань"),
("COP", "Колумбійське песо"),
("CRC", "Коста-риканський колон"),
("CUP", "Кубинське песо"),
("CVE", "Ескудо Кабо-Верде"),
("CZK", "Чеська крона"),
("DJF", "Джибутійський франк"),
("DKK", "Данська крона"),
("DOP", "Домініканське песо"),
("DZD", "Алжирський динар"),
("EGP", "Єгипетський фунт"),
("ERN", "Еритрейська накфа"),
("ETB", "Ефіопський бир"),
("EUR", "Євро"),
("FJD", "Фіджійський долар"),
("FKP", "Фолклендський фунт"),
("GBP", "Фунт стерлінгів"),
("GEL", "Грузинський ларі"),
("GHS", "Ганський седі"),
("GIP", "Ґібралтарський фунт"),
("GMD", "Гамбійський даласі"),
("GNF", "Гвінейський франк"),
("GTQ", "Ґватемальський кетсаль"),
("GYD", "Гаянський долар"),
("HKD", "Гонконгівський долар"),
("HNL", "Гондураська лемпіра"),
("HTG", "Ґурд Республіки Гаїті"),
("HUF", "Угорський форинт"),
("IDR", "Індонезійська рупія"),
("ILS", "Новий ізраїльський шекель"),
("NIS", "Новий ізраїльський шекель"),
("INR", "Індійська рупія"),
("IQD", "Іракський динар"),
("IRR", "Іранський ріал"),
("ISK", "Ісландська крона"),
("JMD", "Ямайський долар"),
("JOD", "Йорданський динар"),
("JPY", "Японська єна"),
("KES", "Кенійський шилінг"),
("KGS", "Киргизький сом"),
("KHR", "Камбоджійський рієль"),
("KMF", "Коморський франк"),
("KPW", "Північно-корейська вона"),
("KRW", "Південно-корейська вона"),
("KWD", "Кувейтський динар"),
("KYD", "Долар Кайманових островів"),
("KZT", "Казахстанський теньґе"),
("LAK", "Лаоський кіп"),
("LBP", "Ліванський фунт"),
("LKR", "Рупія Шрі-Ланки"),
("LRD", "Ліберійський долар"),
("LSL", "Лоті Королівства Лесото"),
("LTL", "Литовська лита"),
("LYD", "Лівійський динар"),
("MAD", "Марокканський дирхам"),
("MDL", "Молдовський лей"),
("MGA", "Малагасійський аріарі"),
("MKD", "Македонський денар"),
("MMK", "М'янмський к'ят"),
("MNT", "Монгольський тугрик"),
("MOP", "Маканська патака"),
("MRO", "Мавританська уґія"),
("MUR", "Маврикійська рупія"),
("MVR", "Мальдівська руфія"),
("MWK", "Малавійська квача"),
("MXN", "Мексиканське песо"),
("MYR", "Малайзійський рингіт"),
("MZN", "Мозамбіцький метикал"),
("NAD", "Намібійський долар"),
("NGN", "Ніґерійська найра"),
("NIO", "Золота кордоба"),
("NOK", "Норвезька крона"),
("NPR", "Непальська рупія"),
("NZD", "Новозеландський долар"),
("OMR", "Оманський ріал"),
("PAB", "Панамське бальбоа"),
("PEN", "Перуанський соль"),
("PGK", "Папуановогвинейська кіна"),
("PHP", "Філіппінський песо"),
("PKR", "Пакистанська рупія"),
("PLN", "Польский злотий"),
("PYG", "Парагвайський ґуарані"),
("QAR", "Катарський ріал"),
("RON", "Румунський лей"),
("RSD", "Сербський динар"),
("RUB", "Російський рубль"),
("RWF", "Руандійський франк"),
("SAR", "Саудівський ріал"),
("SBD", "Долар Соломонових Островів"),
("SCR", "Сейшельська рупія"),
("SDG", "Суданський фунт"),
("SEK", "Шведська крона"),
("SGD", "Сінгапурський долар"),
("SHP", "Фунт Святої Єлени"),
("SLL", "Леоне Сьєрра-Леоне"),
("SOS", "Сомалійський шилінг"),
("SRD", "Суринамський долар"),
("STD", "Добра Сан-Томе і Принсіпі"),
("SVC", "Сальвадорський колон"),
("SYP", "Сирійський фунт"),
("SZL", "Свазілендський ліланґені"),
("THB", "Таїландський бат"),
("TJS", "Таджицький сомоні"),
("TMT", "Туркменський манат"),
("TND", "Туніський динар"),
("TOP", "Тонґська паанга"),
("TRY", "Турецька ліра"),
("TTD", "Долар Тринідаду і Тобаго"),
("TWD", "Новий тайванський долар"),
("TZS", "Танзанійський шилінг"),
("UAH", "Українська гривня"),
("UGX", "Угандійський шилінг"),
("USD", "Долар США"),
("UYU", "Уругвайське песо"),
("UZS", "Узбецький сум"),
("VEF", "Венесуельский болівар"),
("VND", "В'єтнамський донг"),
("VUV", "Вануатська вану"),
("WST", "Самоанська тала"),
("XAF", "Центральноафриканський франк"),
("XCD", "Східнокарибський долар"),
("XDR", "Спеціальні права запозичення"),
("XOF", "Західноафриканський франк"),
("XPF", "Французький тихоокеанський франк"),
("YER", "Єменський ріал"),
("ZAR", "Південноафриканський ранд"),
("ZMW", "Замбійська квача"),
("ZWD", "Зімбабвійський долар"),
)
price_formats = ["#,##", "%#,##", "%##,##", "% ###,##", "%# ###,##"]
def pricetag(self) -> str:
return self.numerify(self.random_element(self.price_formats)) + "\N{NO-BREAK SPACE}грн."
| Provider |
python | apache__airflow | airflow-core/src/airflow/utils/log/non_caching_file_handler.py | {
"start": 1396,
"end": 2211
} | class ____(FileHandler):
"""
An extension of FileHandler, advises the Kernel to not cache the file in PageCache when it is written.
While there is nothing wrong with such cache (it will be cleaned when memory is needed), it
causes ever-growing memory usage when scheduler is running as it keeps on writing new log
files and the files are not rotated later on. This might lead to confusion for our users,
who are monitoring memory usage of Scheduler - without realising that it is harmless and
expected in this case.
See https://github.com/apache/airflow/issues/14924
Adding the advice to Kernel might help with not generating the cache memory growth in the first place.
"""
def _open(self):
return make_file_io_non_caching(super()._open())
| NonCachingFileHandler |
python | PrefectHQ__prefect | tests/server/models/test_concurrency_limits.py | {
"start": 81,
"end": 1571
} | class ____:
async def test_creating_concurrency_limits(self, session):
concurrency_limit = await models.concurrency_limits.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimit(
tag="this bad boy", concurrency_limit=100
),
)
assert concurrency_limit.tag == "this bad boy"
assert concurrency_limit.concurrency_limit == 100
async def test_create_concurrency_limit_updates_on_conflict(self, session):
concurrency_limit = await models.concurrency_limits.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimit(
tag="fits this many concurrent runs", concurrency_limit=100
),
)
assert concurrency_limit.tag == "fits this many concurrent runs"
assert concurrency_limit.concurrency_limit == 100
creation_time = concurrency_limit.updated
time.sleep(0.1)
updated_limit = await models.concurrency_limits.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimit(
tag="fits this many concurrent runs", concurrency_limit=200
),
)
assert updated_limit.tag == "fits this many concurrent runs"
assert updated_limit.concurrency_limit == 200
assert updated_limit.updated > creation_time
| TestCreatingConcurrencyLimits |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 65865,
"end": 66204
} | class ____(Granularity):
"""
Represents row-wise granularity in quantization.
This is a special case of per-axis quantization and is unique to Float8 matmuls
where the input is quantized with a block_size of (1, ..., input.shape[-1]). And the weight
is quantized with a block_size of (1, weight.shape[1]).
"""
| PerRow |
python | realpython__materials | python-protocol/members.py | {
"start": 71,
"end": 541
} | class ____(Protocol):
class_attribute: ClassVar[int]
instance_attribute: str
def instance_method(self, arg: int) -> str: ...
@classmethod
def class_method(cls) -> str: ...
@staticmethod
def static_method(arg: int) -> str: ...
@property
def property_name(self) -> str: ...
@property_name.setter
def property_name(self, value: str) -> None: ...
@abstractmethod
def abstract_method(self) -> str: ...
| ProtocolMembersDemo |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/flare/answer_inserter.py | {
"start": 5896,
"end": 6800
} | class ____(BaseLookaheadAnswerInserter):
"""
Direct lookahead answer inserter.
Simple inserter module that directly inserts answers into
the [Search(query)] tags in the lookahead response.
"""
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
def insert(
self,
response: str,
query_tasks: List[QueryTask],
answers: List[str],
prev_response: Optional[str] = None,
) -> str:
"""Insert answers into response."""
for query_task, answer in zip(query_tasks, answers):
response = (
response[: query_task.start_idx]
+ answer
+ response[query_task.end_idx + 1 :]
)
return response
| DirectLookaheadAnswerInserter |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py | {
"start": 15033,
"end": 15173
} | class ____(BaseModel):
class Config:
extra = Extra.forbid
metadataSpecVersion: str
data: Data
| ConnectorMetadataDefinitionV0 |
python | tensorflow__tensorflow | tensorflow/python/util/object_identity_test.py | {
"start": 2290,
"end": 3088
} | class ____(test.TestCase):
def testDifference(self):
class Element(object):
pass
a = Element()
b = Element()
c = Element()
set1 = object_identity.ObjectIdentitySet([a, b])
set2 = object_identity.ObjectIdentitySet([b, c])
diff_set = set1.difference(set2)
self.assertIn(a, diff_set)
self.assertNotIn(b, diff_set)
self.assertNotIn(c, diff_set)
def testDiscard(self):
a = object()
b = object()
set1 = object_identity.ObjectIdentitySet([a, b])
set1.discard(a)
self.assertIn(b, set1)
self.assertNotIn(a, set1)
def testClear(self):
a = object()
b = object()
set1 = object_identity.ObjectIdentitySet([a, b])
set1.clear()
self.assertLen(set1, 0)
if __name__ == '__main__':
test.main()
| ObjectIdentitySetTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 967660,
"end": 971126
} | class ____(Predicate):
"""
FieldValidPredicate schema wrapper.
Parameters
----------
field : str, :class:`FieldName`
Field to be tested.
valid : bool
If set to true the field's value has to be valid, meaning both not ``null`` and not
`NaN
<https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/NaN>`__.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit for the field to be tested.
"""
_schema = {"$ref": "#/definitions/FieldValidPredicate"}
def __init__(
self,
field: Optional[str | SchemaBase] = Undefined,
valid: Optional[bool] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
**kwds,
):
super().__init__(field=field, valid=valid, timeUnit=timeUnit, **kwds)
| FieldValidPredicate |
python | walkccc__LeetCode | solutions/3486. Longest Special Path II/3486.py | {
"start": 0,
"end": 1181
} | class ____:
# Similar to 3425. Longest Special Path
def longestSpecialPath(
self,
edges: list[list[int]],
nums: list[int]
) -> list[int]:
maxLength = 0
minNodes = 1
graph = [[] for _ in range(len(nums))]
for u, v, w in edges:
graph[u].append((v, w))
graph[v].append((u, w))
prefix = [0]
lastSeenDepth = {}
def dfs(
u: int,
prev: int,
leftBoundary: list[int],
) -> None:
nonlocal maxLength, minNodes
prevDepth = lastSeenDepth.get(nums[u], 0)
lastSeenDepth[nums[u]] = len(prefix)
if prevDepth != 0:
leftBoundary = sorted(leftBoundary + [prevDepth])[-2:]
length = prefix[-1] - prefix[leftBoundary[0]]
nodes = len(prefix) - leftBoundary[0]
if length > maxLength or (length == maxLength and nodes < minNodes):
maxLength = length
minNodes = nodes
for v, w in graph[u]:
if v == prev:
continue
prefix.append(prefix[-1] + w)
dfs(v, u, leftBoundary)
prefix.pop()
lastSeenDepth[nums[u]] = prevDepth
dfs(0, -1, leftBoundary=[0, 0])
return [maxLength, minNodes]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/hamming-distance.py | {
"start": 29,
"end": 468
} | class ____(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
distance = 0
z = x ^ y
while z:
distance += 1
z &= z - 1
return distance
def hammingDistance2(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
return bin(x ^ y).count('1')
| Solution |
python | pytest-dev__pytest | doc/en/example/assertion/failure_demo.py | {
"start": 3219,
"end": 4226
} | class ____:
def test_raises(self):
s = "qwe"
raises(TypeError, int, s)
def test_raises_doesnt(self):
raises(OSError, int, "3")
def test_raise(self):
raise ValueError("demo error")
def test_tupleerror(self):
a, b = [1] # noqa: F841
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
items = [1, 2, 3]
print(f"items is {items!r}")
a, b = items.pop()
def test_some_error(self):
if namenotexi: # noqa: F821
pass
def func1(self):
assert 41 == 42
# thanks to Matthew Scott for this test
def test_dynamic_compile_shows_nicely():
import importlib.util
import sys
src = "def foo():\n assert 1 == 0\n"
name = "abc-123"
spec = importlib.util.spec_from_loader(name, loader=None)
module = importlib.util.module_from_spec(spec)
code = compile(src, name, "exec")
exec(code, module.__dict__)
sys.modules[name] = module
module.foo()
| TestRaises |
python | donnemartin__interactive-coding-challenges | recursion_dynamic/knapsack_01/test_knapsack.py | {
"start": 18,
"end": 1701
} | class ____(unittest.TestCase):
def test_knapsack_bottom_up(self):
knapsack = Knapsack()
self.assertRaises(TypeError, knapsack.fill_knapsack, None, None)
self.assertEqual(knapsack.fill_knapsack(0, 0), 0)
items = []
items.append(Item(label='a', value=2, weight=2))
items.append(Item(label='b', value=4, weight=2))
items.append(Item(label='c', value=6, weight=4))
items.append(Item(label='d', value=9, weight=5))
total_weight = 8
expected_value = 13
results = knapsack.fill_knapsack(items, total_weight)
self.assertEqual(results[0].label, 'd')
self.assertEqual(results[1].label, 'b')
total_value = 0
for item in results:
total_value += item.value
self.assertEqual(total_value, expected_value)
print('Success: test_knapsack_bottom_up')
def test_knapsack_top_down(self):
knapsack = KnapsackTopDown()
self.assertRaises(TypeError, knapsack.fill_knapsack, None, None)
self.assertEqual(knapsack.fill_knapsack(0, 0), 0)
items = []
items.append(Item(label='a', value=2, weight=2))
items.append(Item(label='b', value=4, weight=2))
items.append(Item(label='c', value=6, weight=4))
items.append(Item(label='d', value=9, weight=5))
total_weight = 8
expected_value = 13
self.assertEqual(knapsack.fill_knapsack(items, total_weight), expected_value)
print('Success: test_knapsack_top_down')
def main():
test = TestKnapsack()
test.test_knapsack_bottom_up()
test.test_knapsack_top_down()
if __name__ == '__main__':
main()
| TestKnapsack |
python | apache__airflow | airflow-ctl/src/airflowctl/api/client.py | {
"start": 2153,
"end": 2935
} | class ____(enum.Enum):
"""Client kind enum."""
CLI = "cli"
AUTH = "auth"
def add_correlation_id(request: httpx.Request):
request.headers["correlation-id"] = str(uuid7())
def get_json_error(response: httpx.Response):
"""Raise a ServerResponseError if we can extract error info from the error."""
err = ServerResponseError.from_response(response)
if err:
# This part is used in integration tests to verify the error message
# If you are updating here don't forget to update the airflow-ctl-tests
log.warning("Server error ", extra=dict(err.response.json()))
raise err
def raise_on_4xx_5xx(response: httpx.Response):
return get_json_error(response) or response.raise_for_status()
# Credentials for the API
| ClientKind |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_facets_performance.py | {
"start": 8779,
"end": 18834
} | class ____(GenericOffsetPaginator):
def get_result(self, limit, cursor=None):
assert limit > 0
offset = cursor.offset if cursor is not None else 0
# Request 1 more than limit so we can tell if there is another page
# Use raw_limit for the histogram itself so bucket calculations are correct
data = self.data_fn(offset=offset, limit=limit + 1, raw_limit=limit)
if isinstance(data["tags"], list):
has_more = len(data["tags"]) == limit + 1
if has_more:
data["tags"].pop()
else:
raise NotImplementedError
return CursorResult(
data,
prev=Cursor(0, max(0, offset - limit), True, offset > 0),
next=Cursor(0, max(0, offset + limit), False, has_more),
)
def query_tag_data(
snuba_params: SnubaParams,
referrer: str,
filter_query: str | None = None,
aggregate_column: str | None = None,
) -> dict | None:
"""
Fetch general data about all the transactions with this transaction name to feed into the facet query
:return: Returns the row with aggregate and count if the query was successful
Returns None if query was not successful which causes the endpoint to return early
"""
with sentry_sdk.start_span(op="discover.discover", name="facets.filter_transform") as span:
span.set_data("query", filter_query)
tag_query = DiscoverQueryBuilder(
dataset=Dataset.Discover,
params={},
snuba_params=snuba_params,
query=filter_query,
selected_columns=[
"count()",
f"avg({aggregate_column}) as aggregate",
f"max({aggregate_column}) as max",
f"min({aggregate_column}) as min",
],
)
tag_query.where.append(
Condition(tag_query.resolve_column(aggregate_column), Op.IS_NOT_NULL)
)
with sentry_sdk.start_span(op="discover.discover", name="facets.frequent_tags"):
# Get the average and count to use to filter the next request to facets
tag_data = tag_query.run_query(f"{referrer}.all_transactions")
if len(tag_data["data"]) != 1:
return None
counts = [r["count"] for r in tag_data["data"]]
aggregates = [r["aggregate"] for r in tag_data["data"]]
# Return early to avoid doing more queries with 0 count transactions or aggregates for columns that don't exist
if counts[0] == 0 or aggregates[0] is None:
return None
if not tag_data["data"][0]:
return None
return tag_data["data"][0]
def query_top_tags(
snuba_params: SnubaParams,
tag_key: str,
limit: int,
referrer: str,
orderby: list[str] | None,
offset: int | None = None,
aggregate_column: str | None = None,
*,
filter_query: str,
) -> list[Any] | None:
"""
Fetch counts by tag value, finding the top tag values for a tag key by a limit.
:return: Returns the row with the value, the aggregate and the count if the query was successful
Returns None if query was not successful which causes the endpoint to return early
"""
translated_aggregate_column = discover.resolve_discover_column(aggregate_column)
with sentry_sdk.start_span(op="discover.discover", name="facets.top_tags"):
if not orderby:
orderby = ["-count"]
for i, sort in enumerate(orderby):
if "frequency" in sort:
# Replacing frequency as it's the same underlying data dimension, this way we don't have to modify the existing histogram query.
orderby[i] = sort.replace("frequency", "count")
if "tags_value" not in orderby:
orderby = orderby + ["tags_value"]
# Get the average and count to use to filter the next request to facets
tag_data = discover.query(
selected_columns=[
"count()",
f"avg({aggregate_column}) as aggregate",
"array_join(tags.value) as tags_value",
],
query=filter_query,
snuba_params=snuba_params,
orderby=orderby,
conditions=[
Condition(Column(translated_aggregate_column), Op.IS_NOT_NULL),
Condition(Column("tags_key"), Op.EQ, tag_key),
],
functions_acl=["array_join"],
referrer=f"{referrer}.top_tags",
limit=limit,
offset=offset,
)
if len(tag_data["data"]) <= 0:
return None
counts = [r["count"] for r in tag_data["data"]]
# Return early to avoid doing more queries with 0 count transactions or aggregates for columns that don't exist
if counts[0] == 0:
return None
if not tag_data["data"]:
return None
return tag_data["data"]
def query_facet_performance(
snuba_params: SnubaParams,
tag_data: Mapping[str, Any],
referrer: str,
aggregate_column: str | None = None,
filter_query: str | None = None,
orderby: list[str] | None = None,
offset: int | None = None,
all_tag_keys: bool | None = None,
tag_key: str | None = None,
*,
limit: int,
) -> EventsResponse:
# Dynamically sample so at least 50000 transactions are selected
sample_start_count = 50000
transaction_count = tag_data["count"]
sampling_enabled = transaction_count > sample_start_count
# log-e growth starting at 50,000
target_sample = max(
sample_start_count * (math.log(transaction_count) - (math.log(sample_start_count) - 1)),
transaction_count,
)
dynamic_sample_rate = 0 if transaction_count <= 0 else (target_sample / transaction_count)
sample_rate = min(max(dynamic_sample_rate, 0), 1) if sampling_enabled else None
frequency_sample_rate = sample_rate if sample_rate else 1
tag_key_limit = limit if tag_key else 1
with sentry_sdk.start_span(op="discover.discover", name="facets.filter_transform") as span:
span.set_data("query", filter_query)
tag_query = DiscoverQueryBuilder(
dataset=Dataset.Discover,
params={},
snuba_params=snuba_params,
query=filter_query,
selected_columns=["count()", "tags_key", "tags_value"],
sample_rate=sample_rate,
turbo=sample_rate is not None,
limit=limit,
offset=offset,
limitby=("tags_key", tag_key_limit) if not tag_key else None,
)
translated_aggregate_column = tag_query.resolve_column(aggregate_column)
# Aggregate (avg) and count of all transactions for this query
transaction_aggregate = tag_data["aggregate"]
# Exclude tags that have high cardinality are generally unrelated to performance
excluded_tags = Condition(
Column("tags_key"),
Op.NOT_IN,
["trace", "trace.ctx", "trace.span", "project", "browser", "celery_task_id", "url"],
)
with sentry_sdk.start_span(op="discover.discover", name="facets.aggregate_tags"):
span.set_data("sample_rate", sample_rate)
span.set_data("target_sample", target_sample)
aggregate_comparison = transaction_aggregate * 1.005 if transaction_aggregate else 0
aggregate_column = Function("avg", [translated_aggregate_column], "aggregate")
tag_query.where.append(excluded_tags)
if not all_tag_keys and not tag_key:
tag_query.having.append(Condition(aggregate_column, Op.GT, aggregate_comparison))
tag_query.where.append(Condition(translated_aggregate_column, Op.IS_NOT_NULL))
if tag_key:
tag_query.where.append(Condition(Column("tags_key"), Op.IN, [tag_key]))
tag_query.columns.extend(
[
Function(
"divide",
[
Function(
"sum",
[
Function(
"minus", [translated_aggregate_column, transaction_aggregate]
)
],
),
frequency_sample_rate,
],
"sumdelta",
),
Function(
"divide",
[
Function("divide", [Function("count", [], "count"), frequency_sample_rate]),
transaction_count,
],
"frequency",
),
Function("divide", [aggregate_column, transaction_aggregate], "comparison"),
aggregate_column,
]
)
# Need to wait for the custom functions to be added first since they can be orderby options
tag_query.orderby = tag_query.resolve_orderby([*(orderby or []), "tags_key", "tags_value"])
results = tag_query.process_results(tag_query.run_query(f"{referrer}.tag_values"))
return results
def query_facet_performance_key_histogram(
snuba_params: SnubaParams,
top_tags: list[Any],
tag_key: str,
num_buckets_per_key: int,
limit: int,
referrer: str,
aggregate_column: str,
*,
filter_query: str,
) -> dict:
precision = 0
tag_values = [x["tags_value"] for x in top_tags]
results = discover.histogram_query(
fields=[aggregate_column],
user_query=filter_query,
snuba_params=snuba_params,
num_buckets=num_buckets_per_key,
precision=precision,
group_by=["tags_value", "tags_key"],
extra_conditions=[
Condition(Column("tags_key"), Op.EQ, tag_key),
Condition(Column("tags_value"), Op.IN, tag_values),
],
histogram_rows=limit,
referrer="api.organization-events-facets-performance-histogram",
normalize_results=False,
)
return results
| HistogramPaginator |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_minimum_bounding_radius_to_be_between.py | {
"start": 2371,
"end": 10756
} | class ____(ColumnAggregateExpectation):
"""Expect that column values as geometry points to be contained within a bounding circle with a given radius (or diameter).
expect_column_values_minimum_bounding_radius_to_be_between is a :func:`column_expectation <great_expectations.dataset.dataset.MetaDataset.column_expectation>`.
Args:
column (str): \
The column name.
Column values must be provided in WKT or WKB format, which are commom formats for GIS Database formats.
WKT can be accessed thhrough the ST_AsText() or ST_AsBinary() functions in queries for PostGIS and MSSQL.
Column values can alternately be given in x,y tuple or list pairs.
The user is responsible for the coordinate reference system and the units. e.g. values may be given in easting-northing pairs.
min_value (float or None): \
The minimum radius (or diameter) that bounds all geometries in the column
max_value (float or None): \
The maximum radius (or diameter) that bounds all geometries in the column
strict_min (boolean): \
If True, the minimal radius must be strictly larger than min_value,
Default: False
strict_max (boolean): \
If True, the maximal radius must be strictly smaller than max_value,
Default: False
Keyword Args:
column_shape_format: str
Geometry format for 'column' (wkt, wkb, xy). Column values can be provided in WKT or WKB format, which are commom formats for GIS Database formats.
xy also supports tuple pairs or list pairs for points only
WKT can be accessed thhrough the ST_AsText() or ST_AsBinary() functions in queries for PostGIS and MSSQL.
Must be one of: [wkt, wkb, xy]
Default: wkt
diameter_flag (boolean): \
If True, the user can specify a diameter as opposed to a radius,
Default: False
Returns:
An ExpectationSuiteValidationResult
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual bounding radius (or diameter)
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"points_only": [
"POINT(1 1)",
"POINT(2 2)",
"POINT(6 4)",
"POINT(3 9)",
"POINT(5 5)",
],
"points_and_lines": [
"POINT(1 1)",
"POINT(2 2)",
"POINT(6 4)",
"POINT(3 9)",
"LINESTRING(5 5, 8 10)",
],
},
"tests": [
{
"title": "positive_test_with_points",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_only",
"column_shape_format": "wkt",
"min_value": None,
"max_value": 5,
"strict_min": False,
"strict_max": False,
"diameter_flag": False,
},
"out": {
"success": True,
# "result":{"observed_value":4.123105625617661}
},
},
{
"title": "positive_test_with_points_and_lines",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_and_lines",
"column_shape_format": "wkt",
"min_value": 5,
"max_value": 10,
"strict_min": True,
"strict_max": True,
"diameter_flag": False,
},
"out": {
"success": True,
# "result":{"observed_value":5.70087712549569}
},
},
{
"title": "negative positive_test_with_points_and_lines",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_and_lines",
"column_shape_format": "wkt",
"min_value": 1,
"max_value": 10,
"strict_min": False,
"strict_max": True,
"diameter_flag": True,
},
"out": {
"success": False,
# "result":{"observed_value":11.40175425099138}
},
},
],
}
]
# This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
metric_dependencies = ("column.geometry.minimum_bounding_radius",)
# This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
success_keys = (
"diameter_flag",
"column_shape_format",
"min_value",
"strict_min",
"max_value",
"strict_max",
)
# This dictionary contains default values for any parameters that should have default values.
default_kwarg_values = {
"diameter_flag": False,
"column_shape_format": "wkt",
}
# This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
def _validate( # noqa: C901 - too complex
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
radius = metrics.get("column.geometry.minimum_bounding_radius")
success_kwargs = self._get_success_kwargs()
diameter_flag = success_kwargs.get("diameter_flag")
min_value = success_kwargs.get("min_value")
max_value = success_kwargs.get("max_value")
strict_min = success_kwargs.get("strict_min")
strict_max = success_kwargs.get("strict_max")
if diameter_flag:
distance = radius * 2
else:
distance = radius
# Evaluate the between statement (from column_values_between.py)
if min_value is None:
if strict_max:
success = distance < max_value
else:
success = distance <= max_value
elif max_value is None:
if strict_min:
success = min_value < distance
else:
success = min_value <= distance
else:
if strict_min and strict_max:
success = (min_value < distance) & (distance < max_value)
elif strict_min:
success = (min_value < distance) & (distance <= max_value)
elif strict_max:
success = (min_value <= distance) & (distance < max_value)
else:
success = (min_value <= distance) & (distance <= max_value)
return {"success": success, "result": {"observed_value": distance}}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": ["hackathon-2022"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@pjdobson", # Don't forget to add your github handle here!
],
"requirements": ["pygeos"],
}
if __name__ == "__main__":
ExpectColumnMinimumBoundingRadiusToBeBetween().print_diagnostic_checklist()
| ExpectColumnMinimumBoundingRadiusToBeBetween |
python | facebook__pyre-check | pyre_extensions/type_variable_operators.py | {
"start": 314,
"end": 431
} | class ____(type):
def __getitem__(cls, __tparams) -> object:
return Any
| ParameterSpecificationComponentMeta |
python | milvus-io__pymilvus | tests/test_grpc_handler_mutations.py | {
"start": 5447,
"end": 10642
} | class ____:
def test_delete(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
delete_future = client_thread.submit(
handler.delete,
collection_name="test_collection",
expression="id in [1, 2, 3]",
timeout=10
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Delete"]
)
rpc.send_initial_metadata(())
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
delete_cnt=3,
timestamp=100
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
result = delete_future.result()
assert isinstance(result, MutationResult)
def test_delete_with_partition(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
delete_future = client_thread.submit(
handler.delete,
collection_name="test_collection",
expression="id > 10",
partition_name="test_partition",
timeout=10
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Delete"]
)
rpc.send_initial_metadata(())
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
delete_cnt=5,
timestamp=100
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
result = delete_future.result()
assert isinstance(result, MutationResult)
def test_delete_async(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
delete_future = client_thread.submit(
handler.delete,
collection_name="test_collection",
expression="id == 1",
timeout=10,
_async=True
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Delete"]
)
rpc.send_initial_metadata(())
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
delete_cnt=1,
timestamp=100
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
result = delete_future.result()
assert isinstance(result, MutationFuture)
def test_upsert_rows(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
with patch.object(handler, 'describe_collection') as mock_describe:
mock_describe.return_value = {
"fields": [
{"name": "id", "type": DataType.INT64}
],
"enable_dynamic_field": False,
"update_timestamp": 0
}
entities = [{"id": 1}, {"id": 2}]
upsert_future = client_thread.submit(
handler.upsert_rows,
collection_name="test_collection",
entities=entities,
timeout=10
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Upsert"]
)
rpc.send_initial_metadata(())
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
IDs=schema_pb2.IDs(int_id=schema_pb2.LongArray(data=[1, 2])),
upsert_cnt=2,
timestamp=100
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
result = upsert_future.result()
assert isinstance(result, MutationResult)
def test_upsert_rows_single_entity(self, channel: Any, client_thread: Any) -> None:
handler = GrpcHandler(channel=channel)
with patch.object(handler, 'describe_collection') as mock_describe:
mock_describe.return_value = {
"fields": [
{"name": "id", "type": DataType.INT64}
],
"enable_dynamic_field": False,
"update_timestamp": 0
}
entity = {"id": 1} # Single dict
upsert_future = client_thread.submit(
handler.upsert_rows,
collection_name="test_collection",
entities=entity,
timeout=10
)
(invocation_metadata, request, rpc) = channel.take_unary_unary(
descriptor.methods_by_name["Upsert"]
)
rpc.send_initial_metadata(())
expected_result = milvus_pb2.MutationResult(
status=common_pb2.Status(code=0),
IDs=schema_pb2.IDs(int_id=schema_pb2.LongArray(data=[1])),
upsert_cnt=1,
timestamp=100
)
rpc.terminate(expected_result, (), grpc.StatusCode.OK, "")
result = upsert_future.result()
assert isinstance(result, MutationResult)
| TestGrpcHandlerDeleteAndUpsertOperations |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 199983,
"end": 205664
} | class ____:
usg_data = [
([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0),
([3, 3, 3, 3, 2, 2, 2, 2], 0),
([0, 1, 2, 3, 4, 5, 6, 7], 7),
([7, 6, 5, 4, 3, 2, 1, 0], 0)
]
sg_data = usg_data + [
([1, 2, 3, 4, -4, -3, -2, -1], 3),
([1, 2, 3, 4, -1, -2, -3, -4], 3)
]
darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(usg_data, (
np.uint8, np.uint16, np.uint32, np.uint64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(sg_data, (
np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product((
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
# To hit the tail of SIMD multi-level(x4, x1) inner loops
# on variant SIMD widths
([1] * (2 * 5 - 1) + [np.nan], 2 * 5 - 1),
([1] * (4 * 5 - 1) + [np.nan], 4 * 5 - 1),
([1] * (8 * 5 - 1) + [np.nan], 8 * 5 - 1),
([1] * (16 * 5 - 1) + [np.nan], 16 * 5 - 1),
([1] * (32 * 5 - 1) + [np.nan], 32 * 5 - 1)
), (
np.float32, np.float64
))
)]
nan_arr = darr + [
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
@pytest.mark.parametrize('data', nan_arr)
def test_combinations(self, data):
arr, pos = data
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
"invalid value encountered in reduce",
RuntimeWarning)
val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg=f"{arr!r}")
assert_equal(arr[np.argmax(arr)], val, err_msg=f"{arr!r}")
# add padding to test SIMD loops
rarr = np.repeat(arr, 129)
rpos = pos * 129
assert_equal(np.argmax(rarr), rpos, err_msg=f"{rarr!r}")
assert_equal(rarr[np.argmax(rarr)], val, err_msg=f"{rarr!r}")
padd = np.repeat(np.min(arr), 513)
rarr = np.concatenate((arr, padd))
rpos = pos
assert_equal(np.argmax(rarr), rpos, err_msg=f"{rarr!r}")
assert_equal(rarr[np.argmax(rarr)], val, err_msg=f"{rarr!r}")
def test_maximum_signed_integers(self):
a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
assert_equal(np.argmax(a), 1)
a = a.repeat(129)
assert_equal(np.argmax(a), 129)
a = np.array([1, 2**15 - 1, -2**15], dtype=np.int16)
assert_equal(np.argmax(a), 1)
a = a.repeat(129)
assert_equal(np.argmax(a), 129)
a = np.array([1, 2**31 - 1, -2**31], dtype=np.int32)
assert_equal(np.argmax(a), 1)
a = a.repeat(129)
assert_equal(np.argmax(a), 129)
a = np.array([1, 2**63 - 1, -2**63], dtype=np.int64)
assert_equal(np.argmax(a), 1)
a = a.repeat(129)
assert_equal(np.argmax(a), 129)
| TestArgmax |
python | Netflix__metaflow | test/core/tests/catch_retry.py | {
"start": 101,
"end": 5698
} | class ____(MetaflowTest):
PRIORITY = 2
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag("retry(times=3,minutes_between_retries=0)")
@steps(0, ["start"])
def step_start(self):
import os
import sys
self.test_attempt = current.retry_count
sys.stdout.write("stdout testing logs %d\n" % self.test_attempt)
sys.stderr.write("stderr testing logs %d\n" % self.test_attempt)
if self.test_attempt < 3:
self.invisible = True
raise TestRetry()
# foreach splits don't support @catch but @retry should work
@tag("retry(times=2,minutes_between_retries=0)")
@steps(0, ["foreach-split", "parallel-split"])
def step_split(self):
import os
if current.retry_count == 2:
self.this_is_split = True
else:
raise TestRetry()
@tag("retry(times=2,minutes_between_retries=0)")
@steps(0, ["join"])
def step_join(self):
import os
if current.retry_count == 2:
self.test_attempt = inputs[0].test_attempt
else:
raise TestRetry()
@tag('catch(var="end_ex", print_exception=False)')
@steps(0, ["end"], required=True)
def step_end(self):
from metaflow.exception import ExternalCommandFailed
# make sure we see the latest attempt version of the artifact
assert_equals(3, self.test_attempt)
# the test uses a non-trivial derived exception on purpose
# which is non-trivial to pickle correctly
self.here = True
raise ExternalCommandFailed("catch me!")
@tag('catch(var="ex", print_exception=False)')
@tag("retry(times=2,minutes_between_retries=0)")
@steps(1, ["all"])
def step_all(self):
# Die a soft death; this should retry and then catch in the end
self.retry_with_catch = current.retry_count
raise TestRetry()
def check_results(self, flow, checker):
checker.assert_log(
"start", "stdout", "stdout testing logs 3\n", exact_match=False
)
checker.assert_log(
"start", "stderr", "stderr testing logs 3\n", exact_match=False
)
for step in flow:
if step.name == "start":
checker.assert_artifact("start", "test_attempt", 3)
try:
for task in checker.artifact_dict("start", "invisible").values():
if task:
raise Exception(
"'invisible' should not be visible in 'start'"
)
except KeyError:
pass
elif step.name == "end":
checker.assert_artifact("end", "test_attempt", 3)
for task in checker.artifact_dict(step.name, "end_ex").values():
assert_equals("catch me!", str(task["end_ex"].exception))
break
else:
raise Exception("No artifact 'end_ex' in step 'end'")
elif flow._graph[step.name].type == "foreach":
checker.assert_artifact(step.name, "this_is_split", True)
elif flow._graph[step.name].type == "join":
checker.assert_artifact("end", "test_attempt", 3)
else:
for task in checker.artifact_dict(step.name, "ex").values():
extype = "metaflow_test.TestRetry"
assert_equals(extype, str(task["ex"].type))
break
else:
raise Exception("No artifact 'ex' in step '%s'" % step.name)
for task in checker.artifact_dict(
step.name, "retry_with_catch"
).values():
assert_equals(task["retry_with_catch"], 2)
break
else:
raise Exception(
"No artifact 'retry_with_catch' in step '%s'" % step.name
)
run = checker.get_run()
if run:
for step in run:
if step.id == "end":
continue
if flow._graph[step.id].type in ("foreach", "join"):
# 1 normal run + 2 retries = 3 attempts
attempts = 3
elif step.id == "start":
attempts = 4 # 1 normal run + 3 retries = 4 attempts
else:
# 1 normal run + 2 retries = 3 attempts
attempts = 3
for task in step:
data = task.data
got = sorted(m.value for m in task.metadata if m.type == "attempt")
assert_equals(list(map(str, range(attempts))), got)
assert_equals(False, "invisible" in run["start"].task.data)
assert_equals(3, run["start"].task.data.test_attempt)
end = run["end"].task
assert_equals(True, end.data.here)
assert_equals(3, end.data.test_attempt)
# task.exception is None since the exception was handled
assert_equals(None, end.exception)
assert_equals("catch me!", end.data.end_ex.exception)
assert_equals(
"metaflow.exception.ExternalCommandFailed", end.data.end_ex.type
)
| CatchRetryTest |
python | pytorch__pytorch | torch/export/pt2_archive/_package.py | {
"start": 6077,
"end": 24179
} | class ____:
"""
Context manager for reading a PT2 archive.
"""
def __init__(self, archive_path_or_buffer: FileLike):
if isinstance(archive_path_or_buffer, str):
archive_path_or_buffer = normalize_path_separator(archive_path_or_buffer)
self.archive_file = torch._C.PyTorchFileReader(archive_path_or_buffer) # type: ignore[arg-type]
assert self.read_string(ARCHIVE_FORMAT_PATH) == ARCHIVE_FORMAT_VALUE, (
"Invalid archive format"
)
def __enter__(self) -> "PT2ArchiveReader":
return self
def __exit__(self, *args: Any) -> None:
# torch._C.PyTorchFileReader doesn't have a close method
pass
def read_bytes(self, name: str) -> bytes:
"""
Read a bytes object from the archive.
name: The source file inside the archive.
"""
return self.archive_file.get_record(name)
def read_string(self, name: str) -> str:
"""
Read a string object from the archive.
name: The source file inside the archive.
"""
data = self.read_bytes(name)
return data.decode()
def archive_version(self) -> int:
"""
Get the archive version.
"""
try:
archive_version = self.read_string(ARCHIVE_VERSION_PATH)
except Exception:
# if archive_version is not found, it means the archive is older than version 0.
# In this case, we assume the archive is version 0.
archive_version = "0"
return int(archive_version)
def get_file_names(self) -> list[str]:
"""
Get the file names in the archive.
"""
return self.archive_file.get_all_records()
is_pt2_package.__module__ = "torch.export.pt2_archive"
PT2ArchiveWriter.__module__ = "torch.export.pt2_archive"
PT2ArchiveReader.__module__ = "torch.export.pt2_archive"
def _package_aoti_files(
archive_writer: PT2ArchiveWriter,
aoti_files: Optional[AOTI_FILES],
pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL,
) -> None:
if aoti_files is None:
return
if isinstance(aoti_files, list):
aoti_files = {"model": aoti_files}
assert isinstance(aoti_files, dict)
all_weights: dict[str, Weights] = {} # model_name -> weight
weights_configs: dict[
str, dict[str, Any]
] = {} # model_name -> (weight_name -> (filename, shape, stride, offset))
for model_name, files in aoti_files.items():
num_so_files = 0
weights_configs[model_name] = {}
for file in files:
if file == "":
continue
if isinstance(file, Weights):
all_weights[model_name] = file
continue
if file.endswith(".so"):
num_so_files += 1
if num_so_files > 1:
raise RuntimeError(
f"Multiple .so files found in {files}. "
"You might need to clear your cache "
"directory before calling aoti_compile again."
)
filename = os.path.basename(file)
if filename.startswith(CUSTOM_OBJ_FILENAME_PREFIX):
new_filepath = os.path.join(CONSTANTS_DIR, filename)
else:
new_filepath = os.path.join(AOTINDUCTOR_DIR, model_name, filename)
logger.debug(
"Saving AOTI generated file %s to archive in %s", file, new_filepath
)
archive_writer.write_file(
str(new_filepath),
file,
)
if len(all_weights) > 0:
# Dedup weights
grouped_tensors: list[OrderedSet[tuple[str, str]]] = group_weights(all_weights)
for idx, group in enumerate(grouped_tensors):
filename = f"{WEIGHT_FILENAME_PREFIX}{idx}"
model_name, weight_name = get_complete(group, all_weights)
complete_tensor, _ = all_weights[model_name].get_weight(weight_name)
buffer = io.BytesIO()
torch.save(complete_tensor, buffer, pickle_protocol=pickle_protocol)
archive_writer.write_bytes(
os.path.join(WEIGHTS_DIR, filename), buffer.getvalue()
)
for model_name, weight_name in group:
_, w_property = all_weights[model_name].get_weight(weight_name)
weights_configs[model_name][weight_name] = (
filename,
w_property.shape,
w_property.stride,
w_property.offset,
)
for model_name, weights_config in weights_configs.items():
archive_writer.write_string(
os.path.join(AOTINDUCTOR_DIR, model_name, "weights_config.json"),
json.dumps(weights_config),
)
logger.debug("packaging weights_config for model %s", model_name)
logger.debug(weights_config)
def _is_fake_tensor(t: torch.Tensor) -> bool:
return isinstance(t, FakeTensor)
def _is_tensor_subclass(t: torch.Tensor) -> bool:
return isinstance(t, torch.Tensor) and type(t.data) is not torch.Tensor
def _get_raw_tensor_bytes(value: torch.Tensor) -> bytes:
"""
Get the raw bytes of a tensor. This is used to save the tensor in pt2 archive.
"""
# NOTE: don't chain .cpu() with .data_ptr(). If an HtoD copy needs to be
# performed, the CPU copy needs to be kept alive when its underlying
# memory is accessed.
import ctypes
if _is_fake_tensor(value):
value_bytes = b""
elif value.data_ptr():
cpu_tensor = value.cpu()
value_untyped_storage = cpu_tensor.untyped_storage()
# we store the raw bytes the untyped storage. Tensor metadata is stored separately
value_bytes = bytes(
ctypes.cast(
value_untyped_storage.data_ptr(),
ctypes.POINTER(ctypes.c_ubyte * value_untyped_storage.size()),
).contents
)
else:
# for empty tensor
value_bytes = b""
return value_bytes
def _should_use_pickle(t: torch.Tensor) -> bool:
return _is_tensor_subclass(t) and not _is_fake_tensor(t)
def _save_pickled_tensors(
pickled_items: list[tuple[str, torch.Tensor]],
archive_writer: PT2ArchiveWriter,
config: dict[str, schema.PayloadMeta],
directory: str,
filename_prefix: str,
idx: int,
pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL,
) -> int:
"""Save pickled tensors and update config. Returns updated index."""
for item_fqn, tensor in pickled_items:
path_name = f"{filename_prefix}{idx}"
archive_path = os.path.join(directory, path_name)
buffer = io.BytesIO()
torch.save(tensor, buffer, pickle_protocol=pickle_protocol)
archive_writer.write_bytes(archive_path, buffer.getvalue())
config[item_fqn] = schema.PayloadMeta(
path_name=path_name,
is_param=isinstance(tensor, torch.nn.Parameter),
use_pickle=True,
tensor_meta=serialize_tensor_meta(tensor),
)
idx += 1
return idx
def _save_raw_tensors(
raw_items: dict[str, tuple[torch.Tensor, TensorProperties]],
model_name: str,
archive_writer: PT2ArchiveWriter,
config: dict[str, schema.PayloadMeta],
directory: str,
filename_prefix: str,
idx: int,
) -> int:
"""Save deduplicated raw tensor bytes and update config. Returns updated index."""
if not raw_items:
return idx
weights_dict = {model_name: Weights(raw_items)}
storage_groups = group_weights(weights_dict)
for group in storage_groups:
# Find the complete tensor that covers all others in this storage group
model_name, complete_item_name = get_complete(group, weights_dict)
complete_tensor, _ = weights_dict[model_name].get_weight(complete_item_name)
path_name = f"{filename_prefix}{idx}"
archive_path = os.path.join(directory, path_name)
tensor_bytes = _get_raw_tensor_bytes(complete_tensor)
archive_writer.write_bytes(archive_path, tensor_bytes)
idx += 1
for _, item_fqn in group:
tensor, _ = weights_dict[model_name].get_weight(item_fqn)
config[item_fqn] = schema.PayloadMeta(
path_name=path_name,
is_param=isinstance(tensor, torch.nn.Parameter),
use_pickle=False,
tensor_meta=serialize_tensor_meta(tensor),
)
return idx
def _package_state_dict(
model_name: str,
exported_program: ExportedProgram,
archive_writer: PT2ArchiveWriter,
pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL,
) -> schema.PayloadConfig:
weights_config: dict[str, schema.PayloadMeta] = {}
pickled_weights: list[tuple[str, torch.Tensor]] = []
raw_weights: dict[str, tuple[torch.Tensor, TensorProperties]] = {}
# Categorize weights
for weight_fqn, weight_tensor in exported_program.state_dict.items():
assert isinstance(weight_tensor, torch.Tensor), (
"only torch.Tensor is allowed in state_dict"
)
if _should_use_pickle(weight_tensor):
pickled_weights.append((weight_fqn, weight_tensor))
else:
raw_weights[weight_fqn] = (weight_tensor, TensorProperties(weight_tensor))
idx = archive_writer.count_prefix(os.path.join(WEIGHTS_DIR, WEIGHT_FILENAME_PREFIX))
# Save weights in pickle format
idx = _save_pickled_tensors(
pickled_weights,
archive_writer,
weights_config,
WEIGHTS_DIR,
WEIGHT_FILENAME_PREFIX,
idx,
pickle_protocol,
)
# Save weights in raw bytes format
_save_raw_tensors(
raw_weights,
model_name,
archive_writer,
weights_config,
WEIGHTS_DIR,
WEIGHT_FILENAME_PREFIX,
idx,
)
return schema.PayloadConfig(config=weights_config)
def _package_constants(
model_name: str,
exported_program: ExportedProgram,
archive_writer: PT2ArchiveWriter,
pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL,
) -> schema.PayloadConfig:
constants_config: dict[str, schema.PayloadMeta] = {}
pickled_constants: list[tuple[str, torch.Tensor]] = []
raw_constants: dict[str, tuple[torch.Tensor, TensorProperties]] = {}
custom_objects: list[tuple[str, torch._C.ScriptObject]] = []
# Categorize constants
for constant_fqn, constant in exported_program.constants.items():
if isinstance(constant, torch.Tensor):
if _should_use_pickle(constant):
pickled_constants.append((constant_fqn, constant))
else:
raw_constants[constant_fqn] = (constant, TensorProperties(constant))
elif isinstance(constant, torch._C.ScriptObject):
custom_objects.append((constant_fqn, constant))
else:
raise RuntimeError(f"Unsupported constant type: {type(constant)}")
tensor_idx = archive_writer.count_prefix(
os.path.join(CONSTANTS_DIR, TENSOR_CONSTANT_FILENAME_PREFIX)
)
custom_obj_idx = archive_writer.count_prefix(
os.path.join(CONSTANTS_DIR, CUSTOM_OBJ_FILENAME_PREFIX)
)
# Save constants in pickle format
tensor_idx = _save_pickled_tensors(
pickled_constants,
archive_writer,
constants_config,
CONSTANTS_DIR,
TENSOR_CONSTANT_FILENAME_PREFIX,
tensor_idx,
pickle_protocol,
)
# Save constants in raw bytes format
_save_raw_tensors(
raw_constants,
model_name,
archive_writer,
constants_config,
CONSTANTS_DIR,
TENSOR_CONSTANT_FILENAME_PREFIX,
tensor_idx,
)
# Handle custom objects
for constant_fqn, constant in custom_objects:
path_name = f"{CUSTOM_OBJ_FILENAME_PREFIX}{custom_obj_idx}"
archive_path = os.path.join(CONSTANTS_DIR, path_name)
custom_obj_bytes = torch._C._pickle_save(constant)
archive_writer.write_bytes(archive_path, custom_obj_bytes)
constants_config[constant_fqn] = schema.PayloadMeta(
path_name=path_name,
is_param=False,
use_pickle=True,
tensor_meta=None,
)
custom_obj_idx += 1
return schema.PayloadConfig(config=constants_config)
def _package_payload_config(
archive_writer: PT2ArchiveWriter,
payload_config: schema.PayloadConfig,
config_file: str,
) -> None:
"""
Save the payload config as json file in the archive.
"""
archive_writer.write_string(
config_file, json.dumps(_dataclass_to_dict(payload_config))
)
def _package_exported_programs(
archive_writer: PT2ArchiveWriter,
exported_programs: Optional[Union[ExportedProgram, dict[str, ExportedProgram]]],
opset_version: Optional[dict[str, int]] = None,
pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL,
) -> None:
if exported_programs is None:
return
if isinstance(exported_programs, ExportedProgram):
exported_programs = {"model": exported_programs}
assert isinstance(exported_programs, dict)
for model_name, ep in exported_programs.items():
weights_config = _package_state_dict(
model_name, ep, archive_writer, pickle_protocol
)
weights_config_file = WEIGHTS_CONFIG_FILENAME_FORMAT.format(model_name)
_package_payload_config(archive_writer, weights_config, weights_config_file)
constants_config = _package_constants(
model_name, ep, archive_writer, pickle_protocol
)
constants_config_file = CONSTANTS_CONFIG_FILENAME_FORMAT.format(model_name)
_package_payload_config(archive_writer, constants_config, constants_config_file)
artifact: SerializedArtifact = serialize(
ep,
opset_version,
pickle_protocol,
)
archive_writer.write_bytes(
MODELS_FILENAME_FORMAT.format(model_name), artifact.exported_program
)
archive_writer.write_bytes(
SAMPLE_INPUTS_FILENAME_FORMAT.format(model_name),
artifact.example_inputs,
)
def _package_extra_files(
archive_writer: PT2ArchiveWriter, extra_files: Optional[dict[str, Any]]
) -> None:
if extra_files is None:
return
for extra_file_name, content in extra_files.items():
archive_writer.write_string(f"{EXTRA_DIR}{extra_file_name}", content)
def _package_executorch_files(
archive_writer: PT2ArchiveWriter, executorch_files: Optional[dict[str, bytes]]
) -> None:
if executorch_files is None:
return
for file_name, content in executorch_files.items():
archive_writer.write_bytes(f"{EXECUTORCH_DIR}{file_name}", content)
def package_pt2(
f: FileLike,
*,
exported_programs: Optional[
Union[ExportedProgram, dict[str, ExportedProgram]]
] = None,
aoti_files: Optional[AOTI_FILES] = None,
extra_files: Optional[dict[str, Any]] = None,
opset_version: Optional[dict[str, int]] = None,
pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL,
executorch_files: Optional[dict[str, bytes]] = None,
) -> FileLike:
r"""
Saves the artifacts to a PT2Archive format. The artifact can then be loaded
using ``load_pt2``.
Args:
f (str | os.PathLike[str] | IO[bytes]): A file-like object (has to
implement write and flush) or a string containing a file name.
exported_programs (Union[ExportedProgram, dict[str, ExportedProgram]]):
The exported program to save, or a dictionary mapping model name to an
exported program to save. The exported program will be saved under
models/\*.json. If only one ExportedProgram is specified, this will
automatically be named "model".
aoti_files (Union[list[str], dict[str, list[str]]]): A list of files
generated by AOTInductor via
``torch._inductor.aot_compile(..., {"aot_inductor.package": True})``,
or a dictionary mapping model name to its AOTInductor generated files.
If only one set of files is specified, this will automatically be named
"model".
extra_files (Optional[Dict[str, Any]]): Map from filename to contents
which will be stored as part of the pt2.
opset_version (Optional[Dict[str, int]]): A map of opset names
to the version of this opset
pickle_protocol: can be specified to override the default protocol
executorch_files (Optional[dict[str, bytes]]): Optional executorch
artifacts to save.
"""
assert not (
exported_programs is None and aoti_files is None and extra_files is None
), (
"No value passed in for `exported_programs`, `aoti_files`, and "
"`extra_files`, implying that you do not plan on saving anything."
)
if not (
(isinstance(f, (io.IOBase, IO)) and f.writable() and f.seekable())
or (isinstance(f, (str, os.PathLike)) and os.fspath(f).endswith(".pt2"))
or (isinstance(f, tempfile._TemporaryFileWrapper) and f.name.endswith(".pt2"))
):
# TODO: turn this into an error
logger.warning(
"Expect archive file to be a file ending in .pt2, or is a buffer. "
"Instead got {%s}",
f,
)
if isinstance(f, (str, os.PathLike)):
f = os.fspath(f)
# pyrefly: ignore [bad-argument-type]
with PT2ArchiveWriter(f) as archive_writer:
_package_exported_programs(
archive_writer, exported_programs, pickle_protocol=pickle_protocol
)
_package_aoti_files(
archive_writer,
aoti_files,
pickle_protocol=pickle_protocol,
)
_package_extra_files(archive_writer, extra_files)
_package_executorch_files(archive_writer, executorch_files)
if isinstance(f, (io.IOBase, IO)):
f.seek(0)
# pyrefly: ignore [bad-return]
return f
| PT2ArchiveReader |
python | pydata__xarray | xarray/core/indexing.py | {
"start": 73579,
"end": 76276
} | class ____(PandasIndexingAdapter):
"""Handles explicit indexing for a pandas.MultiIndex.
This allows creating one instance for each multi-index level while
preserving indexing efficiency (memoized + might reuse another instance with
the same multi-index).
"""
__slots__ = ("_dtype", "adapter", "array", "level")
array: pd.MultiIndex
_dtype: np.dtype | pd.api.extensions.ExtensionDtype
level: str | None
def __init__(
self,
array: pd.MultiIndex,
dtype: DTypeLike | pd.api.extensions.ExtensionDtype | None = None,
level: str | None = None,
):
super().__init__(array, dtype)
self.level = level
def __array__(
self,
dtype: DTypeLike | None = None,
/,
*,
copy: bool | None = None,
) -> np.ndarray:
dtype = self._get_numpy_dtype(dtype)
if self.level is not None:
return np.asarray(
self.array.get_level_values(self.level).values, dtype=dtype
)
else:
return super().__array__(dtype, copy=copy)
@property
def _in_memory(self) -> bool:
# The pd.MultiIndex's data is fully in memory, but it has a different
# layout than the level and dimension coordinate arrays. Marking this
# adapter class as a "lazy" array will prevent costly conversion when,
# e.g., formatting the Xarray reprs.
return False
def _convert_scalar(self, item: Any):
if isinstance(item, tuple) and self.level is not None:
idx = tuple(self.array.names).index(self.level)
item = item[idx]
return super()._convert_scalar(item)
def _index_get(
self, indexer: ExplicitIndexer, func_name: str
) -> PandasIndexingAdapter | np.ndarray:
result = super()._index_get(indexer, func_name)
if isinstance(result, type(self)):
result.level = self.level
return result
def __repr__(self) -> str:
if self.level is None:
return super().__repr__()
else:
props = (
f"(array={self.array!r}, level={self.level!r}, dtype={self.dtype!r})"
)
return f"{type(self).__name__}{props}"
def _repr_inline_(self, max_width: int) -> str:
if self.level is None:
return "MultiIndex"
else:
return super()._repr_inline_(max_width=max_width)
def copy(self, deep: bool = True) -> Self:
# see PandasIndexingAdapter.copy
array = self.array.copy(deep=True) if deep else self.array
return type(self)(array, self._dtype, self.level)
| PandasMultiIndexingAdapter |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-netmind/llama_index/llms/netmind/base.py | {
"start": 98,
"end": 1216
} | class ____(OpenAILike):
"""
Netmind LLM.
Examples:
`pip install llama-index-llms-netmind`
```python
from llama_index.llms.netmind import NetmindLLM
# set api key in env or in llm
# import os
# os.environ["NETMIND_API_KEY"] = "your api key"
llm = NetmindLLM(
model="meta-llama/Llama-3.3-70B-Instruct", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.netmind.ai/inference-api/openai/v1",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("NETMIND_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "NetmindLLM"
| NetmindLLM |
python | django__django | tests/check_framework/tests.py | {
"start": 675,
"end": 3376
} | class ____(SimpleTestCase):
def test_register_and_run_checks(self):
def f(**kwargs):
calls[0] += 1
return [1, 2, 3]
def f2(**kwargs):
return [4]
def f3(**kwargs):
return [5]
calls = [0]
# test register as decorator
registry = CheckRegistry()
registry.register()(f)
registry.register("tag1", "tag2")(f2)
registry.register("tag2", deploy=True)(f3)
# test register as function
registry2 = CheckRegistry()
registry2.register(f)
registry2.register(f2, "tag1", "tag2")
registry2.register(f3, "tag2", deploy=True)
# check results
errors = registry.run_checks()
errors2 = registry2.run_checks()
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [1, 2, 3, 4])
self.assertEqual(calls[0], 2)
errors = registry.run_checks(tags=["tag1"])
errors2 = registry2.run_checks(tags=["tag1"])
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [4])
errors = registry.run_checks(
tags=["tag1", "tag2"], include_deployment_checks=True
)
errors2 = registry2.run_checks(
tags=["tag1", "tag2"], include_deployment_checks=True
)
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [4, 5])
def test_register_no_kwargs_error(self):
registry = CheckRegistry()
msg = "Check functions must accept keyword arguments (**kwargs)."
with self.assertRaisesMessage(TypeError, msg):
@registry.register
def no_kwargs(app_configs, databases):
pass
def test_register_run_checks_non_iterable(self):
registry = CheckRegistry()
@registry.register
def return_non_iterable(**kwargs):
return Error("Message")
msg = (
"The function %r did not return a list. All functions registered "
"with the checks registry must return a list." % return_non_iterable
)
with self.assertRaisesMessage(TypeError, msg):
registry.run_checks()
def test_run_checks_database_exclusion(self):
registry = CheckRegistry()
database_errors = [checks.Warning("Database Check")]
@registry.register(Tags.database)
def database_system_check(**kwargs):
return database_errors
errors = registry.run_checks()
self.assertEqual(errors, [])
errors = registry.run_checks(databases=["default"])
self.assertEqual(errors, database_errors)
| SystemCheckFrameworkTests |
python | numba__numba | numba/tests/test_caching.py | {
"start": 37141,
"end": 38813
} | class ____(unittest.TestCase):
# Regression test for https://github.com/numba/numba/issues/3658
_numba_parallel_test_ = False
source_text_file1 = """
from file2 import function2
"""
source_text_file2 = """
from numba import njit
@njit('float64(float64)', cache=True)
def function1(x):
return x
@njit('float64(float64)', cache=True)
def function2(x):
return x
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
self.file1 = os.path.join(self.tempdir, 'file1.py')
with open(self.file1, 'w') as fout:
print(self.source_text_file1, file=fout)
self.file2 = os.path.join(self.tempdir, 'file2.py')
with open(self.file2, 'w') as fout:
print(self.source_text_file2, file=fout)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_caching_mutliple_files_with_signature(self):
# Execute file1.py
popen = subprocess.Popen([sys.executable, self.file1],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
msg = f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}"
self.assertEqual(popen.returncode, 0, msg=msg)
# Execute file2.py
popen = subprocess.Popen([sys.executable, self.file2],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
msg = f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}"
self.assertEqual(popen.returncode, 0, msg)
| TestCacheMultipleFilesWithSignature |
python | kamyu104__LeetCode-Solutions | Python/percentage-of-letter-in-string.py | {
"start": 38,
"end": 239
} | class ____(object):
def percentageLetter(self, s, letter):
"""
:type s: str
:type letter: str
:rtype: int
"""
return 100*s.count(letter)//len(s)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/find-all-duplicates-in-an-array.py | {
"start": 29,
"end": 392
} | class ____(object):
def findDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
result = []
for i in nums:
if nums[abs(i)-1] < 0:
result.append(abs(i))
else:
nums[abs(i)-1] *= -1
return result
# Time: O(n)
# Space: O(1)
| Solution |
python | huggingface__transformers | src/transformers/models/granitemoeshared/modeling_granitemoeshared.py | {
"start": 3028,
"end": 4030
} | class ____(nn.Module):
"""
MLP layer for shared experts
Args:
config:
Configuration object with model hyperparameters.
"""
def __init__(self, config: GraniteMoeSharedConfig):
super().__init__()
self.input_size = config.hidden_size
self.hidden_size = config.shared_intermediate_size
self.activation = ACT2FN[config.hidden_act]
self.input_linear = nn.Linear(self.input_size, self.hidden_size * 2, bias=False)
self.output_linear = nn.Linear(self.hidden_size, self.input_size, bias=False)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.input_linear(hidden_states)
chunked_hidden_states = hidden_states.chunk(2, dim=-1)
hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]
hidden_states = self.output_linear(hidden_states)
return hidden_states
@use_kernel_forward_from_hub("RMSNorm")
| GraniteMoeSharedMLP |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 41907,
"end": 43189
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"gui-scripts": 9000, "dynamic": ["gui-scripts"]}})
with pytest.raises(
ValueError,
match=(
"Metadata field `gui-scripts` cannot be both statically defined and listed in field `project.dynamic`"
),
):
_ = metadata.core.gui_scripts
def test_not_table(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"gui-scripts": 10}})
with pytest.raises(TypeError, match="Field `project.gui-scripts` must be a table"):
_ = metadata.core.gui_scripts
def test_entry_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"gui-scripts": {"foo": 7}}})
with pytest.raises(TypeError, match="Object reference `foo` of field `project.gui-scripts` must be a string"):
_ = metadata.core.gui_scripts
def test_correct(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"gui-scripts": {"foo": "bar", "bar": "baz"}}})
assert metadata.core.gui_scripts == metadata.core.gui_scripts == {"bar": "baz", "foo": "bar"}
| TestGUIScripts |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 939971,
"end": 940965
} | class ____(sgqlc.types.Type):
"""Parameters to be used for the repository_name condition"""
__schema__ = github_schema
__field_names__ = ("exclude", "include", "protected")
exclude = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))), graphql_name="exclude")
"""Array of repository names or patterns to exclude. The condition
will not pass if any of these patterns match.
"""
include = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))), graphql_name="include")
"""Array of repository names or patterns to include. One of these
patterns must match for the condition to pass. Also accepts `~ALL`
to include all repositories.
"""
protected = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="protected")
"""Target changes that match these patterns will be prevented except
by those with bypass permissions.
"""
| RepositoryNameConditionTarget |
python | huggingface__transformers | tests/models/unispeech_sat/test_modeling_unispeech_sat.py | {
"start": 1520,
"end": 13221
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=16,
feat_extract_norm="group",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=2,
num_attention_heads=2,
hidden_dropout_prob=0.1, # this is most likely not correctly set yet
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
mask_time_prob=0.5,
mask_time_length=2,
vocab_size=32,
do_stable_layer_norm=False,
tdnn_dim=(32, 32),
tdnn_kernel=(3, 3),
tdnn_dilation=(1, 1),
xvector_output_dim=32,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.tdnn_dim = tdnn_dim
self.tdnn_kernel = tdnn_kernel
self.tdnn_dilation = tdnn_dilation
self.xvector_output_dim = xvector_output_dim
self.scope = scope
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return UniSpeechSatConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
mask_time_prob=self.mask_time_prob,
mask_time_length=self.mask_time_length,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
tdnn_dim=self.tdnn_dim,
tdnn_kernel=self.tdnn_kernel,
tdnn_dilation=self.tdnn_dilation,
xvector_output_dim=self.xvector_output_dim,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = UniSpeechSatModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def create_and_check_batch_inference(self, config, input_values, *args):
# test does not pass for models making use of `group_norm`
# check: https://github.com/pytorch/fairseq/issues/3227
model = UniSpeechSatModel(config=config)
model.to(torch_device)
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0.0
batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state
for i in range(input_values.shape[0]):
input_slice = input_values[i : i + 1, : input_lengths[i]]
output = model(input_slice).last_hidden_state
batch_output = batch_outputs[i : i + 1, : output.shape[1]]
self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3))
def check_ctc_loss(self, config, input_values, *args):
model = UniSpeechSatForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_seq_classifier_loss(self, config, input_values, *args):
model = UniSpeechSatForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = UniSpeechSatForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
# it's important that we make sure that target lengths are at least
# one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = UniSpeechSatForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_xvector_training(self, config, *args):
config.ctc_zero_infinity = True
model = UniSpeechSatForXVector(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
# use a longer sequence length to account for TDNN temporal downsampling
input_values = floats_tensor([self.batch_size, self.seq_length * 2], scale=1.0)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = UniSpeechSatForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
| UniSpeechSatModelTester |
python | fastapi__sqlmodel | docs_src/tutorial/create_db_and_table/tutorial001.py | {
"start": 83,
"end": 409
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
SQLModel.metadata.create_all(engine)
| Hero |
python | django__django | tests/select_related_onetoone/models.py | {
"start": 601,
"end": 737
} | class ____(models.Model):
base_stats = models.OneToOneField(UserStat, models.CASCADE)
comments = models.IntegerField()
| StatDetails |
python | numpy__numpy | numpy/lib/_version.py | {
"start": 307,
"end": 4783
} | class ____:
"""Parse and compare numpy version strings.
NumPy has the following versioning scheme (numbers given are examples; they
can be > 9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance. Note that all development versions of the same
(pre-)release compare equal.
Parameters
----------
vstring : str
NumPy version string (``np.__version__``).
Examples
--------
>>> from numpy.lib import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
>>> # skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
Traceback (most recent call last):
...
ValueError: Not a valid numpy version string
"""
__module__ = "numpy.lib"
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (str, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, str):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return f"NumpyVersion({self.vstring})"
| NumpyVersion |
python | getsentry__sentry | src/sentry/search/events/fields.py | {
"start": 2067,
"end": 26786
} | class ____:
def __init__(self, name, alias, expression=None, expression_fn=None, result_type=None):
self.name = name
self.alias = alias
self.expression = expression
self.expression_fn = expression_fn
self.result_type = result_type
self.validate()
def get_expression(self, params) -> list[Any] | None:
if self.expression is not None:
return deepcopy(self.expression)
elif self.expression_fn is not None:
return self.expression_fn(params)
else:
return None
def get_field(self, params=None):
expression = self.get_expression(params)
if expression is not None:
expression.append(self.alias)
return expression
return self.alias
def validate(self) -> None:
assert self.alias is not None, f"{self.name}: alias is required"
assert (
self.expression is None or self.expression_fn is None
), f"{self.name}: only one of expression, expression_fn is allowed"
def project_threshold_config_expression(
organization_id: int | None, project_ids: list[int] | None
) -> list[object]:
"""
This function returns a column with the threshold and threshold metric
for each transaction based on project level settings. If no project level
thresholds are set, the will fallback to the default values. This column
is used in the new `count_miserable` and `user_misery` aggregates.
"""
if organization_id is None or project_ids is None:
raise InvalidSearchQuery("Missing necessary data for project threshold config")
project_threshold_configs = (
ProjectTransactionThreshold.objects.filter(
organization_id=organization_id,
project_id__in=project_ids,
)
.order_by("project_id")
.values("project_id", "threshold", "metric")
)
transaction_threshold_configs = (
ProjectTransactionThresholdOverride.objects.filter(
organization_id=organization_id,
project_id__in=project_ids,
)
.order_by("project_id")
.values("transaction", "project_id", "threshold", "metric")
)
num_project_thresholds = project_threshold_configs.count()
sentry_sdk.set_tag("project_threshold.count", num_project_thresholds)
sentry_sdk.set_tag(
"project_threshold.count.grouped",
format_grouped_length(num_project_thresholds, [10, 100, 250, 500]),
)
set_span_attribute("project_threshold.count", num_project_thresholds)
num_transaction_thresholds = transaction_threshold_configs.count()
sentry_sdk.set_tag("txn_threshold.count", num_transaction_thresholds)
sentry_sdk.set_tag(
"txn_threshold.count.grouped",
format_grouped_length(num_transaction_thresholds, [10, 100, 250, 500]),
)
set_span_attribute("txn_threshold.count", num_transaction_thresholds)
if num_project_thresholds + num_transaction_thresholds == 0:
return ["tuple", [f"'{DEFAULT_PROJECT_THRESHOLD_METRIC}'", DEFAULT_PROJECT_THRESHOLD]]
elif num_project_thresholds + num_transaction_thresholds > MAX_QUERYABLE_TRANSACTION_THRESHOLDS:
raise InvalidSearchQuery(
f"Exceeded {MAX_QUERYABLE_TRANSACTION_THRESHOLDS} configured transaction thresholds limit, try with fewer Projects."
)
project_threshold_config_index = [
"indexOf",
[
[
"array",
[["toUInt64", [config["project_id"]]] for config in project_threshold_configs],
],
"project_id",
],
PROJECT_THRESHOLD_CONFIG_INDEX_ALIAS,
]
project_transaction_override_config_index = [
"indexOf",
[
[
"array",
[
[
"tuple",
[
["toUInt64", [config["project_id"]]],
"'{}'".format(config["transaction"]),
],
]
for config in transaction_threshold_configs
],
],
["tuple", ["project_id", "transaction"]],
],
PROJECT_THRESHOLD_OVERRIDE_CONFIG_INDEX_ALIAS,
]
project_config_query: list[object] = (
[
"if",
[
[
"equals",
[
project_threshold_config_index,
0,
],
],
["tuple", [f"'{DEFAULT_PROJECT_THRESHOLD_METRIC}'", DEFAULT_PROJECT_THRESHOLD]],
[
"arrayElement",
[
[
"array",
[
[
"tuple",
[
"'{}'".format(TRANSACTION_METRICS[config["metric"]]),
config["threshold"],
],
]
for config in project_threshold_configs
],
],
project_threshold_config_index,
],
],
],
]
if project_threshold_configs
else ["tuple", [f"'{DEFAULT_PROJECT_THRESHOLD_METRIC}'", DEFAULT_PROJECT_THRESHOLD]]
)
if transaction_threshold_configs:
return [
"if",
[
[
"equals",
[
project_transaction_override_config_index,
0,
],
],
project_config_query,
[
"arrayElement",
[
[
"array",
[
[
"tuple",
[
"'{}'".format(TRANSACTION_METRICS[config["metric"]]),
config["threshold"],
],
]
for config in transaction_threshold_configs
],
],
project_transaction_override_config_index,
],
],
],
]
return project_config_query
def team_key_transaction_expression(organization_id, team_ids, project_ids):
if organization_id is None or team_ids is None or project_ids is None:
raise TypeError("Team key transactions parameters cannot be None")
team_key_transactions = (
TeamKeyTransaction.objects.filter(
organization_id=organization_id,
project_team__in=ProjectTeam.objects.filter(
project_id__in=project_ids, team_id__in=team_ids
),
)
.order_by("transaction", "project_team__project_id")
.values("transaction", "project_team__project_id")
.distinct("transaction", "project_team__project_id")[:MAX_QUERYABLE_TEAM_KEY_TRANSACTIONS]
)
count = len(team_key_transactions)
# NOTE: this raw count is not 100% accurate because if it exceeds
# `MAX_QUERYABLE_TEAM_KEY_TRANSACTIONS`, it will not be reflected
sentry_sdk.set_tag("team_key_txns.count", count)
sentry_sdk.set_tag(
"team_key_txns.count.grouped", format_grouped_length(count, [10, 100, 250, 500])
)
set_span_attribute("team_key_txns.count", count)
# There are no team key transactions marked, so hard code false into the query.
if count == 0:
return ["toInt8", [0]]
return [
"in",
[
["tuple", ["project_id", "transaction"]],
[
"tuple",
[
[
"tuple",
[
transaction["project_team__project_id"],
"'{}'".format(transaction["transaction"]),
],
]
for transaction in team_key_transactions
],
],
],
]
def normalize_count_if_condition(args: Mapping[str, str]) -> float | str | int:
"""Ensures that the condition is compatible with the column type"""
column = args["column"]
condition = args["condition"]
if column in ARRAY_FIELDS and condition not in ["equals", "notEquals"]:
raise InvalidSearchQuery(f"{condition} is not supported by count_if for {column}")
return condition
def normalize_count_if_value(args: Mapping[str, str]) -> float | str | int:
"""Ensures that the type of the third parameter is compatible with the first
and cast the value if needed
eg. duration = numeric_value, and not duration = string_value
"""
column = args["column"]
value = args["value"]
normalized_value: float | str | int
if (
column == "transaction.duration"
or is_duration_measurement(column)
or is_span_op_breakdown(column)
):
duration_match = DURATION_PATTERN.match(value.strip("'"))
if duration_match:
try:
normalized_value = parse_duration(*duration_match.groups())
except InvalidQuery as exc:
raise InvalidSearchQuery(str(exc))
else:
try:
normalized_value = float(value.strip("'"))
except Exception:
raise InvalidSearchQuery(f"{value} is not a valid value to compare with {column}")
# The non duration measurement
elif column == "measurements.cls":
try:
normalized_value = float(value)
except Exception:
raise InvalidSearchQuery(f"{value} is not a valid value to compare with {column}")
elif column == "transaction.status":
code = SPAN_STATUS_NAME_TO_CODE.get(value.strip("'"))
if code is None:
raise InvalidSearchQuery(f"{value} is not a valid value for transaction.status")
try:
normalized_value = int(code)
except Exception:
raise InvalidSearchQuery(f"{value} is not a valid value for transaction.status")
# TODO: not supporting field aliases
elif column in FIELD_ALIASES:
raise InvalidSearchQuery(f"{column} is not supported by count_if")
else:
normalized_value = value
return normalized_value
# When updating this list, also check if the following need to be updated:
# - convert_search_filter_to_snuba_query (otherwise aliased field will be treated as tag)
# - static/app/utils/discover/fields.tsx FIELDS (for discover column list and search box autocomplete)
# TODO: I think I have to support the release stage alias here maybe?
FIELD_ALIASES = {
field.name: field
for field in [
PseudoField("project", "project.id"),
PseudoField("issue", "issue.id"),
PseudoField(
"timestamp.to_hour", "timestamp.to_hour", expression=["toStartOfHour", ["timestamp"]]
),
PseudoField(
"timestamp.to_day", "timestamp.to_day", expression=["toStartOfDay", ["timestamp"]]
),
PseudoField(ERROR_UNHANDLED_ALIAS, ERROR_UNHANDLED_ALIAS, expression=["notHandled", []]),
PseudoField(
USER_DISPLAY_ALIAS,
USER_DISPLAY_ALIAS,
expression=["coalesce", ["user.email", "user.username", "user.id", "user.ip"]],
),
PseudoField(
PROJECT_THRESHOLD_CONFIG_ALIAS,
PROJECT_THRESHOLD_CONFIG_ALIAS,
expression_fn=lambda params: project_threshold_config_expression(
params.get("organization_id"),
params.get("project_id"),
),
),
# the team key transaction field is intentionally not added to the discover/fields list yet
# because there needs to be some work on the front end to integrate this into discover
PseudoField(
TEAM_KEY_TRANSACTION_ALIAS,
TEAM_KEY_TRANSACTION_ALIAS,
expression_fn=lambda params: team_key_transaction_expression(
params.get("organization_id"),
params.get("team_id"),
params.get("project_id"),
),
result_type="boolean",
),
PseudoField(
MEASUREMENTS_FRAMES_SLOW_RATE,
MEASUREMENTS_FRAMES_SLOW_RATE,
expression=[
"if",
[
["greater", ["measurements.frames_total", 0]],
["divide", ["measurements.frames_slow", "measurements.frames_total"]],
None,
],
],
result_type="percentage",
),
PseudoField(
MEASUREMENTS_FRAMES_FROZEN_RATE,
MEASUREMENTS_FRAMES_FROZEN_RATE,
expression=[
"if",
[
["greater", ["measurements.frames_total", 0]],
["divide", ["measurements.frames_frozen", "measurements.frames_total"]],
None,
],
],
result_type="percentage",
),
PseudoField(
MEASUREMENTS_STALL_PERCENTAGE,
MEASUREMENTS_STALL_PERCENTAGE,
expression=[
"if",
[
["greater", ["transaction.duration", 0]],
["divide", ["measurements.stall_total_time", "transaction.duration"]],
None,
],
],
result_type="percentage",
),
]
}
def format_column_arguments(column_args, arguments) -> None:
for i in range(len(column_args)):
if isinstance(column_args[i], (list, tuple)):
if isinstance(column_args[i][0], ArgValue):
column_args[i][0] = arguments[column_args[i][0].arg]
format_column_arguments(column_args[i][1], arguments)
elif isinstance(column_args[i], str):
column_args[i] = column_args[i].format(**arguments)
elif isinstance(column_args[i], ArgValue):
column_args[i] = arguments[column_args[i].arg]
def _lookback(columns, j, string):
"""For parse_arguments, check that the current character is preceeded by string"""
if j < len(string):
return False
return columns[j - len(string) : j] == string
def parse_arguments(_function: str, columns: str) -> list[str]:
"""
Some functions take a quoted string for their arguments that may contain commas,
which requires special handling.
This function attempts to be identical with the similarly named parse_arguments
found in static/app/utils/discover/fields.tsx
"""
args = []
quoted = False
in_tag = False
escaped = False
i, j = 0, 0
while j < len(columns):
if not in_tag and i == j and columns[j] == '"':
# when we see a quote at the beginning of
# an argument, then this is a quoted string
quoted = True
elif not quoted and columns[j] == "[" and _lookback(columns, j, "tags"):
# when the argument begins with tags[,
# then this is the beginning of the tag that may contain commas
in_tag = True
elif i == j and columns[j] == " ":
# argument has leading spaces, skip over them
i += 1
elif quoted and not escaped and columns[j] == "\\":
# when we see a slash inside a quoted string,
# the next character is an escape character
escaped = True
elif quoted and not escaped and columns[j] == '"':
# when we see a non-escaped quote while inside
# of a quoted string, we should end it
quoted = False
elif in_tag and not escaped and columns[j] == "]":
# when we see a non-escaped quote while inside
# of a quoted string, we should end it
in_tag = False
elif quoted and escaped:
# when we are inside a quoted string and have
# begun an escape character, we should end it
escaped = False
elif (quoted or in_tag) and columns[j] == ",":
# when we are inside a quoted string or tag and see
# a comma, it should not be considered an
# argument separator
pass
elif columns[j] == ",":
# when we see a comma outside of a quoted string
# it is an argument separator
args.append(columns[i:j].strip())
i = j + 1
j += 1
if i != j:
# add in the last argument if any
args.append(columns[i:].strip())
return [arg for arg in args if arg]
def resolve_field(field, params=None, functions_acl=None):
if not isinstance(field, str):
raise InvalidSearchQuery("Field names must be strings")
match = is_function(field)
if match:
return resolve_function(field, match, params, functions_acl)
if field in FIELD_ALIASES:
special_field = FIELD_ALIASES[field]
return ResolvedFunction(None, special_field.get_field(params), None)
tag_match = TAG_KEY_RE.search(field)
tag_field = tag_match.group("tag") if tag_match else field
if VALID_FIELD_PATTERN.match(tag_field):
return ResolvedFunction(None, field, None)
else:
raise InvalidSearchQuery(f"Invalid characters in field {field}")
def resolve_function(field, match=None, params=None, functions_acl=False):
if params is not None and field in params.get("aliases", {}):
alias = params["aliases"][field]
return ResolvedFunction(
FunctionDetails(field, FUNCTIONS["percentage"], {}),
None,
alias.aggregate,
)
function_name, columns, alias = parse_function(field, match)
function = FUNCTIONS[function_name]
if not function.is_accessible(functions_acl):
raise InvalidSearchQuery(f"{function.name}: no access to private function")
arguments = function.format_as_arguments(field, columns, params)
details = FunctionDetails(field, function, arguments)
if function.transform is not None:
snuba_string = function.transform.format(**arguments)
if alias is None:
alias = get_function_alias_with_columns(function.name, columns)
return ResolvedFunction(
details,
None,
[snuba_string, None, alias],
)
elif function.conditional_transform is not None:
condition, match, fallback = function.conditional_transform
if alias is None:
alias = get_function_alias_with_columns(function.name, columns)
if arguments[condition.arg] is not None:
snuba_string = match.format(**arguments)
else:
snuba_string = fallback.format(**arguments)
return ResolvedFunction(
details,
None,
[snuba_string, None, alias],
)
elif function.aggregate is not None:
aggregate = deepcopy(function.aggregate)
aggregate[0] = aggregate[0].format(**arguments)
if isinstance(aggregate[1], (list, tuple)):
format_column_arguments(aggregate[1], arguments)
elif isinstance(aggregate[1], ArgValue):
arg = aggregate[1].arg
# The aggregate function has only a single argument
# however that argument is an expression, so we have
# to make sure to nest it so it doesn't get treated
# as a list of arguments by snuba.
if isinstance(arguments[arg], (list, tuple)):
aggregate[1] = [arguments[arg]]
else:
aggregate[1] = arguments[arg]
if alias is not None:
aggregate[2] = alias
elif aggregate[2] is None:
aggregate[2] = get_function_alias_with_columns(function.name, columns)
return ResolvedFunction(details, None, aggregate)
elif function.column is not None:
# These can be very nested functions, so we need to iterate through all the layers
addition = deepcopy(function.column)
addition[0] = addition[0].format(**arguments)
if isinstance(addition[1], (list, tuple)):
format_column_arguments(addition[1], arguments)
if len(addition) < 3:
if alias is not None:
addition.append(alias)
else:
addition.append(get_function_alias_with_columns(function.name, columns))
elif len(addition) == 3:
if alias is not None:
addition[2] = alias
elif addition[2] is None:
addition[2] = get_function_alias_with_columns(function.name, columns)
else:
addition[2] = addition[2].format(**arguments)
return ResolvedFunction(details, addition, None)
def parse_combinator(function: str) -> tuple[str, str | None]:
for combinator in COMBINATORS:
kind = combinator.kind
if function.endswith(kind):
return function[: -len(kind)], kind
return function, None
def parse_function(field, match=None, err_msg=None):
if not match:
match = is_function(field)
if not match or match.group("function") not in FUNCTIONS:
if err_msg is None:
err_msg = f"{field} is not a valid function"
raise InvalidSearchQuery(err_msg)
function = match.group("function")
return (
function,
parse_arguments(function, match.group("columns")),
match.group("alias"),
)
def is_function(field: str) -> Match[str] | None:
return FUNCTION_PATTERN.search(field)
def is_typed_numeric_tag(key: str) -> bool:
match = TYPED_TAG_KEY_RE.search(key)
if match and match.group("type") == "number":
return True
return False
def get_function_alias(field: str) -> str:
match = FUNCTION_PATTERN.search(field)
if match is None:
return field
if match.group("alias") is not None:
return match.group("alias")
function = match.group("function")
columns = parse_arguments(function, match.group("columns"))
return get_function_alias_with_columns(function, columns)
def get_function_alias_with_columns(function_name, columns, prefix=None) -> str:
columns = re.sub(
r"[^\w]",
"_",
"_".join(
# Encode to ascii with backslashreplace so unicode chars become \u1234, then decode cause encode gives bytes
str(col).encode("ascii", errors="backslashreplace").decode()
for col in columns
),
)
alias = f"{function_name}_{columns}".rstrip("_")
if prefix:
alias = prefix + alias
return alias
def get_json_meta_type(field_alias, snuba_type, builder=None):
if builder:
function = builder.function_alias_map.get(field_alias)
else:
function = None
alias_definition = FIELD_ALIASES.get(field_alias)
if alias_definition and alias_definition.result_type is not None:
return alias_definition.result_type
snuba_json = get_json_type(snuba_type)
if snuba_json not in ["string", "null"]:
if function is not None:
result_type = function.instance.get_result_type(function.field, function.arguments)
if result_type is not None:
return result_type
function_match = FUNCTION_ALIAS_PATTERN.match(field_alias)
if function_match:
function_definition = FUNCTIONS.get(function_match.group(1))
if function_definition:
result_type = function_definition.get_result_type()
if result_type is not None:
return result_type
if field_alias == "transaction.status":
return "string"
# The builder will have Custom Measurement info etc.
field_type = builder.get_field_type(field_alias)
if field_type is not None:
return field_type
return snuba_json
def reflective_result_type(index=0):
def result_type_fn(function_arguments, parameter_values):
argument = function_arguments[index]
value = parameter_values[argument.name]
return argument.get_type(value)
return result_type_fn
| PseudoField |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 40715,
"end": 42770
} | class ____[T](BaseModel):
type T = int
t: 'T'
"""
)
# 'T' should resolve to the `TypeAliasType` instance, not the type variable:
assert mod_1.Model[int].model_fields['t'].annotation.__value__ is int
@pytest.mark.skipif(sys.version_info < (3, 12), reason='Test related to PEP 695 syntax.')
def test_annotation_scope_skipped(create_module) -> None:
# Documentation:
# https://docs.python.org/3/reference/executionmodel.html#annotation-scopes
# https://docs.python.org/3/reference/compound_stmts.html#generic-classes
# Under the hood, `parent_frame_namespace` skips the annotation scope so that
# we still properly fetch the namespace of `func` containing `Alias`.
mod_1 = create_module(
"""
from pydantic import BaseModel
def func() -> None:
Alias = int
class Model[T](BaseModel):
a: 'Alias'
return Model
Model = func()
"""
)
assert mod_1.Model.model_fields['a'].annotation is int
@pytest.mark.skipif(
platform.python_implementation() == 'PyPy' and sys.version_info < (3, 11),
reason='Flaky on PyPy',
)
def test_implicit_type_alias_recursive_error_message() -> None:
Json = list['Json']
with pytest.raises(RecursionError, match='.*If you made use of an implicit recursive type alias.*'):
TypeAdapter(Json)
def test_none_converted_as_none_type() -> None:
"""https://github.com/pydantic/pydantic/issues/12368.
In Python 3.14, `None` was not converted as `type(None)` by `typing._eval_type()`.
"""
class Model(BaseModel):
a: 'None' = None
assert Model.model_fields['a'].annotation is type(None)
assert Model(a=None).a is None
def test_typeddict_parent_from_other_module(create_module) -> None:
"""https://github.com/pydantic/pydantic/issues/12421."""
@create_module
def mod_1():
from typing_extensions import TypedDict
Int = int
class Base(TypedDict):
f: 'Int'
mod_2 = create_module(
f"""
from {mod_1.__name__} import Base
| Model |
python | pydantic__pydantic | pydantic/plugin/__init__.py | {
"start": 818,
"end": 1085
} | class ____(NamedTuple):
"""Path defining where `schema_type` was defined, or where `TypeAdapter` was called."""
module: str
name: str
SchemaKind: TypeAlias = Literal['BaseModel', 'TypeAdapter', 'dataclass', 'create_model', 'validate_call']
| SchemaTypePath |
python | conda__conda | conda/exceptions.py | {
"start": 12483,
"end": 12687
} | class ____(CondaError):
def __init__(self, location: PathType):
message = "Not a conda environment: %(location)s"
super().__init__(message, location=location)
| EnvironmentLocationNotFound |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 379,
"end": 413
} | class ____(Generic[P]):
var: P
| C |
python | django__django | tests/auth_tests/test_views.py | {
"start": 57114,
"end": 68590
} | class ____(MessagesTestMixin, AuthViewsTestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
# Make me a superuser before logging in.
User.objects.filter(username="testclient").update(
is_staff=True, is_superuser=True
)
def setUp(self):
self.login()
# Get the latest last_login value.
self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
return {
"username": user.username,
"password": user.password,
"email": user.email,
"is_active": user.is_active,
"is_staff": user.is_staff,
"is_superuser": user.is_superuser,
"last_login_0": user.last_login.strftime("%Y-%m-%d"),
"last_login_1": user.last_login.strftime("%H:%M:%S"),
"initial-last_login_0": user.last_login.strftime("%Y-%m-%d"),
"initial-last_login_1": user.last_login.strftime("%H:%M:%S"),
"date_joined_0": user.date_joined.strftime("%Y-%m-%d"),
"date_joined_1": user.date_joined.strftime("%H:%M:%S"),
"initial-date_joined_0": user.date_joined.strftime("%Y-%m-%d"),
"initial-date_joined_1": user.date_joined.strftime("%H:%M:%S"),
"first_name": user.first_name,
"last_name": user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with self.assertLogs("django.security.DisallowedModelAdminLookup", "ERROR"):
response = self.client.get(
reverse("auth_test_admin:auth_user_changelist")
+ "?password__startswith=sha1$"
)
self.assertEqual(response.status_code, 400)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data["email"] = "new_" + data["email"]
response = self.client.post(
reverse("auth_test_admin:auth_user_change", args=(self.admin.pk,)), data
)
self.assertRedirects(response, reverse("auth_test_admin:auth_user_changelist"))
row = LogEntry.objects.latest("id")
self.assertEqual(row.get_change_message(), "Changed Email address.")
def test_user_not_change(self):
response = self.client.post(
reverse("auth_test_admin:auth_user_change", args=(self.admin.pk,)),
self.get_user_data(self.admin),
)
self.assertRedirects(response, reverse("auth_test_admin:auth_user_changelist"))
row = LogEntry.objects.latest("id")
self.assertEqual(row.get_change_message(), "No fields changed.")
def test_user_with_usable_password_change_password(self):
user_change_url = reverse(
"auth_test_admin:auth_user_change", args=(self.admin.pk,)
)
password_change_url = reverse(
"auth_test_admin:auth_user_password_change", args=(self.admin.pk,)
)
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'<a role="button" class="button" href="([^"]*)">Reset password</a>',
response.text,
)[1]
self.assertEqual(urljoin(user_change_url, rel_link), password_change_url)
response = self.client.get(password_change_url)
# Test the form title with original (usable) password
self.assertContains(
response, f"<h1>Change password: {self.admin.username}</h1>"
)
# Breadcrumb.
self.assertContains(
response,
f'{self.admin.username}</a></li>\n<li aria-current="page">'
"Change password</li>",
)
# Usable password field.
self.assertContains(
response,
'<fieldset class="flex-container">'
"<legend>Password-based authentication:</legend>",
)
# Submit buttons
self.assertContains(response, '<input type="submit" name="set-password"')
self.assertContains(response, '<input type="submit" name="unset-password"')
# Password change.
response = self.client.post(
password_change_url,
{
"password1": "password1",
"password2": "password1",
},
)
self.assertRedirects(response, user_change_url)
self.assertMessages(
response, [Message(level=25, message="Password changed successfully.")]
)
row = LogEntry.objects.latest("id")
self.assertEqual(row.get_change_message(), "Changed password.")
self.logout()
self.login(password="password1")
# Disable password-based authentication without proper submit button.
response = self.client.post(
password_change_url,
{
"password1": "password1",
"password2": "password1",
"usable_password": "false",
},
)
self.assertRedirects(response, password_change_url)
self.assertMessages(
response,
[
Message(
level=40,
message="Conflicting form data submitted. Please try again.",
)
],
)
# No password change yet.
self.login(password="password1")
# Disable password-based authentication with proper submit button.
response = self.client.post(
password_change_url,
{
"password1": "password1",
"password2": "password1",
"usable_password": "false",
"unset-password": 1,
},
)
self.assertRedirects(response, user_change_url)
self.assertMessages(
response,
[Message(level=25, message="Password-based authentication was disabled.")],
)
row = LogEntry.objects.latest("id")
self.assertEqual(row.get_change_message(), "Changed password.")
self.logout()
# Password-based authentication was disabled.
with self.assertRaises(AssertionError):
self.login(password="password1")
self.admin.refresh_from_db()
self.assertIs(self.admin.has_usable_password(), False)
def test_user_with_unusable_password_change_password(self):
# Test for title with unusable password with a test user
test_user = User.objects.get(email="staffmember@example.com")
test_user.set_unusable_password()
test_user.save()
user_change_url = reverse(
"auth_test_admin:auth_user_change", args=(test_user.pk,)
)
password_change_url = reverse(
"auth_test_admin:auth_user_password_change", args=(test_user.pk,)
)
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'<a role="button" class="button" href="([^"]*)">Set password</a>',
response.text,
)[1]
self.assertEqual(urljoin(user_change_url, rel_link), password_change_url)
response = self.client.get(password_change_url)
# Test the form title with original (usable) password
self.assertContains(response, f"<h1>Set password: {test_user.username}</h1>")
# Breadcrumb.
self.assertContains(
response,
f'{test_user.username}</a></li>\n<li aria-current="page">'
"Set password</li>",
)
# Submit buttons
self.assertContains(response, '<input type="submit" name="set-password"')
self.assertNotContains(response, '<input type="submit" name="unset-password"')
response = self.client.post(
password_change_url,
{
"password1": "password1",
"password2": "password1",
},
)
self.assertRedirects(response, user_change_url)
self.assertMessages(
response, [Message(level=25, message="Password changed successfully.")]
)
row = LogEntry.objects.latest("id")
self.assertEqual(row.get_change_message(), "Changed password.")
def test_user_change_different_user_password(self):
u = User.objects.get(email="staffmember@example.com")
response = self.client.post(
reverse("auth_test_admin:auth_user_password_change", args=(u.pk,)),
{
"password1": "password1",
"password2": "password1",
},
)
self.assertRedirects(
response, reverse("auth_test_admin:auth_user_change", args=(u.pk,))
)
row = LogEntry.objects.latest("id")
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.get_change_message(), "Changed password.")
def test_password_change_bad_url(self):
response = self.client.get(
reverse("auth_test_admin:auth_user_password_change", args=("foobar",))
)
self.assertEqual(response.status_code, 404)
@mock.patch("django.contrib.auth.admin.UserAdmin.has_change_permission")
def test_user_change_password_passes_user_to_has_change_permission(
self, has_change_permission
):
url = reverse(
"auth_test_admin:auth_user_password_change", args=(self.admin.pk,)
)
self.client.post(url, {"password1": "password1", "password2": "password1"})
(_request, user), _kwargs = has_change_permission.call_args
self.assertEqual(user.pk, self.admin.pk)
def test_view_user_password_is_readonly(self):
u = User.objects.get(username="testclient")
u.is_superuser = False
u.save()
original_password = u.password
u.user_permissions.add(get_perm(User, "view_user"))
response = self.client.get(
reverse("auth_test_admin:auth_user_change", args=(u.pk,)),
)
algo, salt, hash_string = u.password.split("$")
self.assertContains(response, '<div class="readonly">testclient</div>')
# The password value is hashed.
self.assertContains(
response,
"<strong>algorithm</strong>: <bdi>%s</bdi>\n\n"
"<strong>salt</strong>: <bdi>%s********************</bdi>\n\n"
"<strong>hash</strong>: <bdi>%s**************************</bdi>\n\n"
% (
algo,
salt[:2],
hash_string[:6],
),
html=True,
)
self.assertNotContains(
response,
'<a role="button" class="button" href="../password/">Reset password</a>',
)
# Value in POST data is ignored.
data = self.get_user_data(u)
data["password"] = "shouldnotchange"
change_url = reverse("auth_test_admin:auth_user_change", args=(u.pk,))
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 403)
u.refresh_from_db()
self.assertEqual(u.password, original_password)
@override_settings(
AUTH_USER_MODEL="auth_tests.UUIDUser",
ROOT_URLCONF="auth_tests.urls_custom_user_admin",
)
| ChangelistTests |
python | pandas-dev__pandas | pandas/tests/extension/test_string.py | {
"start": 10335,
"end": 11021
} | class ____(base.Dim2CompatTests):
@pytest.fixture(autouse=True)
def arrow_not_supported(self, data):
if isinstance(data, ArrowStringArray):
pytest.skip(reason="2D support not implemented for ArrowStringArray")
def test_searchsorted_with_na_raises(data_for_sorting, as_series):
# GH50447
b, c, a = data_for_sorting
arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c]
arr[-1] = pd.NA
if as_series:
arr = pd.Series(arr)
msg = (
"searchsorted requires array to be sorted, "
"which is impossible with NAs present."
)
with pytest.raises(ValueError, match=msg):
arr.searchsorted(b)
| Test2DCompat |
python | sympy__sympy | sympy/physics/mechanics/system.py | {
"start": 1230,
"end": 40985
} | class ____(_Methods):
"""Class to define a multibody system and form its equations of motion.
Explanation
===========
A ``System`` instance stores the different objects associated with a model,
including bodies, joints, constraints, and other relevant information. With
all the relationships between components defined, the ``System`` can be used
to form the equations of motion using a backend, such as ``KanesMethod``.
The ``System`` has been designed to be compatible with third-party
libraries for greater flexibility and integration with other tools.
Attributes
==========
frame : ReferenceFrame
Inertial reference frame of the system.
fixed_point : Point
A fixed point in the inertial reference frame.
x : Vector
Unit vector fixed in the inertial reference frame.
y : Vector
Unit vector fixed in the inertial reference frame.
z : Vector
Unit vector fixed in the inertial reference frame.
q : ImmutableMatrix
Matrix of all the generalized coordinates, i.e. the independent
generalized coordinates stacked upon the dependent.
u : ImmutableMatrix
Matrix of all the generalized speeds, i.e. the independent generealized
speeds stacked upon the dependent.
q_ind : ImmutableMatrix
Matrix of the independent generalized coordinates.
q_dep : ImmutableMatrix
Matrix of the dependent generalized coordinates.
u_ind : ImmutableMatrix
Matrix of the independent generalized speeds.
u_dep : ImmutableMatrix
Matrix of the dependent generalized speeds.
u_aux : ImmutableMatrix
Matrix of auxiliary generalized speeds.
kdes : ImmutableMatrix
Matrix of the kinematical differential equations as expressions equated
to the zero matrix.
bodies : tuple of BodyBase subclasses
Tuple of all bodies that make up the system.
joints : tuple of Joint
Tuple of all joints that connect bodies in the system.
loads : tuple of LoadBase subclasses
Tuple of all loads that have been applied to the system.
actuators : tuple of ActuatorBase subclasses
Tuple of all actuators present in the system.
holonomic_constraints : ImmutableMatrix
Matrix with the holonomic constraints as expressions equated to the zero
matrix.
nonholonomic_constraints : ImmutableMatrix
Matrix with the nonholonomic constraints as expressions equated to the
zero matrix.
velocity_constraints : ImmutableMatrix
Matrix with the velocity constraints as expressions equated to the zero
matrix. These are by default derived as the time derivatives of the
holonomic constraints extended with the nonholonomic constraints.
eom_method : subclass of KanesMethod or LagrangesMethod
Backend for forming the equations of motion.
Examples
========
In the example below a cart with a pendulum is created. The cart moves along
the x axis of the rail and the pendulum rotates about the z axis. The length
of the pendulum is ``l`` with the pendulum represented as a particle. To
move the cart a time dependent force ``F`` is applied to the cart.
We first need to import some functions and create some of our variables.
>>> from sympy import symbols, simplify
>>> from sympy.physics.mechanics import (
... mechanics_printing, dynamicsymbols, RigidBody, Particle,
... ReferenceFrame, PrismaticJoint, PinJoint, System)
>>> mechanics_printing(pretty_print=False)
>>> g, l = symbols('g l')
>>> F = dynamicsymbols('F')
The next step is to create bodies. It is also useful to create a frame for
locating the particle with respect to the pin joint later on, as a particle
does not have a body-fixed frame.
>>> rail = RigidBody('rail')
>>> cart = RigidBody('cart')
>>> bob = Particle('bob')
>>> bob_frame = ReferenceFrame('bob_frame')
Initialize the system, with the rail as the Newtonian reference. The body is
also automatically added to the system.
>>> system = System.from_newtonian(rail)
>>> print(system.bodies[0])
rail
Create the joints, while immediately also adding them to the system.
>>> system.add_joints(
... PrismaticJoint('slider', rail, cart, joint_axis=rail.x),
... PinJoint('pin', cart, bob, joint_axis=cart.z,
... child_interframe=bob_frame,
... child_point=l * bob_frame.y)
... )
>>> system.joints
(PrismaticJoint: slider parent: rail child: cart,
PinJoint: pin parent: cart child: bob)
While adding the joints, the associated generalized coordinates, generalized
speeds, kinematic differential equations and bodies are also added to the
system.
>>> system.q
Matrix([
[q_slider],
[ q_pin]])
>>> system.u
Matrix([
[u_slider],
[ u_pin]])
>>> system.kdes
Matrix([
[u_slider - q_slider'],
[ u_pin - q_pin']])
>>> [body.name for body in system.bodies]
['rail', 'cart', 'bob']
With the kinematics established, we can now apply gravity and the cart force
``F``.
>>> system.apply_uniform_gravity(-g * system.y)
>>> system.add_loads((cart.masscenter, F * rail.x))
>>> system.loads
((rail_masscenter, - g*rail_mass*rail_frame.y),
(cart_masscenter, - cart_mass*g*rail_frame.y),
(bob_masscenter, - bob_mass*g*rail_frame.y),
(cart_masscenter, F*rail_frame.x))
With the entire system defined, we can now form the equations of motion.
Before forming the equations of motion, one can also run some checks that
will try to identify some common errors.
>>> system.validate_system()
>>> system.form_eoms()
Matrix([
[bob_mass*l*u_pin**2*sin(q_pin) - bob_mass*l*cos(q_pin)*u_pin'
- (bob_mass + cart_mass)*u_slider' + F],
[ -bob_mass*g*l*sin(q_pin) - bob_mass*l**2*u_pin'
- bob_mass*l*cos(q_pin)*u_slider']])
>>> simplify(system.mass_matrix)
Matrix([
[ bob_mass + cart_mass, bob_mass*l*cos(q_pin)],
[bob_mass*l*cos(q_pin), bob_mass*l**2]])
>>> system.forcing
Matrix([
[bob_mass*l*u_pin**2*sin(q_pin) + F],
[ -bob_mass*g*l*sin(q_pin)]])
The complexity of the above example can be increased if we add a constraint
to prevent the particle from moving in the horizontal (x) direction. This
can be done by adding a holonomic constraint. After which we should also
redefine what our (in)dependent generalized coordinates and speeds are.
>>> system.add_holonomic_constraints(
... bob.masscenter.pos_from(rail.masscenter).dot(system.x)
... )
>>> system.q_ind = system.get_joint('pin').coordinates
>>> system.q_dep = system.get_joint('slider').coordinates
>>> system.u_ind = system.get_joint('pin').speeds
>>> system.u_dep = system.get_joint('slider').speeds
With the updated system the equations of motion can be formed again.
>>> system.validate_system()
>>> system.form_eoms()
Matrix([[-bob_mass*g*l*sin(q_pin)
- bob_mass*l**2*u_pin'
- bob_mass*l*cos(q_pin)*u_slider'
- l*(bob_mass*l*u_pin**2*sin(q_pin)
- bob_mass*l*cos(q_pin)*u_pin'
- (bob_mass + cart_mass)*u_slider')*cos(q_pin)
- l*F*cos(q_pin)]])
>>> simplify(system.mass_matrix)
Matrix([
[bob_mass*l**2*sin(q_pin)**2, -cart_mass*l*cos(q_pin)],
[ l*cos(q_pin), 1]])
>>> simplify(system.forcing)
Matrix([
[-l*(bob_mass*g*sin(q_pin) + bob_mass*l*u_pin**2*sin(2*q_pin)/2
+ F*cos(q_pin))],
[
l*u_pin**2*sin(q_pin)]])
"""
def __init__(self, frame=None, fixed_point=None):
"""Initialize the system.
Parameters
==========
frame : ReferenceFrame, optional
The inertial frame of the system. If none is supplied, a new frame
will be created.
fixed_point : Point, optional
A fixed point in the inertial reference frame. If none is supplied,
a new fixed_point will be created.
"""
if frame is None:
frame = ReferenceFrame('inertial_frame')
elif not isinstance(frame, ReferenceFrame):
raise TypeError('Frame must be an instance of ReferenceFrame.')
self._frame = frame
if fixed_point is None:
fixed_point = Point('inertial_point')
elif not isinstance(fixed_point, Point):
raise TypeError('Fixed point must be an instance of Point.')
self._fixed_point = fixed_point
self._fixed_point.set_vel(self._frame, 0)
self._q_ind = ImmutableMatrix(1, 0, []).T
self._q_dep = ImmutableMatrix(1, 0, []).T
self._u_ind = ImmutableMatrix(1, 0, []).T
self._u_dep = ImmutableMatrix(1, 0, []).T
self._u_aux = ImmutableMatrix(1, 0, []).T
self._kdes = ImmutableMatrix(1, 0, []).T
self._hol_coneqs = ImmutableMatrix(1, 0, []).T
self._nonhol_coneqs = ImmutableMatrix(1, 0, []).T
self._vel_constrs = None
self._bodies = []
self._joints = []
self._loads = []
self._actuators = []
self._eom_method = None
@classmethod
def from_newtonian(cls, newtonian):
"""Constructs the system with respect to a Newtonian body."""
if isinstance(newtonian, Particle):
raise TypeError('A Particle has no frame so cannot act as '
'the Newtonian.')
system = cls(frame=newtonian.frame, fixed_point=newtonian.masscenter)
system.add_bodies(newtonian)
return system
@property
def fixed_point(self):
"""Fixed point in the inertial reference frame."""
return self._fixed_point
@property
def frame(self):
"""Inertial reference frame of the system."""
return self._frame
@property
def x(self):
"""Unit vector fixed in the inertial reference frame."""
return self._frame.x
@property
def y(self):
"""Unit vector fixed in the inertial reference frame."""
return self._frame.y
@property
def z(self):
"""Unit vector fixed in the inertial reference frame."""
return self._frame.z
@property
def bodies(self):
"""Tuple of all bodies that have been added to the system."""
return tuple(self._bodies)
@bodies.setter
@_reset_eom_method
def bodies(self, bodies):
bodies = self._objects_to_list(bodies)
self._check_objects(bodies, [], BodyBase, 'Bodies', 'bodies')
self._bodies = bodies
@property
def joints(self):
"""Tuple of all joints that have been added to the system."""
return tuple(self._joints)
@joints.setter
@_reset_eom_method
def joints(self, joints):
joints = self._objects_to_list(joints)
self._check_objects(joints, [], Joint, 'Joints', 'joints')
self._joints = []
self.add_joints(*joints)
@property
def loads(self):
"""Tuple of loads that have been applied on the system."""
return tuple(self._loads)
@loads.setter
@_reset_eom_method
def loads(self, loads):
loads = self._objects_to_list(loads)
self._loads = [_parse_load(load) for load in loads]
@property
def actuators(self):
"""Tuple of actuators present in the system."""
return tuple(self._actuators)
@actuators.setter
@_reset_eom_method
def actuators(self, actuators):
actuators = self._objects_to_list(actuators)
self._check_objects(actuators, [], ActuatorBase, 'Actuators',
'actuators')
self._actuators = actuators
@property
def q(self):
"""Matrix of all the generalized coordinates with the independent
stacked upon the dependent."""
return self._q_ind.col_join(self._q_dep)
@property
def u(self):
"""Matrix of all the generalized speeds with the independent stacked
upon the dependent."""
return self._u_ind.col_join(self._u_dep)
@property
def q_ind(self):
"""Matrix of the independent generalized coordinates."""
return self._q_ind
@q_ind.setter
@_reset_eom_method
def q_ind(self, q_ind):
self._q_ind, self._q_dep = self._parse_coordinates(
self._objects_to_list(q_ind), True, [], self.q_dep, 'coordinates')
@property
def q_dep(self):
"""Matrix of the dependent generalized coordinates."""
return self._q_dep
@q_dep.setter
@_reset_eom_method
def q_dep(self, q_dep):
self._q_ind, self._q_dep = self._parse_coordinates(
self._objects_to_list(q_dep), False, self.q_ind, [], 'coordinates')
@property
def u_ind(self):
"""Matrix of the independent generalized speeds."""
return self._u_ind
@u_ind.setter
@_reset_eom_method
def u_ind(self, u_ind):
self._u_ind, self._u_dep = self._parse_coordinates(
self._objects_to_list(u_ind), True, [], self.u_dep, 'speeds')
@property
def u_dep(self):
"""Matrix of the dependent generalized speeds."""
return self._u_dep
@u_dep.setter
@_reset_eom_method
def u_dep(self, u_dep):
self._u_ind, self._u_dep = self._parse_coordinates(
self._objects_to_list(u_dep), False, self.u_ind, [], 'speeds')
@property
def u_aux(self):
"""Matrix of auxiliary generalized speeds."""
return self._u_aux
@u_aux.setter
@_reset_eom_method
def u_aux(self, u_aux):
self._u_aux = self._parse_coordinates(
self._objects_to_list(u_aux), True, [], [], 'u_auxiliary')[0]
@property
def kdes(self):
"""Kinematical differential equations as expressions equated to the zero
matrix. These equations describe the coupling between the generalized
coordinates and the generalized speeds."""
return self._kdes
@kdes.setter
@_reset_eom_method
def kdes(self, kdes):
kdes = self._objects_to_list(kdes)
self._kdes = self._parse_expressions(
kdes, [], 'kinematic differential equations')
@property
def holonomic_constraints(self):
"""Matrix with the holonomic constraints as expressions equated to the
zero matrix."""
return self._hol_coneqs
@holonomic_constraints.setter
@_reset_eom_method
def holonomic_constraints(self, constraints):
constraints = self._objects_to_list(constraints)
self._hol_coneqs = self._parse_expressions(
constraints, [], 'holonomic constraints')
@property
def nonholonomic_constraints(self):
"""Matrix with the nonholonomic constraints as expressions equated to
the zero matrix."""
return self._nonhol_coneqs
@nonholonomic_constraints.setter
@_reset_eom_method
def nonholonomic_constraints(self, constraints):
constraints = self._objects_to_list(constraints)
self._nonhol_coneqs = self._parse_expressions(
constraints, [], 'nonholonomic constraints')
@property
def velocity_constraints(self):
"""Matrix with the velocity constraints as expressions equated to the
zero matrix. The velocity constraints are by default derived from the
holonomic and nonholonomic constraints unless they are explicitly set.
"""
if self._vel_constrs is None:
return self.holonomic_constraints.diff(dynamicsymbols._t).col_join(
self.nonholonomic_constraints)
return self._vel_constrs
@velocity_constraints.setter
@_reset_eom_method
def velocity_constraints(self, constraints):
if constraints is None:
self._vel_constrs = None
return
constraints = self._objects_to_list(constraints)
self._vel_constrs = self._parse_expressions(
constraints, [], 'velocity constraints')
@property
def eom_method(self):
"""Backend for forming the equations of motion."""
return self._eom_method
@staticmethod
def _objects_to_list(lst):
"""Helper to convert passed objects to a list."""
if not iterable(lst): # Only one object
return [lst]
return list(lst[:]) # converts Matrix and tuple to flattened list
@staticmethod
def _check_objects(objects, obj_lst, expected_type, obj_name, type_name):
"""Helper to check the objects that are being added to the system.
Explanation
===========
This method checks that the objects that are being added to the system
are of the correct type and have not already been added. If any of the
objects are not of the correct type or have already been added, then
an error is raised.
Parameters
==========
objects : iterable
The objects that would be added to the system.
obj_lst : list
The list of objects that are already in the system.
expected_type : type
The type that the objects should be.
obj_name : str
The name of the category of objects. This string is used to
formulate the error message for the user.
type_name : str
The name of the type that the objects should be. This string is used
to formulate the error message for the user.
"""
seen = set(obj_lst)
duplicates = set()
wrong_types = set()
for obj in objects:
if not isinstance(obj, expected_type):
wrong_types.add(obj)
if obj in seen:
duplicates.add(obj)
else:
seen.add(obj)
if wrong_types:
raise TypeError(f'{obj_name} {wrong_types} are not {type_name}.')
if duplicates:
raise ValueError(f'{obj_name} {duplicates} have already been added '
f'to the system.')
def _parse_coordinates(self, new_coords, independent, old_coords_ind,
old_coords_dep, coord_type='coordinates'):
"""Helper to parse coordinates and speeds."""
# Construct lists of the independent and dependent coordinates
coords_ind, coords_dep = old_coords_ind[:], old_coords_dep[:]
if not iterable(independent):
independent = [independent] * len(new_coords)
for coord, indep in zip(new_coords, independent):
if indep:
coords_ind.append(coord)
else:
coords_dep.append(coord)
# Check types and duplicates
current = {'coordinates': self.q_ind[:] + self.q_dep[:],
'speeds': self.u_ind[:] + self.u_dep[:],
'u_auxiliary': self._u_aux[:],
coord_type: coords_ind + coords_dep}
_validate_coordinates(**current)
return (ImmutableMatrix(1, len(coords_ind), coords_ind).T,
ImmutableMatrix(1, len(coords_dep), coords_dep).T)
@staticmethod
def _parse_expressions(new_expressions, old_expressions, name,
check_negatives=False):
"""Helper to parse expressions like constraints."""
old_expressions = old_expressions[:]
new_expressions = list(new_expressions) # Converts a possible tuple
if check_negatives:
check_exprs = old_expressions + [-expr for expr in old_expressions]
else:
check_exprs = old_expressions
System._check_objects(new_expressions, check_exprs, Basic, name,
'expressions')
for expr in new_expressions:
if expr == 0:
raise ValueError(f'Parsed {name} are zero.')
return ImmutableMatrix(1, len(old_expressions) + len(new_expressions),
old_expressions + new_expressions).T
@_reset_eom_method
def add_coordinates(self, *coordinates, independent=True):
"""Add generalized coordinate(s) to the system.
Parameters
==========
*coordinates : dynamicsymbols
One or more generalized coordinates to be added to the system.
independent : bool or list of bool, optional
Boolean whether a coordinate is dependent or independent. The
default is True, so the coordinates are added as independent by
default.
"""
self._q_ind, self._q_dep = self._parse_coordinates(
coordinates, independent, self.q_ind, self.q_dep, 'coordinates')
@_reset_eom_method
def add_speeds(self, *speeds, independent=True):
"""Add generalized speed(s) to the system.
Parameters
==========
*speeds : dynamicsymbols
One or more generalized speeds to be added to the system.
independent : bool or list of bool, optional
Boolean whether a speed is dependent or independent. The default is
True, so the speeds are added as independent by default.
"""
self._u_ind, self._u_dep = self._parse_coordinates(
speeds, independent, self.u_ind, self.u_dep, 'speeds')
@_reset_eom_method
def add_auxiliary_speeds(self, *speeds):
"""Add auxiliary speed(s) to the system.
Parameters
==========
*speeds : dynamicsymbols
One or more auxiliary speeds to be added to the system.
"""
self._u_aux = self._parse_coordinates(
speeds, True, self._u_aux, [], 'u_auxiliary')[0]
@_reset_eom_method
def add_kdes(self, *kdes):
"""Add kinematic differential equation(s) to the system.
Parameters
==========
*kdes : Expr
One or more kinematic differential equations.
"""
self._kdes = self._parse_expressions(
kdes, self.kdes, 'kinematic differential equations',
check_negatives=True)
@_reset_eom_method
def add_holonomic_constraints(self, *constraints):
"""Add holonomic constraint(s) to the system.
Parameters
==========
*constraints : Expr
One or more holonomic constraints, which are expressions that should
be zero.
"""
self._hol_coneqs = self._parse_expressions(
constraints, self._hol_coneqs, 'holonomic constraints',
check_negatives=True)
@_reset_eom_method
def add_nonholonomic_constraints(self, *constraints):
"""Add nonholonomic constraint(s) to the system.
Parameters
==========
*constraints : Expr
One or more nonholonomic constraints, which are expressions that
should be zero.
"""
self._nonhol_coneqs = self._parse_expressions(
constraints, self._nonhol_coneqs, 'nonholonomic constraints',
check_negatives=True)
@_reset_eom_method
def add_bodies(self, *bodies):
"""Add body(ies) to the system.
Parameters
==========
bodies : Particle or RigidBody
One or more bodies.
"""
self._check_objects(bodies, self.bodies, BodyBase, 'Bodies', 'bodies')
self._bodies.extend(bodies)
@_reset_eom_method
def add_loads(self, *loads):
"""Add load(s) to the system.
Parameters
==========
*loads : Force or Torque
One or more loads.
"""
loads = [_parse_load(load) for load in loads] # Checks the loads
self._loads.extend(loads)
@_reset_eom_method
def apply_uniform_gravity(self, acceleration):
"""Apply uniform gravity to all bodies in the system by adding loads.
Parameters
==========
acceleration : Vector
The acceleration due to gravity.
"""
self.add_loads(*gravity(acceleration, *self.bodies))
@_reset_eom_method
def add_actuators(self, *actuators):
"""Add actuator(s) to the system.
Parameters
==========
*actuators : subclass of ActuatorBase
One or more actuators.
"""
self._check_objects(actuators, self.actuators, ActuatorBase,
'Actuators', 'actuators')
self._actuators.extend(actuators)
@_reset_eom_method
def add_joints(self, *joints):
"""Add joint(s) to the system.
Explanation
===========
This methods adds one or more joints to the system including its
associated objects, i.e. generalized coordinates, generalized speeds,
kinematic differential equations and the bodies.
Parameters
==========
*joints : subclass of Joint
One or more joints.
Notes
=====
For the generalized coordinates, generalized speeds and bodies it is
checked whether they are already known by the system instance. If they
are, then they are not added. The kinematic differential equations are
however always added to the system, so you should not also manually add
those on beforehand.
"""
self._check_objects(joints, self.joints, Joint, 'Joints', 'joints')
self._joints.extend(joints)
coordinates, speeds, kdes, bodies = (OrderedSet() for _ in range(4))
for joint in joints:
coordinates.update(joint.coordinates)
speeds.update(joint.speeds)
kdes.update(joint.kdes)
bodies.update((joint.parent, joint.child))
coordinates = coordinates.difference(self.q)
speeds = speeds.difference(self.u)
kdes = kdes.difference(self.kdes[:] + (-self.kdes)[:])
bodies = bodies.difference(self.bodies)
self.add_coordinates(*tuple(coordinates))
self.add_speeds(*tuple(speeds))
self.add_kdes(*(kde for kde in tuple(kdes) if not kde == 0))
self.add_bodies(*tuple(bodies))
def get_body(self, name):
    """Retrieve a body from the system by name.

    Parameters
    ==========

    name : str
        The name of the body to retrieve.

    Returns
    =======

    RigidBody or Particle
        The body with the given name, or None if no such body exists.
    """
    # ``next`` with a default reproduces the original first-match
    # semantics, including the implicit None when nothing matches.
    return next((body for body in self._bodies if body.name == name), None)
def get_joint(self, name):
    """Retrieve a joint from the system by name.

    Parameters
    ==========

    name : str
        The name of the joint to retrieve.

    Returns
    =======

    subclass of Joint
        The joint with the given name, or None if no such joint exists.
    """
    for candidate in self._joints:
        if candidate.name == name:
            return candidate
    # Explicit None for the not-found case (the original fell off the end).
    return None
def _form_eoms(self):
    # Forward to the public ``form_eoms`` using its default backend.
    return self.form_eoms()
def form_eoms(self, eom_method=KanesMethod, **kwargs):
    """Form the equations of motion of the system.

    Parameters
    ==========

    eom_method : subclass of KanesMethod or LagrangesMethod
        Backend class to be used for forming the equations of motion. The
        default is ``KanesMethod``.
    **kwargs
        Extra keyword arguments forwarded to the backend constructor.
        Arguments that the system supplies itself (frame, coordinates,
        speeds, constraints, loads, bodies, ...) may not be overridden.

    Returns
    ========

    ImmutableMatrix
        Vector of equations of motions.

    Examples
    ========

    This is a simple example for a one degree of freedom translational
    spring-mass-damper.

    >>> from sympy import S, symbols
    >>> from sympy.physics.mechanics import (
    ...     LagrangesMethod, dynamicsymbols, PrismaticJoint, Particle,
    ...     RigidBody, System)
    >>> q = dynamicsymbols('q')
    >>> qd = dynamicsymbols('q', 1)
    >>> m, k, b = symbols('m k b')
    >>> wall = RigidBody('W')
    >>> system = System.from_newtonian(wall)
    >>> bob = Particle('P', mass=m)
    >>> bob.potential_energy = S.Half * k * q**2
    >>> system.add_joints(PrismaticJoint('J', wall, bob, q, qd))
    >>> system.add_loads((bob.masscenter, b * qd * system.x))
    >>> system.form_eoms(LagrangesMethod)
    Matrix([[-b*Derivative(q(t), t) + k*q(t) + m*Derivative(q(t), (t, 2))]])

    We can also solve for the states using the 'rhs' method.

    >>> system.rhs()
    Matrix([
    [ Derivative(q(t), t)],
    [(b*Derivative(q(t), t) - k*q(t))/m]])
    """
    # Combine the explicitly added loads with the loads generated by the
    # actuators.
    # KanesMethod does not accept empty iterables
    loads = self.loads + tuple(
        load for act in self.actuators for load in act.to_loads())
    loads = loads if loads else None
    if issubclass(eom_method, KanesMethod):
        # These arguments are derived from the system's own state, so a
        # caller is not allowed to pass them in as well.
        disallowed_kwargs = {
            "frame", "q_ind", "u_ind", "kd_eqs", "q_dependent",
            "u_dependent", "u_auxiliary", "configuration_constraints",
            "velocity_constraints", "forcelist", "bodies"}
        wrong_kwargs = disallowed_kwargs.intersection(kwargs)
        if wrong_kwargs:
            raise ValueError(
                f"The following keyword arguments are not allowed to be "
                f"overwritten in {eom_method.__name__}: {wrong_kwargs}.")
        # ``**kwargs`` comes last on purpose: remaining (allowed) options,
        # such as ``explicit_kinematics``, may still be overridden by the
        # caller.
        kwargs = {"frame": self.frame, "q_ind": self.q_ind,
                  "u_ind": self.u_ind, "kd_eqs": self.kdes,
                  "q_dependent": self.q_dep, "u_dependent": self.u_dep,
                  "configuration_constraints": self.holonomic_constraints,
                  "velocity_constraints": self.velocity_constraints,
                  "u_auxiliary": self.u_aux,
                  "forcelist": loads, "bodies": self.bodies,
                  "explicit_kinematics": False, **kwargs}
        self._eom_method = eom_method(**kwargs)
    elif issubclass(eom_method, LagrangesMethod):
        disallowed_kwargs = {
            "frame", "qs", "forcelist", "bodies", "hol_coneqs",
            "nonhol_coneqs", "Lagrangian"}
        wrong_kwargs = disallowed_kwargs.intersection(kwargs)
        if wrong_kwargs:
            raise ValueError(
                f"The following keyword arguments are not allowed to be "
                f"overwritten in {eom_method.__name__}: {wrong_kwargs}.")
        kwargs = {"frame": self.frame, "qs": self.q, "forcelist": loads,
                  "bodies": self.bodies,
                  "hol_coneqs": self.holonomic_constraints,
                  "nonhol_coneqs": self.nonholonomic_constraints, **kwargs}
        if "Lagrangian" not in kwargs:
            # ``Lagrangian`` is a disallowed kwarg above, so at this point
            # it is always computed here from the frame and bodies.
            kwargs["Lagrangian"] = Lagrangian(kwargs["frame"],
                                              *kwargs["bodies"])
        self._eom_method = eom_method(**kwargs)
    else:
        raise NotImplementedError(f'{eom_method} has not been implemented.')
    return self.eom_method._form_eoms()
def rhs(self, inv_method=None):
    """Compute the equations of motion in the explicit form.

    Parameters
    ==========

    inv_method : str
        The specific sympy inverse matrix calculation method to use. For a
        list of valid methods, see
        :meth:`~sympy.matrices.matrixbase.MatrixBase.inv`

    Returns
    ========

    ImmutableMatrix
        Equations of motion in the explicit form.

    See Also
    ========

    sympy.physics.mechanics.kane.KanesMethod.rhs:
        KanesMethod's ``rhs`` function.
    sympy.physics.mechanics.lagrange.LagrangesMethod.rhs:
        LagrangesMethod's ``rhs`` function.
    """
    # ``eom_method`` is the backend instance created by ``form_eoms``.
    return self.eom_method.rhs(inv_method=inv_method)
@property
def mass_matrix(self):
    r"""The mass matrix of the system.

    Explanation
    ===========

    The mass matrix $M_d$ and the forcing vector $f_d$ of a system describe
    the system's dynamics according to the following equations:

    .. math::
        M_d \dot{u} = f_d

    where $\dot{u}$ is the time derivative of the generalized speeds.
    """
    # Delegates to the backend instance created by ``form_eoms``.
    return self.eom_method.mass_matrix
@property
def mass_matrix_full(self):
    r"""The mass matrix of the system, augmented by the kinematic
    differential equations in explicit or implicit form.

    Explanation
    ===========

    The full mass matrix $M_m$ and the full forcing vector $f_m$ of a system
    describe the dynamics and kinematics according to the following
    equation:

    .. math::
        M_m \dot{x} = f_m

    where $x$ is the state vector stacking $q$ and $u$.
    """
    # Delegates to the backend instance created by ``form_eoms``.
    return self.eom_method.mass_matrix_full
@property
def forcing(self):
    """The forcing vector of the system."""
    # Delegates to the backend instance created by ``form_eoms``.
    return self.eom_method.forcing
@property
def forcing_full(self):
    """The forcing vector of the system, augmented by the kinematic
    differential equations in explicit or implicit form."""
    # Delegates to the backend instance created by ``form_eoms``.
    return self.eom_method.forcing_full
def validate_system(self, eom_method=KanesMethod, check_duplicates=False):
    """Validates the system using some basic checks.

    Explanation
    ===========

    This method validates the system based on the following checks:

    - The number of dependent generalized coordinates should equal the
      number of holonomic constraints.
    - All generalized coordinates defined by the joints should also be known
      to the system.
    - If ``KanesMethod`` is used as a ``eom_method``:

      - All generalized speeds and kinematic differential equations
        defined by the joints should also be known to the system.
      - The number of dependent generalized speeds should equal the number
        of velocity constraints.
      - The number of generalized coordinates should be less than or equal
        to the number of generalized speeds.
      - The number of generalized coordinates should equal the number of
        kinematic differential equations.

    - If ``LagrangesMethod`` is used as ``eom_method``:

      - There should not be any generalized speeds that are not
        derivatives of the generalized coordinates (this includes the
        generalized speeds defined by the joints).

    Parameters
    ==========

    eom_method : subclass of KanesMethod or LagrangesMethod
        Backend class that will be used for forming the equations of motion.
        There are different checks for the different backends. The default
        is ``KanesMethod``.
    check_duplicates : bool
        Boolean whether the system should be checked for duplicate
        definitions. The default is False, because duplicates are already
        checked when adding objects to the system.

    Notes
    =====

    This method is not guaranteed to be backwards compatible as it may
    improve over time. The method can become both more and less strict in
    certain areas. However a well-defined system should always pass all
    these tests.
    """
    # Collect every failure message and raise them together at the end so
    # the user sees all problems at once.
    msgs = []
    # Save some data in variables
    n_hc = self.holonomic_constraints.shape[0]
    n_vc = self.velocity_constraints.shape[0]
    n_q_dep, n_u_dep = self.q_dep.shape[0], self.u_dep.shape[0]
    q_set, u_set = set(self.q), set(self.u)
    n_q, n_u = len(q_set), len(u_set)
    # Check number of holonomic constraints
    if n_q_dep != n_hc:
        msgs.append(filldedent(f"""
        The number of dependent generalized coordinates {n_q_dep} should be
        equal to the number of holonomic constraints {n_hc}."""))
    # Check if all joint coordinates and speeds are present
    missing_q = set()
    for joint in self.joints:
        missing_q.update(set(joint.coordinates).difference(q_set))
    if missing_q:
        msgs.append(filldedent(f"""
        The generalized coordinates {missing_q} used in joints are not added
        to the system."""))
    # Method dependent checks
    if issubclass(eom_method, KanesMethod):
        n_kdes = len(self.kdes)
        missing_kdes, missing_u = set(), set()
        for joint in self.joints:
            missing_u.update(set(joint.speeds).difference(u_set))
            # A kde also counts as known when its negation is stored.
            missing_kdes.update(set(joint.kdes).difference(
                self.kdes[:] + (-self.kdes)[:]))
        if missing_u:
            msgs.append(filldedent(f"""
            The generalized speeds {missing_u} used in joints are not added
            to the system."""))
        if missing_kdes:
            msgs.append(filldedent(f"""
            The kinematic differential equations {missing_kdes} used in
            joints are not added to the system."""))
        if n_u_dep != n_vc:
            msgs.append(filldedent(f"""
            The number of dependent generalized speeds {n_u_dep} should be
            equal to the number of velocity constraints {n_vc}."""))
        if n_q > n_u:
            msgs.append(filldedent(f"""
            The number of generalized coordinates {n_q} should be less than
            or equal to the number of generalized speeds {n_u}."""))
        if n_u != n_kdes:
            msgs.append(filldedent(f"""
            The number of generalized speeds {n_u} should be equal to the
            number of kinematic differential equations {n_kdes}."""))
    elif issubclass(eom_method, LagrangesMethod):
        # Lagrange's method only supports speeds that are time derivatives
        # of the generalized coordinates.
        not_qdots = set(self.u).difference(self.q.diff(dynamicsymbols._t))
        for joint in self.joints:
            not_qdots.update(set(
                joint.speeds).difference(self.q.diff(dynamicsymbols._t)))
        if not_qdots:
            msgs.append(filldedent(f"""
            The generalized speeds {not_qdots} are not supported by this
            method. Only derivatives of the generalized coordinates are
            supported. If these symbols are used in your expressions, then
            this will result in wrong equations of motion."""))
        if self.u_aux:
            msgs.append(filldedent(f"""
            This method does not support auxiliary speeds. If these symbols
            are used in your expressions, then this will result in wrong
            equations of motion. The auxiliary speeds are {self.u_aux}."""))
    else:
        raise NotImplementedError(f'{eom_method} has not been implemented.')
    if check_duplicates:  # Should be redundant
        duplicates_to_check = [('generalized coordinates', self.q),
                               ('generalized speeds', self.u),
                               ('auxiliary speeds', self.u_aux),
                               ('bodies', self.bodies),
                               ('joints', self.joints)]
        for name, lst in duplicates_to_check:
            seen = set()
            # ``seen.add`` returns None, so an element lands in
            # ``duplicates`` only when it was already in ``seen``; the add
            # happens as a side effect of the membership test.
            duplicates = {x for x in lst if x in seen or seen.add(x)}
            if duplicates:
                msgs.append(filldedent(f"""
                The {name} {duplicates} exist multiple times within the
                system."""))
    if msgs:
        raise ValueError('\n'.join(msgs))
| System |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2.py | {
"start": 4180,
"end": 7341
class ____(object):
  """Manages the state associated with FeatureColumns.

  Some `FeatureColumn`s create variables or resources to assist their
  computation. The `StateManager` is responsible for creating and storing these
  objects since `FeatureColumn`s are supposed to be stateless configuration
  only.

  Every method here is abstract: each one discards its arguments and raises
  `NotImplementedError`, so subclasses must override the operations they
  support.
  """

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates a new variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      trainable: Whether this variable is trainable or not.
      use_resource: If true, we use resource variables. Otherwise we use
        RefVariable.
      initializer: initializer instance (callable).

    Returns:
      The created variable.
    """
    # `del` marks the arguments as intentionally unused in this abstract base.
    del feature_column, name, shape, dtype, trainable, use_resource, initializer
    raise NotImplementedError('StateManager.create_variable')

  def add_variable(self, feature_column, var):
    """Adds an existing variable to the state.

    Args:
      feature_column: A `FeatureColumn` object to associate this variable with.
      var: The variable.
    """
    del feature_column, var
    raise NotImplementedError('StateManager.add_variable')

  def get_variable(self, feature_column, name):
    """Returns an existing variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.
    """
    del feature_column, name
    # Fix: the message previously read 'StateManager.get_var', which does not
    # match this method's name and misleads when the error surfaces.
    raise NotImplementedError('StateManager.get_variable')

  def add_resource(self, feature_column, name, resource):
    """Creates a new resource.

    Resources can be things such as tables, variables, trackables, etc.

    Args:
      feature_column: A `FeatureColumn` object this resource corresponds to.
      name: Name of the resource.
      resource: The resource.

    Returns:
      The created resource.
    """
    del feature_column, name, resource
    raise NotImplementedError('StateManager.add_resource')

  def has_resource(self, feature_column, name):
    """Returns true iff a resource with same name exists.

    Resources can be things such as tables, variables, trackables, etc.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: Name of the resource.
    """
    del feature_column, name
    raise NotImplementedError('StateManager.has_resource')

  def get_resource(self, feature_column, name):
    """Returns an already created resource.

    Resources can be things such as tables, variables, trackables, etc.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: Name of the resource.
    """
    del feature_column, name
    raise NotImplementedError('StateManager.get_resource')
@tf_export('__internal__.feature_column.StateManager', v1=[])
| StateManager |
python | ApeWorX__ape | src/ape/plugins/_utils.py | {
"start": 2281,
"end": 2822
class ____(str, Enum):
    """String-valued enumeration of the supported output formats."""

    # NOTE(review): member semantics are not visible here; the names suggest
    # a plain listing (DEFAULT), a name-prefixed listing (PREFIXED) and a
    # pip-freeze style listing (FREEZE) -- confirm against the CLI code.
    DEFAULT = "default"
    PREFIXED = "prefixed"
    FREEZE = "freeze"
def _filter_plugins_from_dists(dists: Iterable) -> Iterator[str]:
    """Yield the names of ``ape-`` plugin distributions found in ``dists``.

    Supports both distribution objects that expose a ``name`` attribute
    (Python 3.10 or greater) and older ones that only expose a ``metadata``
    mapping with a ``"Name"`` key (Python 3.9).
    """
    for dist in dists:
        dist_name = getattr(dist, "name", "")
        if dist_name:
            # Python 3.10 or greater.
            if dist_name.startswith("ape-"):
                yield dist_name
        else:
            # Python 3.9: fall back to the metadata mapping.
            meta = getattr(dist, "metadata", {})
            if meta:
                dist_name = meta.get("Name", "")
                if dist_name.startswith("ape-"):
                    yield dist_name
| OutputFormat |
python | google__pytype | pytype/tests/test_annotations.py | {
"start": 110,
"end": 32869
} | class ____(test_base.BaseTest):
"""Tests for PEP 484 style inline annotations."""
def test_none_unpacking_is(self):
"""Tests that is works with None."""
self.Check("""
from typing import Optional
def f(x: Optional[str]) -> str:
if x is None:
return ""
return x
""")
def test_none_unpacking_is_not(self):
"""Tests that is not works with None."""
self.Check("""
from typing import Optional
def f(x: Optional[str]) -> str:
if x is not None:
return x
return ""
""")
def test_only_annotations(self):
ty = self.Infer("""
def bar(p1: str, p2: complex) -> int:
return 0
""")
self.assertTypesMatchPytd(
ty,
"""
def bar(p1: str, p2: complex) -> int: ...
""",
)
def test_deep(self):
ty = self.Infer("""
def bar(p1: str, p2: complex) -> None:
pass
""")
self.assertTypesMatchPytd(
ty,
"""
def bar(p1: str, p2: complex) -> None: ...
""",
)
def test_union(self):
ty = self.Infer("""
import typing
def foo(x: typing.Union[int, float], y: int):
return x + y
""")
self.assertTypesMatchPytd(
ty,
"""
import typing
from typing import Union
def foo(x: Union[int, float], y:int) -> Union[int, float]: ...
""",
)
def test_call_error(self):
errors = self.CheckWithErrors("""
s = {1}
def foo(x: int):
s.intersection([x])
foo(3.0) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"x: int.*x: float"})
def test_ambiguous_arg(self):
self.Check("""
def f(x: int):
return x
def g(y, z):
if y:
x = 3
elif z:
x = 3j
else:
x = "foo"
f(x) # TODO(b/63407497): should be wrong-arg-types
""")
# The error should be ["Expected: (x: int)",
# "Actually passed: (x: Union[complex, int, str])"]
def test_inner_error(self):
errors = self.CheckWithErrors("""
def foo(x: int):
return x.upper() # attribute-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"upper.*int"})
def test_list(self):
ty = self.Infer("""
from typing import List
def foo(l1: List[int], l2: List[str], b):
if b:
x = l1
y = 3
else:
x = l2
y = "foo"
x.append(y)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
def foo(l1: List[int], l2: List[str], b) -> None: ...
""",
)
def test_analyze_init(self):
ty = self.Infer("""
from typing import List
class Foo:
def f(self, x: List[int]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
class Foo:
def f(self, x: List[int]) -> None: ...
""",
)
def test_string_annotation(self):
ty = self.Infer("""
def f(c: "int") -> "None":
c += 1
return
""")
self.assertTypesMatchPytd(
ty,
"""
def f(c: int) -> None: ...
""",
)
def test_unicode_annotation(self):
ty = self.Infer("""
def f(c: u"int") -> u"None":
c += 1
return
""")
self.assertTypesMatchPytd(
ty,
"""
def f(c: int) -> None: ...
""",
)
def test_future_unicode_literal_annotation(self):
ty = self.Infer("""
from __future__ import unicode_literals
def f(c: "int") -> "None":
c += 1
return
""")
self.assertTypesMatchPytd(
ty,
"""
def f(c: int) -> None: ...
""",
)
def test_typing_only_import(self):
ty = self.Infer("""
import typing
if typing.TYPE_CHECKING:
import calendar
def f(c: "calendar.Calendar") -> int:
return c.getfirstweekday()
""")
self.assertTypesMatchPytd(
ty,
"""
import calendar
import typing
def f(c: calendar.Calendar) -> int: ...
""",
)
def test_ambiguous_annotation(self):
errors = self.CheckWithErrors("""
def foo(x: int if __random__ else float): # invalid-annotation[e1]
return x
def foo(x: "int if __random__ else float"): # invalid-annotation[e2]
return x
""")
self.assertErrorRegexes(
errors,
{
"e1": r"float or int.*x.*constant",
# For a late annotation, we print the string literal, which is why the
# types below are not in alphabetical order.
"e2": r"int.*float.*x.*constant",
},
)
def test_bad_string_annotation(self):
errors = self.CheckWithErrors("""
def foo(x: str()): # invalid-annotation[e]
return x
""")
self.assertErrorRegexes(errors, {"e": r"x.*constant"})
def test_bad_return(self):
self.CheckWithErrors("""
def foo(x: str, y: str) -> int:
return "foo" # bad-return-type
""")
def test_multiple_returns(self):
errors = self.CheckWithErrors("""
def foo(x: str, y: str) -> int:
if x:
return "foo" # bad-return-type[e1]
else:
return 3j # bad-return-type[e2]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Expected.*int.*Actual.*str",
"e2": r"Expected.*int.*Actual.*complex",
},
)
@test_utils.skipIfPy(
(3, 10),
(3, 12),
reason="Logs one error for all bad returns in <=3.9, =3.11",
)
def test_ambiguous_return(self):
errors = self.CheckWithErrors("""
def foo(x: str) -> int:
if x:
y = "foo"
else:
y = 3j
return y # bad-return-type[e]
""")
self.assertErrorRegexes(
errors, {"e": r"Expected.*int.*Actual.*Union(?=.*complex).*str"}
)
@test_utils.skipUnlessPy(
(3, 10),
(3, 12),
reason="Logs one error per bad return in 3.10 and 3.12",
)
def test_ambiguous_return_310_312(self):
errors = self.CheckWithErrors("""
def foo(x: str) -> int:
if x:
y = "foo"
else:
y = 3j
return y # bad-return-type[e1] # bad-return-type[e2]
""")
self.assertErrorSequences(
errors,
{
"e1": ["Expected: int", "Actually returned: str"],
"e2": ["Expected: int", "Actually returned: complex"],
},
)
def test_default_return(self):
ty = self.Infer("""
class Foo:
def bar(self, x: float, default="") -> str:
default.upper
return default
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def bar(self, x: float, default=...) -> str: ...
""",
)
def test_nocompat_bool(self):
self.CheckWithErrors("""
def bar(x: bool) -> bool:
return None # bad-return-type
bar(None) # wrong-arg-types
""")
def test_compat_float(self):
self.Check("""
def bar(x: float) -> float:
return 1
bar(42)
""")
def test_compat_unicode_str(self):
# Use str to be identical in py2 and py3
self.Check("""
from typing import Text
def bar(x: Text) -> Text:
return str("foo")
bar(str("bar"))
""")
def test_unsolvable(self):
self.assertNoCrash(
self.Check,
"""
import unknown_module
def f(x: unknown_module.Iterable):
pass
""",
)
def test_any(self):
ty = self.Infer("""
from typing import Any
def f(x: Any):
pass
x = f(3)
""")
self.assertTypesMatchPytd(
ty,
"""
def f(x) -> None: ...
x = ... # type: None
""",
)
def test_dict(self):
self.CheckWithErrors("""
from typing import Dict, List
def keys(d: Dict[str, int]):
return
keys({"foo": 3})
keys({}) # ok
keys({3: 3}) # wrong-arg-types
""")
def test_sequence(self):
self.CheckWithErrors("""
from typing import Sequence
def f(s: Sequence):
return s
f([1,2,3])
f((1,2,3))
f({1,2,3}) # wrong-arg-types
f(1) # wrong-arg-types
""")
def test_optional(self):
self.CheckWithErrors("""
from typing import Optional
def f(s: Optional[int]):
return s
f(1)
f(None)
f("foo") # wrong-arg-types
""")
def test_set(self):
self.CheckWithErrors("""
from typing import Set
def f(d: Set[str]):
return
f({"foo"}) # ok
f(set()) # ok
f({}) # {} isn't a set # wrong-arg-types
f({3}) # wrong-arg-types
""")
def test_frozenset(self):
self.CheckWithErrors("""
from typing import FrozenSet
def f(d: FrozenSet[str]):
return
f(frozenset(["foo"])) # ok
f(frozenset()) # ok
f(frozenset([3])) # wrong-arg-types
""")
def test_generic_and_typevar(self):
self.assertNoCrash(
self.Check,
"""
import typing
_T = typing.TypeVar("_T")
class A(typing.Generic[_T]):
...
""",
)
def test_generic_and_double_typevar(self):
self.assertNoCrash(
self.Check,
"""
import typing
_T = typing.TypeVar("_T")
_S = typing.TypeVar("_S")
class A(typing.Generic[_T, _S]):
...
""",
)
def test_jump_into_class_through_annotation(self):
self.Check("""
class Foo:
def __init__(self) -> None:
self.myset = set()
def qux(self):
self.myset.add("foo")
def bar(foo: "Foo"):
foo.qux()
""")
def test_forward_declarations(self):
self.Check("""
def f(a: "B"):
return a
class B:
pass
""")
self.Check("""
def f(a) -> "B":
return B()
class B:
pass
""")
def test_without_forward_decl(self):
errorlog = self.CheckWithErrors("""
def f(a) -> Bar: # name-error[e]
return Bar()
class Bar:
pass
""")
self.assertErrorRegexes(errorlog, {"e": r"Bar"})
def test_invalid_forward_decl(self):
self.Check("""
def f(a) -> "Foo":
return Foo()
class Foo:
pass
""")
errorlog = self.CheckWithErrors("""
def f(a: "Foo"): # name-error[e]
return B()
class B:
pass
""")
self.assertErrorRegexes(errorlog, {"e": r"Foo"})
def test_forward_decl_bad_return(self):
errorlog = self.CheckWithErrors("""
def f() -> "Foo":
return 1 # bad-return-type[e]
class Foo:
pass
""")
# Error message along the lines: No attribute 'bar' on Foo
self.assertErrorRegexes(errorlog, {"e": r"return type.*int"})
def test_confusing_forward_decl(self):
errorlog = self.CheckWithErrors("""
class Foo:
def foo(self):
return 4
def f() -> "Foo":
return Foo()
class Foo:
def bar(self):
return 2
def g():
return f().bar() # attribute-error[e]
""")
# Error message along the lines: No attribute 'bar' on Foo
self.assertErrorRegexes(errorlog, {"e": r"\'bar\'.*Foo"})
def test_return_type_error(self):
errors = self.CheckWithErrors("""
class FooBar: pass
def f() -> FooBar:
return 3 # bad-return-type[e]
""")
self.assertErrorRegexes(errors, {"e": r"Expected: FooBar"})
def test_unknown_argument(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
def factory() -> type: ...
""",
)
ty = self.Infer(
"""
import a
A = a.factory()
def f(x: A):
return x.name
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
from typing import Any
A = ... # type: Any
def f(x) -> Any: ...
""",
)
def test_bad_call_no_kwarg(self):
ty, errors = self.InferWithErrors("""
def foo():
labels = {
'baz': None
}
labels['baz'] = bar( # wrong-arg-types[e]
labels['baz'])
def bar(path: str, **kwargs):
return path
""")
self.assertTypesMatchPytd(
ty,
"""
def foo() -> None: ...
def bar(path: str, **kwargs) -> str: ...
""",
)
error = r"Actually passed:.*path: None"
self.assertErrorRegexes(errors, {"e": error})
def test_bad_call_with_kwarg(self):
ty, errors = self.InferWithErrors("""
def foo():
labels = {
'baz': None
}
labels['baz'] = bar( # wrong-arg-types[e]
labels['baz'], x=42)
def bar(path: str, **kwargs):
return path
""")
self.assertTypesMatchPytd(
ty,
"""
def foo() -> None: ...
def bar(path: str, **kwargs) -> str: ...
""",
)
error = r"Actually passed:.*path: None"
self.assertErrorRegexes(errors, {"e": error})
def test_skip_functions_with_annotations(self):
ty = self.Infer(
"""
_analyzed_baz = None
class Foo:
def __init__(self):
self._executed_init = True
def bar(self, x: int) -> None:
self._analyzed_bar = True
def baz(x: int) -> None:
global _analyzed_baz
_analyzed_baz = 3
""",
analyze_annotated=False,
)
self.assertTypesMatchPytd(
ty,
"""
_analyzed_baz = ... # type: None
class Foo:
# We expect to *not* see _analyzed_bar here, because it's an attribute
# initialized by a function we're not analyzing.
_executed_init = ... # type: bool
def __init__(self) -> None: ...
def bar(self, x: int) -> None: ...
def baz(x: int) -> None: ...
""",
)
def test_annotated_init(self):
ty = self.Infer("""
class A:
def __init__(self, x: str):
self.x = x
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
x = ... # type: str
def __init__(self, x: str) -> None: ...
""",
)
def test_union_instantiation(self):
# If unions are not instantiated properly, the call to x.value will
# cause an error and Infer will fail.
self.Infer("""
from typing import Union
class Container1:
def __init__(self, value):
self.value1 = value
class Container2:
def __init__(self, value):
self.value2 = value
def func(x: Union[Container1, Container2]):
if isinstance(x, Container1):
return x.value1
else:
return x.value2
""")
def test_imprecise_annotation(self):
ty, errors = self.InferWithErrors("""
from typing import Union
class A: pass
class B:
x = 42
def f(v: Union[A, B]):
return v.x # attribute-error[e]
f(A())
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
class A: ...
class B:
x = ... # type: int
def f(v: Union[A, B]) -> int: ...
""",
)
self.assertErrorRegexes(errors, {"e": r"x.*A"})
def test_tuple(self):
ty = self.Infer("""
def f():
return (0, "")
def g(x: str):
return x
x = g(f()[1])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple
def f() -> Tuple[int, str]: ...
def g(x: str) -> str: ...
x = ... # type: str
""",
)
def test_optional_arg(self):
self.Check("""
def f(x: str, y: bool=False):
pass
f("", y=True)
""")
def test_empty(self):
self.Check("""
from typing import Any, List
def f(x: List[Any]):
pass
f([])
""")
def test_inner_string(self):
self.Check("""
from typing import List, Union
def f(x: List["int"]):
pass
def g(x: Union["int"]):
pass
""")
def test_ambiguous_inner_annotation(self):
errors = self.CheckWithErrors("""
from typing import List, Union
def f(x: List[int if __random__ else str]): # invalid-annotation[e1]
pass
def g(x: Union[int if __random__ else str]): # invalid-annotation[e2]
pass
def h(x: List[Union[int, str]]): # okay
pass
""")
self.assertErrorRegexes(
errors,
{
"e1": r"list\[int\] or list\[str\].*constant",
"e2": r"int or str.*constant",
},
)
def test_kwargs(self):
ty, errors = self.InferWithErrors("""
from typing import Dict
def f(x, **kwargs: int):
return kwargs
def g() -> Dict[str, float]:
return __any_object__
def h() -> Dict[float, int]:
return __any_object__
f("", y=42)
f("", **{})
f("", **{"y": 42})
f("", **g()) # wrong-arg-types[e1]
f("", **h()) # wrong-arg-types[e2]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict
def f(x, **kwargs: int) -> Dict[str, int]: ...
def g() -> Dict[str, float]: ...
def h() -> Dict[float, int]: ...
""",
)
error1 = (
r"Expected.*Mapping\[str, int\].*"
r"Actually passed.*dict\[str, float\]"
)
error2 = (
r"Expected.*Mapping\[str, int\].*"
r"Actually passed.*dict\[float, int\]"
)
self.assertErrorRegexes(errors, {"e1": error1, "e2": error2})
@test_base.skip("Types not checked due to function.Args.simplify")
def test_simplified_varargs_and_kwargs(self):
errors = self.CheckWithErrors("""
def f(x, *args: int):
pass
def g(x, **kwargs: int):
pass
f("", 42.0) # wrong-arg-types[e1]
g("", y=42.0) # wrong-arg-types[e2]
g("", **{"y": 42.0}) # wrong-arg-types[e3]
""")
error = r"Expected.*int.*Actually passed.*float"
self.assertErrorRegexes(errors, {"e1": error, "e2": error, "e3": error})
def test_use_varargs_and_kwargs(self):
ty = self.Infer("""
class A:
pass
def f(*args: A):
return args[0]
def g(**kwargs: A):
return kwargs["x"]
v1 = f()
v2 = g()
""")
self.assertTypesMatchPytd(
ty,
"""
class A: ...
def f(*args: A) -> A: ...
def g(**kwargs: A) -> A: ...
v1 = ... # type: A
v2 = ... # type: A
""",
)
def test_use_varargs_and_kwargs_in_forward_references(self):
self.Check("""
class Foo:
def f(self, *args: "Foo", **kwargs: "Foo"):
for a in args:
pass
for a in kwargs:
pass
def Bar():
Foo().f()
""")
def test_nested_none_type(self):
ty, errors = self.InferWithErrors("""
from typing import List, Union
class A:
x = 42
def f() -> Union[A, None]:
pass
def g() -> List[None]:
return [None]
v1 = f().x # attribute-error[e]
v2 = g()[0]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, Union
class A:
x = ... # type: int
def f() -> Union[A, None]: ...
def g() -> List[None]: ...
v1 = ... # type: int
v2 = ... # type: None
""",
)
self.assertErrorRegexes(errors, {"e": r"x.*None"})
def test_match_late_annotation(self):
errors = self.CheckWithErrors("""
class A:
def f(self, x: "A"):
pass
def f():
A().f(42) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"A.*int"})
def test_recursive_forward_reference(self):
errors = self.CheckWithErrors("""
class A:
def __init__(self, x: "A"):
self.foo = x.foo
f(x) # wrong-arg-types[e1]
def method1(self):
self.foo
def method2(self):
self.bar # attribute-error[e2]
def f(x: int):
pass
""")
self.assertErrorRegexes(errors, {"e1": r"int.*A", "e2": r"bar"})
def test_module_level_forward_reference_error(self):
errors = self.CheckWithErrors("""
class A:
def f(self, x: "A"):
pass
A().f(42) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"A.*int"})
def test_return_annotation1(self):
ty = self.Infer("""
class A:
def __init__(self):
self.x = 42
@staticmethod
def New() -> "A":
return A()
x = A.New().x
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
x = ... # type: int
def __init__(self) -> None: ...
@staticmethod
def New() -> A: ...
x = ... # type: int
""",
)
def test_return_annotation2(self):
ty = self.Infer("""
class A:
def __init__(self):
self.x = 42
@staticmethod
def New() -> "A":
return A()
def f():
return A.New().x
""")
self.assertTypesMatchPytd(
ty,
"""
class A:
x = ... # type: int
def __init__(self) -> None: ...
@staticmethod
def New() -> A: ...
def f() -> int: ...
""",
)
def test_deeply_nested_annotation(self):
self.Check("""
from typing import Any, Dict, List, Optional
def G(x: Optional[List[Dict[str, Any]]]):
if x:
pass
def F(x: Optional[List[Dict[str, Any]]]):
G(x)
""")
def test_nested_late_annotation(self):
self.Check("""
from typing import List
Type = "int"
def f(x: "List[Type]"):
pass
""")
def test_late_annotation(self):
ty = self.Infer("""
def new_x() -> 'X':
return X()
class X:
def __init__(self) -> None:
self.foo = 1
def get_foo() -> int:
return new_x().foo
""")
self.assertTypesMatchPytd(
ty,
"""
def new_x() -> X: ...
def get_foo() -> int: ...
class X:
foo = ... # type: int
def __init__(self) -> None: ...
""",
)
def test_change_annotated_arg(self):
ty, _ = self.InferWithErrors("""
from typing import Dict
def f(x: Dict[str, str]):
x[True] = 42 # container-type-mismatch
return x
v = f({"a": "b"})
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Union
def f(x: Dict[str, str]) -> Dict[Union[str, bool], Union[str, int]]: ...
v = ... # type: Dict[Union[str, bool], Union[str, int]]
""",
)
def test_inner_string_annotation(self):
ty = self.Infer("""
from typing import List
def f(x: List["A"]) -> int:
return 0
class A:
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
def f(x: List[A]) -> int: ...
class A: ...
""",
)
def test_type_alias_annotation(self):
ty = self.Infer("""
from typing import List
TypeA = "A"
ListA = "List[A]"
def f(x: "ListA") -> int:
return 0
class A:
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
ListA = ... # type: str
TypeA = ... # type: str
def f(x: typing.List[A]) -> int: ...
class A:
pass
""",
)
def test_double_string(self):
ty = self.Infer("""
from typing import List
def f(x: "List[\\"int\\"]") -> int:
return 0
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
def f(x: List[int]) -> int: ...
""",
)
def test_duplicate_identifier(self):
ty = self.Infer("""
t = int
def f(x: t) -> int: return 0
def g(x: "t") -> int: return 0
t = float
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Type
t: Type[float]
def f(x: int) -> int: ...
def g(x: int) -> int: ...
""",
)
def test_ellipsis(self):
ty, errors = self.InferWithErrors("""
from typing import Dict, Tuple
def f(x: ...): pass # experimental "inferred type": see b/213607272
def g(x: Tuple[str, ...]): pass
def h(x: Dict[..., int]): pass # invalid-annotation[e]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict, Tuple
def f(x) -> None: ...
def g(x: Tuple[str, ...]) -> None: ...
def h(x: Dict[Any, int]) -> None: ...
""",
)
self.assertErrorRegexes(errors, {"e": r"Ellipsis.*Dict"})
def test_custom_container(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Generic
T = TypeVar("T")
T2 = TypeVar("T2")
class Foo(Generic[T]):
def __init__(self, x: T2):
self = Foo[T2]
""",
)
errors = self.CheckWithErrors(
"""
import foo
def f(x: foo.Foo[int]):
pass
f(foo.Foo(42))
f(foo.Foo("")) # wrong-arg-types[e]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"Foo\[int\].*Foo\[str\]"})
def test_no_implicit_optional(self):
ty, _ = self.InferWithErrors("""
from typing import Optional, Union
def f1(x: str = None): # annotation-type-mismatch
pass
def f2(x: Optional[str] = None):
pass
def f3(x: Union[str, None] = None):
pass
def f4(x: Union[str, int] = None): # annotation-type-mismatch
pass
f1(None) # wrong-arg-types
f2(None)
f3(None)
f4(None) # wrong-arg-types
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional, Union
def f1(x: str = ...) -> None: ...
def f2(x: Optional[str] = ...) -> None: ...
def f3(x: Optional[str] = ...) -> None: ...
def f4(x: Union[str, int] = ...) -> None: ...
""",
)
def test_infer_return(self):
ty = self.Infer(
"""
def f(x: int):
return x
""",
analyze_annotated=False,
)
self.assertTypesMatchPytd(
ty,
"""
def f(x: int) -> int: ...
""",
)
def test_return_abstract_dict(self):
self.Check("""
from typing import Dict
def f(x, y):
pass
def g() -> Dict:
return {"y": None}
def h():
f(x=None, **g())
""")
def test_forward_reference_in_type_alias(self):
self.Check("""
from typing import List
X = List["Y"]
Y = List["Z"]
Z = List[int]
""")
@test_utils.skipBeforePy((3, 12), "type aliases are new in 3.12")
def test_use_builtin_type_alias(self):
self.Check("""
type MyType = list[str]
using_mytype: MyType = ['foo', 'bar']
""")
def test_fully_quoted_annotation(self):
self.Check("""
from typing import Optional
class A:
OBJ = ()
def __init__(self, parent: "Optional[A]"):
self.parent = (self.OBJ, parent)
""")
def test_quoted_generic_parameter(self):
ty = self.Infer("""
from typing import Callable, List
def f(x: List["Callable[[int], str]"]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, List
def f(x: List[Callable[[int], str]]) -> None: ...
""",
)
def test_late_annotation_non_name_error(self):
self.CheckWithErrors("""
class Foo:
pass
def f(x: "Foo.Bar"): # attribute-error
pass
""")
def test_keep_container_with_error(self):
ty, _ = self.InferWithErrors("""
from typing import Dict
def f(x: "Dict[str, int.error]"): # attribute-error
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict
def f(x: Dict[str, Any]) -> None: ...
""",
)
def test_count_type_parameters(self):
self.Check("""
from typing import Callable, TypeVar
T = TypeVar('T')
def f() -> Callable[[Callable[..., T]], Callable[..., T]]:
return __any_object__
""")
def test_set_annotated_attribute(self):
self.Check("""
from typing import Optional
class A:
def __init__(self):
self.x = None # type: Optional[str]
def Set(self, x: str) -> None:
if self.x is None:
self.x = x
x = None # type: Optional[A]
""")
def test_nested_class_forward_ref(self):
self.Check("""
from typing import List
def f():
class Foo:
X = List['int']
""")
def test_nested_forward_ref_to_import(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Foo: ...
""",
)
self.Check(
"""
import foo
from typing import Tuple
def f(x: Tuple[str, 'foo.Foo']):
pass
""",
pythonpath=[d.path],
)
def test_tuple_container_check(self):
# Regression test for a container_type_mismatch crash that was caused by
# two tuples having the same type key and one of them therefore being
# omitted from argument views.
self.Check("""
from typing import Dict, Tuple
_FilesMap = Dict[Tuple[int, int], int]
class ShardinfoGen:
def _GenerateFiles(self):
def _GenerateService():
d2f = {} # type: _FilesMap
d2f[(0, 1)] = 3
d2f[(4, 5)] = 6
files.update(d2f)
files = {} # type: _FilesMap
for _ in __any_object__:
_GenerateService()
""")
def test_newtype_container_check(self):
errors = self.CheckWithErrors("""
from typing import Dict, NewType, Set
ClusterInfoConfig = NewType('ClusterInfoConfig', Dict[str, int])
class CommonConfigBuilder:
def _AddMachines(self, cluster_info_config: ClusterInfoConfig):
cluster_info_config[''] = {} # container-type-mismatch[e]
""")
self.assertErrorRegexes(
errors, {"e": r"Container: dict\[_K, _V\].*_V: int.*_V: dict"}
)
def test_check_defaults(self):
# Because 0.0 == False in Python, previously buggy caching led to `False`
# being converted to the cached abstract value for `0.0`.
self.Check("""
def f(x=0.0):
pass
def g(y: bool = False):
pass
""")
def test_circular_ref(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Callable, Generic, Sequence, TypeVar
T = TypeVar('T')
class BaseRegion(Generic[T]):
@property
def zones(self) -> Sequence[T]: ...
class BaseZone(Generic[T]):
@property
def region(self) -> T: ...
""",
)]):
ty = self.Infer("""
import foo
class Region(foo.BaseRegion['Zone']):
pass
class Zone(foo.BaseZone['Region']):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
class Region(foo.BaseRegion[Zone]): ...
class Zone(foo.BaseZone[Region]): ...
""",
)
def test_recursion_in_parent(self):
self.Check("""
class C(dict[str, tuple['C', 'C']]):
def f(self):
pass
C()
""")
def test_recursion_in_imported_class(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import MutableMapping, TypeVar, Union
T = TypeVar('T')
class NestedDict(MutableMapping[str, Union[T, "NestedDict"]]): ...
class Array: ...
class SpecDict(NestedDict[Array]): ...
""",
)]):
self.Check("""
import foo
def f() -> foo.SpecDict:
return foo.SpecDict()
""")
def test_forward_ref_determinism(self):
# Repeat this test 20 times to check that the result is deterministic.
for _ in range(20):
self.Check("""
import dataclasses
from typing import List
@dataclasses.dataclass
class ChatMessage:
speaker: 'ChatUser'
class ChatUser:
def __init__(self, name: str, chat_room: 'ChatRoom'):
self.name = name
self.chat_room = chat_room
if self.name in self.chat_room.user_map:
raise ValueError()
class ChatRoom:
def __init__(self, users: List[ChatUser]):
self.user_map = {u.name: u for u in users}
""")
| AnnotationTest |
python | google__jax | jax/_src/state/types.py | {
"start": 4096,
"end": 6365
} | class ____:
dtype: dtypes.DType
shape: tuple[int, ...]
@classmethod
def from_ref_new_shape(cls, ref_or_view: Any, *shape: Any) -> RefReshaper:
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
if not shape:
raise ValueError("Cannot reshape ref to empty shape")
if any(s == -1 for s in shape):
num_elements = math.prod(ref_or_view.shape)
defined_dims = [d for d in shape if d != -1]
if len(defined_dims) != len(shape) - 1:
raise ValueError(f"At most one dimension can be -1, but got {shape}")
if num_elements % math.prod(defined_dims):
raise ValueError(
f"Specified dims {shape} do not evenly divide the size of the "
f"ref ({num_elements})."
)
remaining_dim = num_elements // math.prod(defined_dims)
shape = tuple(d if d != -1 else remaining_dim for d in shape)
if np.prod(shape) != np.prod(ref_or_view.shape):
raise TypeError(
f"cannot reshape ref of shape {ref_or_view.shape} into shape {shape}"
)
if isinstance(ref_or_view, TransformedRef):
if ref_or_view.is_dynamic_size:
raise NotImplementedError(
"Reshape ref with dynamic size is not supported."
)
dtype = dtypes.dtype(ref_or_view.dtype)
return cls(dtype, shape)
@property
def is_dynamic_size(self):
return False
def tree_flatten(self):
return (), (self.dtype, self.shape)
@classmethod
def tree_unflatten(cls, metadata, arrays):
assert not arrays
return cls(*metadata)
def transform_shape(
self, shape: tuple[int | Array, ...] | None
) -> tuple[int | Array, ...] | None:
del shape # Unused
return self.shape
def transform_dtype(self, dtype: DTypeLike | None) -> DTypeLike | None:
del dtype # Unused
return self.dtype
def transform_sharding(self, sharding):
# If there are no explicit axes, do nothing.
if all(p is None for p in sharding.spec):
return sharding
raise NotImplementedError
def pretty_print(self, context: core.JaxprPpContext) -> pp.Doc:
del context # Unused.
return pp.text(f"{{reshape({self.dtype}{list(self.shape)})}}")
@tree_util.register_dataclass
@dataclasses.dataclass(frozen=True)
| RefReshaper |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 11894,
"end": 13171
} | class ____(DiagnosticPipError, InstallationError):
"""A subprocess call failed."""
reference = "subprocess-exited-with-error"
def __init__(
self,
*,
command_description: str,
exit_code: int,
output_lines: list[str] | None,
) -> None:
if output_lines is None:
output_prompt = Text("No available output.")
else:
output_prompt = (
Text.from_markup(f"[red][{len(output_lines)} lines of output][/]\n")
+ Text("".join(output_lines))
+ Text.from_markup(R"[red]\[end of output][/]")
)
super().__init__(
message=(
f"[green]{escape(command_description)}[/] did not run successfully.\n"
f"exit code: {exit_code}"
),
context=output_prompt,
hint_stmt=None,
note_stmt=(
"This error originates from a subprocess, and is likely not a "
"problem with pip."
),
)
self.command_description = command_description
self.exit_code = exit_code
def __str__(self) -> str:
return f"{self.command_description} exited with {self.exit_code}"
| InstallationSubprocessError |
python | huggingface__transformers | src/transformers/generation/candidate_generator.py | {
"start": 32071,
"end": 33359
} | class ____(nn.Module):
def __init__(self, original_embedding: nn.Embedding, assistant_overlap_token_ids):
"""
Wraps an existing embedding layer and remaps token IDs before lookup.
Args:
original_embedding (nn.Embedding): Pre-trained or existing embedding layer.
assistant_overlap_token_ids (dict): Mapping from original token IDs to new token IDs.
Example: {old_id: new_id}
"""
super().__init__()
self.original_embedding = original_embedding
self.weight = original_embedding.weight
self.assistant_overlap_token_ids = assistant_overlap_token_ids
self.map = False
def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor:
"""
Args:
input_ids (torch.LongTensor): Tensor of token IDs (batch_size, seq_len).
Returns:
torch.FloatTensor: Corresponding input embeddings.
"""
if self.map:
# Get the last item from input_ids
my_input_ids = self.assistant_overlap_token_ids[input_ids[0, -1]].unsqueeze(0).unsqueeze(0)
else:
self.map = True
my_input_ids = input_ids
return self.original_embedding(my_input_ids)
| _MapInputEmbedding |
python | pytorch__pytorch | test/inductor/test_compile.py | {
"start": 1567,
"end": 1659
} | class ____(MyModule):
def forward(self, x):
return (super().forward(x),)
| MyModule3 |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 63481,
"end": 63934
} | class ____:
def test_basic(self, xp):
b = xp.asarray([0.25059432325190018])
a = xp.asarray(
[1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018]
)
b_hp, a_hp = lp2hp(b, a, 2*math.pi*5000)
xp_assert_close(b_hp, xp.asarray([1.0, 0, 0, 0]))
xp_assert_close(
a_hp, xp.asarray([1, 1.1638e5, 2.3522e9, 1.2373e14]), rtol=1e-4
)
@make_xp_test_case(lp2bp)
| TestLp2hp |
python | PyCQA__bandit | tests/unit/core/test_config.py | {
"start": 727,
"end": 2037
} | class ____(testtools.TestCase):
def test_settings(self):
# Can initialize a BanditConfig.
example_key = uuid.uuid4().hex
example_value = self.getUniqueString()
contents = f"{example_key}: {example_value}"
f = self.useFixture(TempFile(contents))
b_config = config.BanditConfig(f.name)
# After initialization, can get settings.
self.assertEqual("*.py", b_config.get_setting("plugin_name_pattern"))
self.assertEqual({example_key: example_value}, b_config.config)
self.assertEqual(example_value, b_config.get_option(example_key))
def test_file_does_not_exist(self):
# When the config file doesn't exist, ConfigFileUnopenable is raised.
cfg_file = os.path.join(os.getcwd(), "notafile")
self.assertRaisesRegex(
utils.ConfigError, cfg_file, config.BanditConfig, cfg_file
)
def test_yaml_invalid(self):
# When the config yaml file isn't valid, sys.exit(2) is called.
# The following is invalid because it starts a sequence and doesn't
# end it.
invalid_yaml = "- [ something"
f = self.useFixture(TempFile(invalid_yaml))
self.assertRaisesRegex(
utils.ConfigError, f.name, config.BanditConfig, f.name
)
| TestInit |
python | pytorch__pytorch | torch/export/dynamic_shapes.py | {
"start": 15078,
"end": 15645
} | class ____:
"""
This represents the root of a derived Dim where the root does not directly
specify the shape of any input dimension, but the derived Dim does.
e.g., the input shapes 2*dim and dim + 1 are related via a "phantom" dim.
The fields `name`, `constraint_range`, and `val` carried by a phantom root
help create a symbol for it. Any derived dims with this phantom root are
backed by expressions over this symbol.
"""
name: str
constraint_range: "StrictMinMaxConstraint"
val: int
@dataclasses.dataclass
| _PhantomRoot |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/12_Proximal_Policy_Optimization/discrete_DPPO.py | {
"start": 4814,
"end": 8817
} | class ____(object):
def __init__(self, wid):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.ppo = GLOBAL_PPO
def work(self):
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer, use new policy to collect data
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
if done: r = -10
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r-1) # 0 for not down, -11 for down. Reward engineering
s = s_
ep_r += r
GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size, no need to wait other workers
if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE or done:
if done:
v_s_ = 0 # end of episode
else:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, None]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br))) # put data in the queue
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
if done: break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0: GLOBAL_RUNNING_R.append(ep_r)
else: GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1]*0.9+ep_r*0.1)
GLOBAL_EP += 1
print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r,)
if __name__ == '__main__':
GLOBAL_PPO = PPONet()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # not update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue() # workers putting data in this queue
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update,))
threads[-1].start()
COORD.join(threads)
# plot reward change and test
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('Episode'); plt.ylabel('Moving reward'); plt.ion(); plt.show()
env = gym.make('CartPole-v0')
while True:
s = env.reset()
for t in range(1000):
env.render()
s, r, done, info = env.step(GLOBAL_PPO.choose_action(s))
if done:
break
| Worker |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 108716,
"end": 108852
} | class ____(_pkg_config_info):
section = 'gtkp_2'
append_config_exe = 'gtk+-2.0'
version_macro_name = 'GTK_VERSION'
| gtkp_2_info |
python | sphinx-doc__sphinx | sphinx/parsers.py | {
"start": 2060,
"end": 3877
} | class ____(docutils.parsers.rst.Parser, Parser):
"""A reST parser for Sphinx."""
def get_transforms(self) -> list[type[Transform]]:
"""Sphinx's reST parser replaces a transform class for smart-quotes by its own
refs: sphinx.io.SphinxStandaloneReader
"""
transforms = super(RSTParser, RSTParser()).get_transforms()
transforms.remove(SmartQuotes)
return transforms
def parse(self, inputstring: str | StringList, document: nodes.document) -> None:
"""Parse text and generate a document tree."""
self.setup_parse(inputstring, document) # type: ignore[arg-type]
self.statemachine = states.RSTStateMachine(
state_classes=self.state_classes,
initial_state=self.initial_state,
debug=document.reporter.debug_flag,
)
# preprocess inputstring
if isinstance(inputstring, str):
lines = docutils.statemachine.string2lines(
inputstring,
tab_width=document.settings.tab_width,
convert_whitespace=True,
)
inputlines = StringList(lines, document.current_source)
else:
inputlines = inputstring
self.decorate(inputlines)
self.statemachine.run(inputlines, document, inliner=self.inliner)
self.finish_parse()
def decorate(self, content: StringList) -> None:
"""Preprocess reStructuredText content before parsing."""
_prepend_prologue(content, self._config.rst_prolog)
_append_epilogue(content, self._config.rst_epilog)
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_source_parser(RSTParser)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| RSTParser |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 82405,
"end": 83136
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def other(self):
# Even though this method exists, it won't get called automatically
# because it is not in SUPPORTED_METHODS.
self.write("other")
def test_unimplemented_patch(self):
# PATCH is recently standardized; Tornado supports it by default
# but wsgiref.validate doesn't like it.
response = self.fetch("/", method="PATCH", body=b"")
self.assertEqual(response.code, 405)
def test_unimplemented_other(self):
response = self.fetch("/", method="OTHER", allow_nonstandard_methods=True)
self.assertEqual(response.code, 405)
| UnimplementedNonStandardMethodsTest |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 34540,
"end": 34692
} | class ____(models.Model):
slug = models.CharField(max_length=100, unique_for_month='published')
published = models.DateField()
| UniqueForMonthModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py | {
"start": 5961,
"end": 7133
} | class ____(DpathExtractor):
def extract_records(self, response: requests.Response) -> List[Mapping[str, Any]]:
"""
response.json() example:
{
'computed_at': '2021-07-03T12:43:48.889421+00:00',
'results': {
'$overall': { <-- should be skipped
'amount': 0.0,
'count': 124,
'paid_count': 0
},
'2021-06-01': {
'amount': 0.0,
'count': 124,
'paid_count': 0
},
'2021-06-02': {
'amount': 0.0,
'count': 124,
'paid_count': 0
},
...
},
'session_id': '162...',
'status': 'ok'
}
"""
new_records = []
for record in super().extract_records(response):
for date_entry in record:
if date_entry != "$overall":
list.append(new_records, {"date": date_entry, **record[date_entry]})
return new_records
| RevenueDpathExtractor |
python | html5lib__html5lib-python | html5lib/html5parser.py | {
"start": 92858,
"end": 94229
} | class ____(Phase):
__slots__ = tuple()
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
startTagHandler = _utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
startTagTable)
])
startTagHandler.default = startTagOther
endTagHandler = _utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
endTagTable)
])
endTagHandler.default = endTagOther
| InSelectInTablePhase |
python | getsentry__sentry | src/sentry/consumers/dlq.py | {
"start": 2886,
"end": 5475
} | class ____(ProcessingStrategy[KafkaPayload]):
def __init__(
self,
stale_threshold_sec: int,
next_step: ProcessingStrategy[KafkaPayload | FilteredPayload],
) -> None:
self.stale_threshold_sec = stale_threshold_sec
self.next_step = next_step
# A filtered message is created so we commit periodically if all are stale.
self.last_forwarded_offsets = time.time()
self.offsets_to_forward: MutableMapping[Partition, int] = {}
def submit(self, message: Message[KafkaPayload]) -> None:
min_accepted_timestamp = datetime.now(timezone.utc) - timedelta(
seconds=self.stale_threshold_sec
)
if isinstance(message.value, BrokerValue):
# Normalize the message timezone to be UTC
if message.value.timestamp.tzinfo is None:
message_timestamp = message.value.timestamp.replace(tzinfo=timezone.utc)
else:
message_timestamp = message.value.timestamp
if message_timestamp < min_accepted_timestamp:
self.offsets_to_forward[message.value.partition] = message.value.next_offset
metrics.incr(key="consumer.stale-messages.routed", sample_rate=1.0)
raise InvalidMessage(
message.value.partition,
message.value.offset,
reason=RejectReason.STALE.value,
log_exception=False,
)
# If we get a valid message for a partition later, don't emit a filtered message for it
if self.offsets_to_forward:
for partition in message.committable:
self.offsets_to_forward.pop(partition, None)
self.next_step.submit(message)
def poll(self) -> None:
self.next_step.poll()
# Ensure we commit frequently even if all messages are invalid
if self.offsets_to_forward:
if time.time() > self.last_forwarded_offsets + 1:
filtered_message = Message(Value(FILTERED_PAYLOAD, self.offsets_to_forward))
try:
self.next_step.submit(filtered_message)
self.offsets_to_forward = {}
self.last_forwarded_offsets = time.time()
except MessageRejected:
pass
def join(self, timeout: float | None = None) -> None:
self.next_step.join(timeout)
def close(self) -> None:
self.next_step.close()
def terminate(self) -> None:
self.next_step.terminate()
| DlqStaleMessages |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/appflow.py | {
"start": 1089,
"end": 5263
} | class ____(AwsGenericHook["AppflowClient"]):
"""
Interact with Amazon AppFlow.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("appflow") <Appflow.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
- `Amazon Appflow API Reference <https://docs.aws.amazon.com/appflow/1.0/APIReference/Welcome.html>`__
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "appflow"
super().__init__(*args, **kwargs)
def run_flow(
self,
flow_name: str,
poll_interval: int = 20,
wait_for_completion: bool = True,
max_attempts: int = 60,
) -> str:
"""
Execute an AppFlow run.
:param flow_name: The flow name
:param poll_interval: Time (seconds) to wait between two consecutive calls to check the run status
:param wait_for_completion: whether to wait for the run to end to return
:param max_attempts: the number of polls to do before timing out/returning a failure.
:return: The run execution ID
"""
response_start = self.conn.start_flow(flowName=flow_name)
execution_id = response_start["executionId"]
self.log.info("executionId: %s", execution_id)
if wait_for_completion:
wait(
waiter=self.get_waiter("run_complete", {"EXECUTION_ID": execution_id}),
waiter_delay=poll_interval,
waiter_max_attempts=max_attempts,
args={"flowName": flow_name},
failure_message="error while waiting for flow to complete",
status_message="waiting for flow completion, status",
status_args=[
f"flowExecutions[?executionId=='{execution_id}'].executionStatus",
f"flowExecutions[?executionId=='{execution_id}'].executionResult.errorInfo",
],
)
self._log_execution_description(flow_name, execution_id)
return execution_id
def _log_execution_description(self, flow_name: str, execution_id: str):
response_desc = self.conn.describe_flow_execution_records(flowName=flow_name)
last_execs = {fe["executionId"]: fe for fe in response_desc["flowExecutions"]}
exec_details = last_execs[execution_id]
self.log.info("Run complete, execution details: %s", exec_details)
def update_flow_filter(self, flow_name: str, filter_tasks, set_trigger_ondemand: bool = False) -> None:
"""
Update the flow task filter; all filters will be removed if an empty array is passed to filter_tasks.
:param flow_name: The flow name
:param filter_tasks: List flow tasks to be added
:param set_trigger_ondemand: If True, set the trigger to on-demand; otherwise, keep the trigger as is
:return: None
"""
response = self.conn.describe_flow(flowName=flow_name)
connector_type = response["sourceFlowConfig"]["connectorType"]
tasks = []
# cleanup old filter tasks
for task in response["tasks"]:
if (
task["taskType"] == "Filter"
and task.get("connectorOperator", {}).get(connector_type) != "PROJECTION"
):
self.log.info("Removing task: %s", task)
else:
tasks.append(task) # List of non-filter tasks
tasks += filter_tasks # Add the new filter tasks
if set_trigger_ondemand:
# Clean up attribute to force on-demand trigger
del response["triggerConfig"]["triggerProperties"]
self.conn.update_flow(
flowName=response["flowName"],
destinationFlowConfigList=response["destinationFlowConfigList"],
sourceFlowConfig=response["sourceFlowConfig"],
triggerConfig=response["triggerConfig"],
description=response.get("description", "Flow description."),
tasks=tasks,
)
| AppflowHook |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/openai_api_models.py | {
"start": 2410,
"end": 2522
} | class ____(vLLMCompletionResponse):
model_config = ConfigDict(arbitrary_types_allowed=True)
| CompletionResponse |
python | pypa__warehouse | tests/unit/admin/views/test_organizations.py | {
"start": 2877,
"end": 10031
} | class ____:
@pytest.mark.usefixtures("_enable_organizations")
def test_no_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(30),
key=lambda o: o.normalized_name,
)
result = views.organization_list(db_request)
assert result == {"organizations": organizations[:25], "query": "", "terms": []}
@pytest.mark.usefixtures("_enable_organizations")
def test_with_page(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(30),
key=lambda o: o.normalized_name,
)
db_request.GET["page"] = "2"
result = views.organization_list(db_request)
assert result == {"organizations": organizations[25:], "query": "", "terms": []}
@pytest.mark.usefixtures("_enable_organizations")
def test_with_invalid_page(self):
request = pretend.stub(
flags=pretend.stub(enabled=lambda *a: False),
params={"page": "not an integer"},
)
with pytest.raises(HTTPBadRequest):
views.organization_list(request)
@pytest.mark.usefixtures("_enable_organizations")
def test_basic_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
db_request.GET["q"] = organizations[0].name
result = views.organization_list(db_request)
assert organizations[0] in result["organizations"]
assert result["query"] == organizations[0].name
assert result["terms"] == [organizations[0].name]
@pytest.mark.usefixtures("_enable_organizations")
def test_name_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
db_request.GET["q"] = f"name:{organizations[0].name}"
result = views.organization_list(db_request)
assert organizations[0] in result["organizations"]
assert result["query"] == f"name:{organizations[0].name}"
assert result["terms"] == [f"name:{organizations[0].name}"]
@pytest.mark.usefixtures("_enable_organizations")
def test_organization_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
db_request.GET["q"] = f"organization:{organizations[0].display_name}"
result = views.organization_list(db_request)
assert organizations[0] in result["organizations"]
assert result["query"] == f"organization:{organizations[0].display_name}"
assert result["terms"] == [f"organization:{organizations[0].display_name}"]
@pytest.mark.usefixtures("_enable_organizations")
def test_url_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
db_request.GET["q"] = f"url:{organizations[0].link_url}"
result = views.organization_list(db_request)
assert organizations[0] in result["organizations"]
assert result["query"] == f"url:{organizations[0].link_url}"
assert result["terms"] == [f"url:{organizations[0].link_url}"]
@pytest.mark.usefixtures("_enable_organizations")
def test_description_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
db_request.GET["q"] = f"description:'{organizations[0].description}'"
result = views.organization_list(db_request)
assert organizations[0] in result["organizations"]
assert result["query"] == f"description:'{organizations[0].description}'"
assert result["terms"] == [f"description:{organizations[0].description}"]
@pytest.mark.usefixtures("_enable_organizations")
def test_is_active_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
organizations[0].is_active = True
organizations[1].is_active = True
organizations[2].is_active = False
organizations[3].is_active = False
organizations[4].is_active = False
db_request.GET["q"] = "is:active"
result = views.organization_list(db_request)
assert result == {
"organizations": organizations[:2],
"query": "is:active",
"terms": ["is:active"],
}
@pytest.mark.usefixtures("_enable_organizations")
def test_is_inactive_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
organizations[0].is_active = True
organizations[1].is_active = True
organizations[2].is_active = False
organizations[3].is_active = False
organizations[4].is_active = False
db_request.GET["q"] = "is:inactive"
result = views.organization_list(db_request)
assert result == {
"organizations": organizations[2:],
"query": "is:inactive",
"terms": ["is:inactive"],
}
@pytest.mark.usefixtures("_enable_organizations")
def test_type_query(self, db_request):
company_org = OrganizationFactory.create(orgtype=OrganizationType.Company)
community_org = OrganizationFactory.create(orgtype=OrganizationType.Community)
db_request.GET["q"] = "type:company"
result = views.organization_list(db_request)
assert result == {
"organizations": [company_org],
"query": "type:company",
"terms": ["type:company"],
}
db_request.GET["q"] = "type:community"
result = views.organization_list(db_request)
assert result == {
"organizations": [community_org],
"query": "type:community",
"terms": ["type:community"],
}
@pytest.mark.usefixtures("_enable_organizations")
def test_invalid_type_query(self, db_request):
company_org = OrganizationFactory.create(orgtype=OrganizationType.Company)
db_request.GET["q"] = "type:invalid"
result = views.organization_list(db_request)
assert result == {
"organizations": [company_org],
"query": "type:invalid",
"terms": ["type:invalid"],
}
@pytest.mark.usefixtures("_enable_organizations")
def test_is_invalid_query(self, db_request):
organizations = sorted(
OrganizationFactory.create_batch(5),
key=lambda o: o.normalized_name,
)
db_request.GET["q"] = "is:not-actually-a-valid-query"
result = views.organization_list(db_request)
assert result == {
"organizations": organizations[:25],
"query": "is:not-actually-a-valid-query",
"terms": ["is:not-actually-a-valid-query"],
}
| TestOrganizationList |
python | PyCQA__pyflakes | pyflakes/test/test_builtin.py | {
"start": 134,
"end": 582
} | class ____(TestCase):
def test_builtin_unbound_local(self):
self.flakes('''
def foo():
a = range(1, 10)
range = a
return range
foo()
print(range)
''', m.UndefinedLocal)
def test_global_shadowing_builtin(self):
self.flakes('''
def f():
global range
range = None
print(range)
f()
''')
| TestBuiltins |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 37369,
"end": 39264
} | class ____(TestCase):
def test_meta_class_fields_option(self):
class ExampleSerializer(serializers.ModelSerializer):
class Meta:
model = MetaClassTestModel
fields = 'text'
msginitial = "The `fields` option must be a list or tuple"
with self.assertRaisesMessage(TypeError, msginitial):
ExampleSerializer().fields
def test_meta_class_exclude_option(self):
class ExampleSerializer(serializers.ModelSerializer):
class Meta:
model = MetaClassTestModel
exclude = 'text'
msginitial = "The `exclude` option must be a list or tuple"
with self.assertRaisesMessage(TypeError, msginitial):
ExampleSerializer().fields
def test_meta_class_fields_and_exclude_options(self):
class ExampleSerializer(serializers.ModelSerializer):
class Meta:
model = MetaClassTestModel
fields = ('text',)
exclude = ('text',)
msginitial = "Cannot set both 'fields' and 'exclude' options on serializer ExampleSerializer."
with self.assertRaisesMessage(AssertionError, msginitial):
ExampleSerializer().fields
def test_declared_fields_with_exclude_option(self):
class ExampleSerializer(serializers.ModelSerializer):
text = serializers.CharField()
class Meta:
model = MetaClassTestModel
exclude = ('text',)
expected = (
"Cannot both declare the field 'text' and include it in the "
"ExampleSerializer 'exclude' option. Remove the field or, if "
"inherited from a parent serializer, disable with `text = None`."
)
with self.assertRaisesMessage(AssertionError, expected):
ExampleSerializer().fields
| TestSerializerMetaClass |
python | walkccc__LeetCode | solutions/648. Replace Words/648.py | {
"start": 0,
"end": 656
} | class ____:
def __init__(self):
self.root = {}
def insert(self, word: str) -> None:
node = self.root
for c in word:
if c not in node:
node[c] = {}
node = node[c]
node['word'] = word
def search(self, word: str) -> str:
node = self.root
for c in word:
if 'word' in node:
return node['word']
if c not in node:
return word
node = node[c]
return word
def replaceWords(self, dictionary: list[str], sentence: str) -> str:
for word in dictionary:
self.insert(word)
words = sentence.split(' ')
return ' '.join([self.search(word) for word in words])
| Solution |
python | kamyu104__LeetCode-Solutions | Python/palindrome-number.py | {
"start": 29,
"end": 314
} | class ____(object):
# @return a boolean
def isPalindrome(self, x):
if x < 0:
return False
copy, reverse = x, 0
while copy:
reverse *= 10
reverse += copy % 10
copy //= 10
return x == reverse
| Solution |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/kinesis_analytics.py | {
"start": 10646,
"end": 15920
} | class ____(AwsBaseOperator[KinesisAnalyticsV2Hook]):
"""
Stop an AWS Managed Service for Apache Flink application.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:KinesisAnalyticsV2StopApplicationOperator`
:param application_name: The name of your application. (templated)
:param force: Set to true to force the application to stop. If you set Force to true, Managed Service for
Apache Flink stops the application without taking a snapshot. (templated)
:param wait_for_completion: Whether to wait for job to stop. (default: True)
:param waiter_delay: Time in seconds to wait between status checks. (default: 60)
:param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 20)
:param deferrable: If True, the operator will wait asynchronously for the job to stop.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = KinesisAnalyticsV2Hook
ui_color = "#44b5e2"
template_fields: Sequence[str] = aws_template_fields(
"application_name",
"force",
)
def __init__(
self,
application_name: str,
force: bool = False,
wait_for_completion: bool = True,
waiter_delay: int = 60,
waiter_max_attempts: int = 20,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.application_name = application_name
self.force = force
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> dict[str, Any]:
msg = "AWS Managed Service for Apache Flink application"
try:
self.log.info("Stopping %s %s.", msg, self.application_name)
self.hook.conn.stop_application(ApplicationName=self.application_name, Force=self.force)
except ClientError as error:
raise AirflowException(
f"Failed to stop {msg} {self.application_name}: {error.response['Error']['Message']}"
)
describe_response = self.hook.conn.describe_application(ApplicationName=self.application_name)
if self.deferrable:
self.log.info("Deferring for %s to stop: %s.", msg, self.application_name)
self.defer(
trigger=KinesisAnalyticsV2ApplicationOperationCompleteTrigger(
application_name=self.application_name,
waiter_name="application_stop_complete",
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
region_name=self.region_name,
verify=self.verify,
botocore_config=self.botocore_config,
),
method_name="execute_complete",
)
if self.wait_for_completion:
self.log.info("Waiting for %s to stop: %s.", msg, self.application_name)
self.hook.get_waiter("application_stop_complete").wait(
ApplicationName=self.application_name,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
self.log.info("%s stopped successfully %s.", msg, self.application_name)
return {"ApplicationARN": describe_response["ApplicationDetail"]["ApplicationARN"]}
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> dict[str, Any]:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException("Error while stopping AWS Managed Service for Apache Flink application")
response = self.hook.conn.describe_application(
ApplicationName=validated_event["application_name"],
)
self.log.info(
"AWS Managed Service for Apache Flink application %s stopped successfully.",
validated_event["application_name"],
)
return {"ApplicationARN": response["ApplicationDetail"]["ApplicationARN"]}
| KinesisAnalyticsV2StopApplicationOperator |
python | encode__httpx | httpx/_exceptions.py | {
"start": 3943,
"end": 4033
} | class ____(NetworkError):
"""
Failed to establish a connection.
"""
| ConnectError |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/resource_requirement.py | {
"start": 6912,
"end": 7267
} | class ____(ResourceKeyRequirement):
key: str # pyright: ignore[reportIncompatibleMethodOverride]
type_display_name: str
def describe_requirement(self) -> str:
return (
f"resource with key '{self.key}' required by the loader on type"
f" '{self.type_display_name}'"
)
@record
| TypeLoaderResourceRequirement |
python | kamyu104__LeetCode-Solutions | Python/range-xor-queries-with-subarray-reversals.py | {
"start": 2055,
"end": 3050
} | class ____(object):
def getResults(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
def update(root, index, value):
left, mid = split(root, index)
mid, right = split(mid, 1)
mid.value = value
upd_cnt(mid)
return merge(merge(left, mid), right)
def query(root, left, right):
t1, t2 = split(root, left)
t2, t3 = split(t2, right - left + 1)
result = xor_sum(t2)
return merge(merge(t1, t2), t3), result
result = []
root = build(nums, 0, len(nums))
for q in queries:
if q[0] == 1:
root = update(root, q[1], q[2])
elif q[0] == 2:
root, x = query(root, q[1], q[2])
result.append(x)
elif q[0] == 3:
root = reverse(root, q[1], q[2])
return result
| Solution |
python | huggingface__transformers | tests/models/sew/test_modeling_sew.py | {
"start": 1389,
"end": 10378
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=32,
feat_extract_norm="group",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(64, 32, 32),
conv_stride=(5, 2, 1),
conv_kernel=(10, 3, 1),
conv_bias=False,
num_conv_pos_embeddings=31,
num_conv_pos_embedding_groups=2,
squeeze_factor=2,
num_hidden_layers=2,
num_attention_heads=2,
hidden_dropout=0.1,
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.squeeze_factor = squeeze_factor
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.scope = scope
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length // self.squeeze_factor
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_values, attention_mask
def get_config(self):
return SEWConfig(
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
squeeze_factor=self.squeeze_factor,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout=self.hidden_dropout,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
def create_and_check_model(self, config, input_values, attention_mask):
model = SEWModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_values, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size)
)
def check_ctc_loss(self, config, input_values, *args):
model = SEWForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
def check_ctc_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWForCTC(config=config)
model.to(torch_device)
model.train()
# freeze feature encoder
model.freeze_feature_encoder()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
if max_length_labels[i] < labels.shape[-1]:
# it's important that we make sure that target lengths are at least
# one shorter than logit lengths to prevent -inf
labels[i, max_length_labels[i] - 1 :] = -100
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_seq_classifier_loss(self, config, input_values, *args):
model = SEWForSequenceClassification(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
unmasked_loss = model(input_values, labels=labels).loss.item()
self.parent.assertTrue(isinstance(masked_loss, float))
self.parent.assertTrue(isinstance(unmasked_loss, float))
self.parent.assertTrue(masked_loss != unmasked_loss)
def check_seq_classifier_training(self, config, input_values, *args):
config.ctc_zero_infinity = True
model = SEWForSequenceClassification(config=config)
model.to(torch_device)
model.train()
# freeze everything but the classification head
model.freeze_base_model()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label))
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
loss = model(input_values, labels=labels).loss
self.parent.assertFalse(torch.isinf(loss).item())
loss.backward()
def check_labels_out_of_vocab(self, config, input_values, *args):
model = SEWForCTC(config)
model.to(torch_device)
model.train()
input_values = input_values[:3]
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100)
with pytest.raises(ValueError):
model(input_values, labels=labels)
def prepare_config_and_inputs_for_common(self):
config, input_values, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
| SEWModelTester |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 36863,
"end": 38369
} | class ____(TestReferrerOnRedirect):
"""
Same Origin policy sends the full URL as "Referer" if the target origin
is the same as the parent response (same protocol, same domain, same port).
HTTP redirections to a different domain or a lower secure level
should have the "Referer" removed.
"""
settings = {"REFERRER_POLICY": "same-origin"}
scenarii = [
(
"http://scrapytest.org/101", # origin
"http://scrapytest.org/102", # target
(
# redirections: code, URL
(301, "http://scrapytest.org/103"),
(301, "http://scrapytest.org/104"),
),
b"http://scrapytest.org/101", # expected initial "Referer"
b"http://scrapytest.org/101", # expected referer for the redirection request
),
(
"https://scrapytest.org/201",
"https://scrapytest.org/202",
(
# redirecting from secure to non-secure URL == different origin
(301, "http://scrapytest.org/203"),
),
b"https://scrapytest.org/201",
None,
),
(
"https://scrapytest.org/301",
"https://scrapytest.org/302",
(
# different domain == different origin
(301, "http://example.com/303"),
),
b"https://scrapytest.org/301",
None,
),
]
| TestReferrerOnRedirectSameOrigin |
python | cherrypy__cherrypy | cherrypy/test/_test_states_demo.py | {
"start": 77,
"end": 2244
} | class ____:
"""A test web app."""
@cherrypy.expose
def index(self):
"""Produce HTTP response body of test app index URI."""
return 'Hello World'
@cherrypy.expose
def mtimes(self):
"""Respond with timestamps."""
return repr(cherrypy.engine.publish('Autoreloader', 'mtimes'))
@cherrypy.expose
def pid(self):
"""Respond with the current process ID."""
return str(os.getpid())
@cherrypy.expose
def start(self):
"""Respond with the start time."""
return repr(starttime)
@cherrypy.expose
def exit(self):
"""Stop the server."""
# This handler might be called before the engine is STARTED if an
# HTTP worker thread handles it before the HTTP server returns
# control to engine.start. We avoid that race condition here
# by waiting for the Bus to be STARTED.
cherrypy.engine.wait(state=cherrypy.engine.states.STARTED)
cherrypy.engine.exit()
@cherrypy.engine.subscribe('start', priority=100)
def unsub_sig():
"""Unsubscribe the default signal handler."""
cherrypy.log('unsubsig: %s' % cherrypy.config.get('unsubsig', False))
if cherrypy.config.get('unsubsig', False):
cherrypy.log('Unsubscribing the default cherrypy signal handler')
cherrypy.engine.signal_handler.unsubscribe()
try:
from signal import signal, SIGTERM
except ImportError:
pass
else:
def old_term_handler(signum=None, frame=None):
cherrypy.log('I am an old SIGTERM handler.')
sys.exit(0)
cherrypy.log('Subscribing the new one.')
signal(SIGTERM, old_term_handler)
@cherrypy.engine.subscribe('start', priority=6)
def starterror():
"""Error out on start."""
if cherrypy.config.get('starterror', False):
1 / 0
@cherrypy.engine.subscribe('start', priority=6)
def log_test_case_name():
"""Log test case name."""
if cherrypy.config.get('test_case_name', False):
cherrypy.log(
'STARTED FROM: %s' % cherrypy.config.get('test_case_name'),
)
cherrypy.tree.mount(Root(), '/', {'/': {}})
| Root |
python | chardet__chardet | chardet/utf1632prober.py | {
"start": 947,
"end": 8439
} | class ____(CharSetProber):
"""
This class simply looks for occurrences of zero bytes, and infers
whether the file is UTF16 or UTF32 (low-endian or big-endian)
For instance, files looking like ( \0 \0 \0 [nonzero] )+
have a good probability to be UTF32BE. Files looking like ( \0 [nonzero] )+
may be guessed to be UTF16BE, and inversely for little-endian varieties.
"""
# how many logical characters to scan before feeling confident of prediction
MIN_CHARS_FOR_DETECTION = 20
# a fixed constant ratio of expected zeros or non-zeros in modulo-position.
EXPECTED_RATIO = 0.94
def __init__(self) -> None:
super().__init__()
self.position = 0
self.zeros_at_mod = [0] * 4
self.nonzeros_at_mod = [0] * 4
self._state = ProbingState.DETECTING
self.quad = [0, 0, 0, 0]
self.invalid_utf16be = False
self.invalid_utf16le = False
self.invalid_utf32be = False
self.invalid_utf32le = False
self.first_half_surrogate_pair_detected_16be = False
self.first_half_surrogate_pair_detected_16le = False
self.reset()
def reset(self) -> None:
super().reset()
self.position = 0
self.zeros_at_mod = [0] * 4
self.nonzeros_at_mod = [0] * 4
self._state = ProbingState.DETECTING
self.invalid_utf16be = False
self.invalid_utf16le = False
self.invalid_utf32be = False
self.invalid_utf32le = False
self.first_half_surrogate_pair_detected_16be = False
self.first_half_surrogate_pair_detected_16le = False
self.quad = [0, 0, 0, 0]
@property
def charset_name(self) -> str:
if self.is_likely_utf32be():
return "utf-32be"
if self.is_likely_utf32le():
return "utf-32le"
if self.is_likely_utf16be():
return "utf-16be"
if self.is_likely_utf16le():
return "utf-16le"
# default to something valid
return "utf-16"
@property
def language(self) -> str:
return ""
def approx_32bit_chars(self) -> float:
return max(1.0, self.position / 4.0)
def approx_16bit_chars(self) -> float:
return max(1.0, self.position / 2.0)
def is_likely_utf32be(self) -> bool:
approx_chars = self.approx_32bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
and not self.invalid_utf32be
)
def is_likely_utf32le(self) -> bool:
approx_chars = self.approx_32bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
and not self.invalid_utf32le
)
def is_likely_utf16be(self) -> bool:
approx_chars = self.approx_16bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
(self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
> self.EXPECTED_RATIO
and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
> self.EXPECTED_RATIO
and not self.invalid_utf16be
)
def is_likely_utf16le(self) -> bool:
approx_chars = self.approx_16bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
(self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
> self.EXPECTED_RATIO
and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
> self.EXPECTED_RATIO
and not self.invalid_utf16le
)
def validate_utf32_characters(self, quad: List[int]) -> None:
"""
Validate if the quad of bytes is valid UTF-32.
UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
excluding 0x0000D800 - 0x0000DFFF
https://en.wikipedia.org/wiki/UTF-32
"""
if (
quad[0] != 0
or quad[1] > 0x10
or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
):
self.invalid_utf32be = True
if (
quad[3] != 0
or quad[2] > 0x10
or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
):
self.invalid_utf32le = True
def validate_utf16_characters(self, pair: List[int]) -> None:
"""
Validate if the pair of bytes is valid UTF-16.
UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xFFFF
with an exception for surrogate pairs, which must be in the range
0xD800-0xDBFF followed by 0xDC00-0xDFFF
https://en.wikipedia.org/wiki/UTF-16
"""
if not self.first_half_surrogate_pair_detected_16be:
if 0xD8 <= pair[0] <= 0xDB:
self.first_half_surrogate_pair_detected_16be = True
elif 0xDC <= pair[0] <= 0xDF:
self.invalid_utf16be = True
else:
if 0xDC <= pair[0] <= 0xDF:
self.first_half_surrogate_pair_detected_16be = False
else:
self.invalid_utf16be = True
if not self.first_half_surrogate_pair_detected_16le:
if 0xD8 <= pair[1] <= 0xDB:
self.first_half_surrogate_pair_detected_16le = True
elif 0xDC <= pair[1] <= 0xDF:
self.invalid_utf16le = True
else:
if 0xDC <= pair[1] <= 0xDF:
self.first_half_surrogate_pair_detected_16le = False
else:
self.invalid_utf16le = True
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
for c in byte_str:
mod4 = self.position % 4
self.quad[mod4] = c
if mod4 == 3:
self.validate_utf32_characters(self.quad)
self.validate_utf16_characters(self.quad[0:2])
self.validate_utf16_characters(self.quad[2:4])
if c == 0:
self.zeros_at_mod[mod4] += 1
else:
self.nonzeros_at_mod[mod4] += 1
self.position += 1
return self.state
@property
def state(self) -> ProbingState:
if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
# terminal, decided states
return self._state
if self.get_confidence() > 0.80:
self._state = ProbingState.FOUND_IT
elif self.position > 4 * 1024:
# if we get to 4kb into the file, and we can't conclude it's UTF,
# let's give up
self._state = ProbingState.NOT_ME
return self._state
def get_confidence(self) -> float:
return (
0.85
if (
self.is_likely_utf16le()
or self.is_likely_utf16be()
or self.is_likely_utf32le()
or self.is_likely_utf32be()
)
else 0.00
)
| UTF1632Prober |
python | pypa__hatch | backend/src/hatchling/builders/sdist.py | {
"start": 855,
"end": 3102
} | class ____:
def __init__(self, name: str, *, reproducible: bool) -> None:
"""
https://peps.python.org/pep-0517/#source-distributions
"""
self.name = name
self.reproducible = reproducible
self.timestamp: int | None = get_reproducible_timestamp() if reproducible else None
raw_fd, self.path = tempfile.mkstemp(suffix=".tar.gz")
self.fd = os.fdopen(raw_fd, "w+b")
self.gz = gzip.GzipFile(fileobj=self.fd, mode="wb", mtime=self.timestamp)
self.tf = tarfile.TarFile(fileobj=self.gz, mode="w", format=tarfile.PAX_FORMAT)
self.gettarinfo = lambda *args, **kwargs: self.normalize_tar_metadata(self.tf.gettarinfo(*args, **kwargs))
def create_file(self, contents: str | bytes, *relative_paths: str) -> None:
if not isinstance(contents, bytes):
contents = contents.encode("utf-8")
tar_info = tarfile.TarInfo(normalize_archive_path(os.path.join(self.name, *relative_paths)))
tar_info.size = len(contents)
if self.reproducible and self.timestamp is not None:
tar_info.mtime = self.timestamp
else:
tar_info.mtime = int(get_current_timestamp())
with closing(BytesIO(contents)) as buffer:
self.tf.addfile(tar_info, buffer)
def normalize_tar_metadata(self, tar_info: tarfile.TarInfo | None) -> tarfile.TarInfo | None:
if not self.reproducible or tar_info is None:
return tar_info
tar_info = copy(tar_info)
tar_info.uid = 0
tar_info.gid = 0
tar_info.uname = ""
tar_info.gname = ""
tar_info.mode = normalize_file_permissions(tar_info.mode)
if self.timestamp is not None:
tar_info.mtime = self.timestamp
return tar_info
def __getattr__(self, name: str) -> Any:
attr = getattr(self.tf, name)
setattr(self, name, attr)
return attr
def __enter__(self) -> SdistArchive: # noqa: PYI034
return self
def __exit__(
self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None
) -> None:
self.tf.close()
self.gz.close()
self.fd.close()
| SdistArchive |
python | jazzband__django-simple-history | simple_history/registry_tests/tests.py | {
"start": 954,
"end": 2648
} | class ____(TestCase):
def test_register_no_args(self):
self.assertEqual(len(Choice.history.all()), 0)
poll = Poll.objects.create(pub_date=today)
choice = Choice.objects.create(poll=poll, votes=0)
self.assertEqual(len(choice.history.all()), 1)
def test_register_separate_app(self):
def get_history(model):
return model.history
self.assertRaises(AttributeError, get_history, User)
self.assertEqual(len(User.histories.all()), 0)
user = User.objects.create(username="bob", password="pass")
self.assertEqual(len(User.histories.all()), 1)
self.assertEqual(len(user.histories.all()), 1)
def test_reregister(self):
with self.assertRaises(exceptions.MultipleRegistrationsError):
register(Restaurant, manager_name="again")
def test_register_custome_records(self):
self.assertEqual(len(Voter.history.all()), 0)
poll = Poll.objects.create(pub_date=today)
choice = Choice.objects.create(poll=poll, votes=0)
user = User.objects.create(username="voter")
voter = Voter.objects.create(choice=choice, user=user)
self.assertEqual(len(voter.history.all()), 1)
expected = "Voter object changed by None as of "
self.assertEqual(expected, str(voter.history.all()[0])[: len(expected)])
def test_register_history_id_field(self):
self.assertEqual(len(UUIDRegisterModel.history.all()), 0)
entry = UUIDRegisterModel.objects.create()
self.assertEqual(len(entry.history.all()), 1)
history = entry.history.all()[0]
self.assertTrue(isinstance(history.history_id, uuid.UUID))
| RegisterTest |
python | great-expectations__great_expectations | great_expectations/execution_engine/sqlalchemy_dialect.py | {
"start": 272,
"end": 3746
} | class ____(Enum):
"""Contains sql dialects that have some level of support in Great Expectations.
Also contains an unsupported attribute if the dialect is not in the list.
"""
AWSATHENA = "awsathena"
BIGQUERY = "bigquery"
DATABRICKS = "databricks"
DREMIO = "dremio"
HIVE = "hive"
MSSQL = "mssql"
MYSQL = "mysql"
ORACLE = "oracle"
POSTGRESQL = "postgresql"
REDSHIFT = "redshift"
SNOWFLAKE = "snowflake"
SQLITE = "sqlite"
TERADATASQL = "teradatasql"
TRINO = "trino"
VERTICA = "vertica"
CLICKHOUSE = "clickhouse"
OTHER = "other"
@override
def __eq__(self, other: Union[str, bytes, GXSqlDialect]): # type: ignore[override] # supertype uses `object`
if isinstance(other, str):
return self.value.lower() == other.lower()
# Comparison against byte string, e.g. `b"hive"` should be treated as unicode
elif isinstance(other, bytes):
return self.value.lower() == other.lower().decode("utf-8")
return self.value.lower() == other.value.lower()
@override
def __hash__(self: GXSqlDialect):
return hash(self.value)
@classmethod
@override
def _missing_(cls, value: Any) -> Any:
try:
# Sometimes `value` is a byte string, e.g. `b"hive"`, it should be converted
return cls(value.decode())
except (UnicodeDecodeError, AttributeError):
return super()._missing_(value)
@classmethod
def get_all_dialect_names(cls) -> List[str]:
"""Get dialect names for all SQL dialects."""
return [dialect_name.value for dialect_name in cls if dialect_name != GXSqlDialect.OTHER]
@classmethod
def get_all_dialects(cls) -> List[GXSqlDialect]:
"""Get all dialects."""
return [dialect for dialect in cls if dialect != GXSqlDialect.OTHER]
# Per-dialect identifier quote character. Only dialects listed here can be
# used with quote_str/_strip_quotes; others raise KeyError.
DIALECT_IDENTIFIER_QUOTE_STRINGS: Final[Mapping[GXSqlDialect, Literal['"', "`"]]] = {
    # TODO: add other dialects
    GXSqlDialect.DATABRICKS: "`",
    GXSqlDialect.MYSQL: "`",
    GXSqlDialect.POSTGRESQL: '"',
    GXSqlDialect.SNOWFLAKE: '"',
    GXSqlDialect.SQLITE: '"',
    GXSqlDialect.TRINO: "`",
}
def quote_str(unquoted_identifier: str, dialect: GXSqlDialect) -> str:
    """Quote a string using the specified dialect's quote character.

    Raises:
        ValueError: If the identifier already begins or ends with the quote
            character (refuses to double-quote).
    """
    quote_char = DIALECT_IDENTIFIER_QUOTE_STRINGS[dialect]
    if unquoted_identifier.startswith(quote_char) or unquoted_identifier.endswith(quote_char):
        raise ValueError(  # noqa: TRY003 # FIXME CoP
            f"Identifier {unquoted_identifier} already uses quote character {quote_char}"
        )
    return f"{quote_char}{unquoted_identifier}{quote_char}"
def _strip_quotes(s: str, dialect: GXSqlDialect) -> str:
    """Remove one layer of dialect quoting if present; otherwise return unchanged."""
    quote_str = DIALECT_IDENTIFIER_QUOTE_STRINGS[dialect]
    if s.startswith(quote_str) and s.endswith(quote_str):
        return s[1:-1]
    return s
@overload
def wrap_identifier(indentifier: str, dialect: GXSqlDialect = ...) -> quoted_name: ...
@overload
def wrap_identifier(
    indentifier: quoted_name, dialect: GXSqlDialect | None = ...
) -> quoted_name: ...
def wrap_identifier(
    indentifier: str | quoted_name, dialect: GXSqlDialect | None = None
) -> quoted_name:
    """Wrap an identifier in a SQLAlchemy ``quoted_name``; idempotent for
    values that are already ``quoted_name`` instances."""
    if isinstance(indentifier, quoted_name):
        return indentifier
    wo_quotes = _strip_quotes(indentifier, dialect)  # type: ignore[arg-type] # accounted for in overload
    return quoted_name(wo_quotes, quote=True)
| GXSqlDialect |
python | pandas-dev__pandas | pandas/tests/indexes/multi/test_indexing.py | {
"start": 29389,
"end": 37656
} | class ____:
    """Tests for ``in`` (membership) semantics of MultiIndex, including
    NaN/NaT handling and dropped-level behavior."""
    def test_contains_top_level(self):
        midx = MultiIndex.from_product([["A", "B"], [1, 2]])
        assert "A" in midx
        # Top-level label membership must not populate the full-key engine.
        assert "A" not in midx._engine
    def test_contains_with_nat(self):
        # MI with a NaT
        mi = MultiIndex(
            levels=[["C"], date_range("2012-01-01", periods=5)],
            codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
            names=[None, "B"],
        )
        assert ("C", pd.Timestamp("2012-01-01")) in mi
        for val in mi.values:
            assert val in mi
    def test_contains(self, idx):
        # `idx` is a fixture providing a standard two-level MultiIndex.
        assert ("foo", "two") in idx
        assert ("bar", "two") not in idx
        assert None not in idx
    def test_contains_with_missing_value(self):
        # GH#19132
        idx = MultiIndex.from_arrays([[1, np.nan, 2]])
        assert np.nan in idx
        idx = MultiIndex.from_arrays([[1, 2], [np.nan, 3]])
        # NaN alone is not a key of a 2-level index, but the full tuple is.
        assert np.nan not in idx
        assert (1, np.nan) in idx
    def test_multiindex_contains_dropped(self):
        # GH#19027
        # test that dropped MultiIndex levels are not in the MultiIndex
        # despite continuing to be in the MultiIndex's levels
        idx = MultiIndex.from_product([[1, 2], [3, 4]])
        assert 2 in idx
        idx = idx.drop(2)
        # drop implementation keeps 2 in the levels
        assert 2 in idx.levels[0]
        # but it should no longer be in the index itself
        assert 2 not in idx
        # also applies to strings
        idx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
        assert "a" in idx
        idx = idx.drop("a")
        assert "a" in idx.levels[0]
        assert "a" not in idx
    def test_contains_td64_level(self):
        # GH#24570
        tx = pd.timedelta_range("09:30:00", "16:00:00", freq="30 min")
        idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])
        assert tx[0] in idx
        assert "element_not_exit" not in idx
        # Timedelta-like strings are parsed for membership checks.
        assert "0 day 09:30:00" in idx
    def test_large_mi_contains(self, monkeypatch):
        # GH#10645
        # Lower the size cutoff so the "large index" engine path is exercised.
        with monkeypatch.context():
            monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 10)
            result = MultiIndex.from_arrays([range(10), range(10)])
            assert (10, 0) not in result
def test_timestamp_multiindex_indexer():
    # https://github.com/pandas-dev/pandas/issues/26944
    # Partial-string timestamp slicing on the first level of a MultiIndex.
    hourly = date_range("2019-01-01T00:15:33", periods=100, freq="h", name="date")
    frame_index = MultiIndex.from_product([hourly, ["x"], [3]])
    df = DataFrame({"foo": np.arange(len(frame_index))}, frame_index)
    result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"]
    expected_dates = date_range(
        start="2019-01-02T00:15:33",
        end="2019-01-05T03:15:33",
        freq="h",
        name="date",
    )
    expected_index = MultiIndex.from_product([expected_dates, ["x"], [3]])
    # The slice starts 24 hours (= 24 rows) into the original frame.
    should_be = pd.Series(
        data=np.arange(24, len(expected_index) + 24), index=expected_index, name="foo"
    )
    tm.assert_series_equal(result, should_be)
@pytest.mark.parametrize(
    "index_arr,expected,target,algo",
    [
        ([[np.nan, "a", "b"], ["c", "d", "e"]], 0, np.nan, "left"),
        ([[np.nan, "a", "b"], ["c", "d", "e"]], 1, (np.nan, "c"), "right"),
        ([["a", "b", "c"], ["d", np.nan, "d"]], 1, ("b", np.nan), "left"),
    ],
)
def test_get_slice_bound_with_missing_value(index_arr, expected, target, algo):
    # issue 19132: slice bounds must be resolvable for keys containing NaN.
    idx = MultiIndex.from_arrays(index_arr)
    result = idx.get_slice_bound(target, side=algo)
    assert result == expected
@pytest.mark.parametrize(
    "index_arr,expected,start_idx,end_idx",
    [
        ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 2, None), np.nan, 1),
        ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 3, None), np.nan, (2, 5)),
        ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), 3),
        ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), (3, 5)),
    ],
)
def test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_idx):
    # issue 19132: slice_indexer must accept NaN-containing endpoints.
    idx = MultiIndex.from_arrays(index_arr)
    result = idx.slice_indexer(start=start_idx, end=end_idx)
    assert result == expected
@pytest.mark.parametrize(
    "N, expected_dtype",
    [
        (1, "uint8"),  # 2*4*N = 8
        (2, "uint16"),  # 2*4*N = 16
        (4, "uint32"),  # 2*4*N = 32
        (8, "uint64"),  # 2*4*N = 64
        (10, "object"),  # 2*4*N = 80
    ],
)
def test_pyint_engine(N, expected_dtype):
    # GH#18519 : when combinations of codes cannot be represented in 64
    # bits, the index underlying the MultiIndex engine works with Python
    # integers, rather than uint64.
    keys = [
        tuple(arr)
        for arr in [
            [0] * 4 * N,
            [1] * 4 * N,
            [np.nan] * N + [0] * 3 * N,
            [0] * N + [1] * 3 * N,
            [np.nan] * N + [1] * 2 * N + [0] * N,
        ]
    ]
    # Each level contains 3 elements (NaN, 0, 1), and it's represented
    # in 2 bits to store 4 possible values (0=notfound, 1=NaN, 2=0, 3=1), for
    # a total of 2*N*4 = 80 > 64 bits where N=10 and the number of levels is N*4.
    # If we were using a 64 bit engine and truncating the first levels, the
    # fourth and fifth keys would collide; if truncating the last levels, the
    # fifth and sixth; if rotating bits rather than shifting, the third and fifth.
    index = MultiIndex.from_tuples(keys)
    assert index._engine.values.dtype == expected_dtype
    for idx, key_value in enumerate(keys):
        assert index.get_loc(key_value) == idx
        expected = np.arange(idx + 1, dtype=np.intp)
        result = index.get_indexer([keys[i] for i in expected])
        tm.assert_numpy_array_equal(result, expected)
    # With missing key:
    idces = range(len(keys))
    expected = np.array([-1] + list(idces), dtype=np.intp)
    missing = tuple([0, 1, 0, 1] * N)
    result = index.get_indexer([missing] + [keys[i] for i in idces])
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "keys,expected",
    [
        ((slice(None), [5, 4]), [1, 0]),
        ((slice(None), [4, 5]), [0, 1]),
        (([True, False, True], [4, 6]), [0, 2]),
        (([True, False, True], [6, 4]), [0, 2]),
        ((2, [4, 5]), [0, 1]),
        ((2, [5, 4]), [1, 0]),
        (([2], [4, 5]), [0, 1]),
        (([2], [5, 4]), [1, 0]),
    ],
)
def test_get_locs_reordering(keys, expected):
    # GH48384: get_locs should honor the order of list-like level keys.
    idx = MultiIndex.from_arrays(
        [
            [2, 2, 1],
            [4, 5, 6],
        ]
    )
    result = idx.get_locs(keys)
    expected = np.array(expected, dtype=np.intp)
    tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_for_multiindex_with_nans(nulls_fixture):
    # GH37222: null values must match symmetrically in get_indexer.
    idx1 = MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"])
    idx2 = MultiIndex.from_product([["A"], [nulls_fixture, 2.0]], names=["id1", "id2"])
    result = idx2.get_indexer(idx1)
    expected = np.array([-1, 1], dtype=np.intp)
    tm.assert_numpy_array_equal(result, expected)
    result = idx1.get_indexer(idx2)
    expected = np.array([-1, 1], dtype=np.intp)
    tm.assert_numpy_array_equal(result, expected)
def test_get_loc_namedtuple_behaves_like_tuple():
# GH57922
NamedIndex = namedtuple("NamedIndex", ("a", "b"))
multi_idx = MultiIndex.from_tuples(
[NamedIndex("i1", "i2"), NamedIndex("i3", "i4"), NamedIndex("i5", "i6")]
)
for idx in (multi_idx, multi_idx.to_flat_index()):
assert idx.get_loc(NamedIndex("i1", "i2")) == 0
assert idx.get_loc(NamedIndex("i3", "i4")) == 1
assert idx.get_loc(NamedIndex("i5", "i6")) == 2
assert idx.get_loc(("i1", "i2")) == 0
assert idx.get_loc(("i3", "i4")) == 1
assert idx.get_loc(("i5", "i6")) == 2
multi_idx = MultiIndex.from_tuples([("i1", "i2"), ("i3", "i4"), ("i5", "i6")])
for idx in (multi_idx, multi_idx.to_flat_index()):
assert idx.get_loc(NamedIndex("i1", "i2")) == 0
assert idx.get_loc(NamedIndex("i3", "i4")) == 1
assert idx.get_loc(NamedIndex("i5", "i6")) == 2
assert idx.get_loc(("i1", "i2")) == 0
assert idx.get_loc(("i3", "i4")) == 1
assert idx.get_loc(("i5", "i6")) == 2
| TestContains |
python | weaviate__weaviate-python-client | weaviate/embedded.py | {
"start": 11209,
"end": 11821
} | class ____(_EmbeddedBase):
def is_listening(self) -> bool:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self.options.hostname, self.options.port))
return True
except (socket.error, ConnectionRefusedError):
return False
finally:
s.close()
def start(self) -> None:
if self.is_listening():
logger.info(f"embedded weaviate is already listening on port {self.options.port}")
return
super().start()
EmbeddedDB = EmbeddedV3 # needed for BC from v3 -> v4
| EmbeddedV3 |
python | ray-project__ray | rllib/examples/catalogs/mobilenet_v2_encoder.py | {
"start": 902,
"end": 2668
} | class ____(PPOCatalog):
    """PPO catalog that injects a MobileNetV2 encoder for matching image obs."""
    @classmethod
    def _get_encoder_config(
        cls,
        observation_space: gym.Space,
        **kwargs,
    ):
        """Return a MobileNetV2 encoder config when the observation space is a
        Box of exactly MOBILENET_INPUT_SHAPE; otherwise defer to the default
        PPO catalog behavior."""
        if (
            isinstance(observation_space, gym.spaces.Box)
            and observation_space.shape == MOBILENET_INPUT_SHAPE
        ):
            # Inject our custom encoder here, only if the observation space fits it
            return MobileNetV2EncoderConfig()
        else:
            return super()._get_encoder_config(observation_space, **kwargs)
# Create a generic config with our enhanced Catalog
ppo_config = (
    PPOConfig()
    .rl_module(rl_module_spec=RLModuleSpec(catalog_class=MobileNetEnhancedPPOCatalog))
    .env_runners(num_env_runners=0)
    # The following training settings make it so that a training iteration is very
    # quick. This is just for the sake of this example. PPO will not learn properly
    # with these settings!
    .training(train_batch_size_per_learner=32, minibatch_size=16, num_epochs=1)
)
# CartPole's observation space is not compatible with our MobileNetV2 Encoder, so
# this will use the default behaviour of Catalogs
ppo_config.environment("CartPole-v1")
results = ppo_config.build().train()
print(results)
# For this training, we use a RandomEnv with observations of shape
# MOBILENET_INPUT_SHAPE. This will use our custom Encoder.
ppo_config.environment(
    RandomEnv,
    env_config={
        "action_space": gym.spaces.Discrete(2),
        # Test a simple Image observation space.
        "observation_space": gym.spaces.Box(
            0.0,
            1.0,
            shape=MOBILENET_INPUT_SHAPE,
            dtype=np.float32,
        ),
    },
)
results = ppo_config.build().train()
print(results)
# __sphinx_doc_end__
| MobileNetEnhancedPPOCatalog |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol34.py | {
"start": 226,
"end": 275
} | class ____(Protocol):
    """Structural protocol: any object with an ``f() -> int`` method conforms."""
    def f(self) -> int: ...
| X |
python | wandb__wandb | wandb/sdk/launch/builder/abstract.py | {
"start": 673,
"end": 5076
} | class ____(ABC):
    """Abstract plugin class defining the interface needed to build container images for W&B Launch."""
    builder_type: str  # short identifier naming the concrete builder implementation
    environment: AbstractEnvironment  # cloud environment the builder operates in
    registry: AbstractRegistry  # registry the built image is pushed to
    builder_config: Dict[str, Any]  # raw config dict this builder was created from
    @abstractmethod
    def __init__(
        self,
        environment: AbstractEnvironment,
        registry: AbstractRegistry,
        verify: bool = True,
    ) -> None:
        """Initialize a builder.
        Arguments:
            builder_config: The builder config.
            registry: The registry to use.
            verify: Whether to verify the functionality of the builder.
        Raises:
            LaunchError: If the builder cannot be initialized or verified.
        """
        raise NotImplementedError
    @classmethod
    @abstractmethod
    def from_config(
        cls,
        config: dict,
        environment: AbstractEnvironment,
        registry: AbstractRegistry,
    ) -> "AbstractBuilder":
        """Create a builder from a config dictionary.
        Arguments:
            config: The config dictionary.
            environment: The environment to use.
            registry: The registry to use.
            verify: Whether to verify the functionality of the builder.
            login: Whether to login to the registry immediately.
        Returns:
            The builder.
        """
        raise NotImplementedError
    @abstractmethod
    async def build_image(
        self,
        launch_project: LaunchProject,
        entrypoint: EntryPoint,
        job_tracker: Optional["JobAndRunStatusTracker"] = None,
    ) -> str:
        """Build the image for the given project.
        Arguments:
            launch_project: The project to build.
            build_ctx_path: The path to the build context.
        Returns:
            The image name.
        """
        raise NotImplementedError
    @abstractmethod
    async def verify(self) -> None:
        """Verify that the builder can be used to build images.
        Raises:
            LaunchError: If the builder cannot be used to build images.
        """
        raise NotImplementedError
def registry_from_uri(uri: str) -> AbstractRegistry:
    """Create a registry helper object from a uri.
    This function parses the URI and determines which supported registry it
    belongs to. It then creates a registry helper object for that registry.
    The supported remote registry types are:
    - Azure Container Registry
    - Google Container Registry
    - AWS Elastic Container Registry
    The format of the URI is as follows:
    - Azure Container Registry: <registry-name>.azurecr.io/<repo-name>/<image-name>
    - Google Container Registry: <location>-docker.pkg.dev/<project-id>/<repo-name>/<image-name>
    - AWS Elastic Container Registry: <account-id>.dkr.ecr.<region>.amazonaws.com/<repo-name>/<image-name>
    Our classification of the registry is based on the domain name. For example,
    if the uri contains `.azurecr.io`, we classify it as an Azure
    Container Registry. If the uri contains `.dkr.ecr`, we classify
    it as an AWS Elastic Container Registry. If the uri contains
    `-docker.pkg.dev`, we classify it as a Google Artifact Registry.
    This function will attempt to load the appropriate cloud helpers for the
    `https://` prefix is optional for all of the above.
    Arguments:
        uri: The uri to create a registry from.
    Returns:
        The registry.
    Raises:
        LaunchError: If the registry helper cannot be loaded for the given URI.
    """
    # The scheme is irrelevant for classification; strip it if present.
    if uri.startswith("https://"):
        uri = uri[len("https://") :]
    # Imports are deferred so each cloud SDK is only loaded when matched.
    if AZURE_CONTAINER_REGISTRY_URI_REGEX.match(uri) is not None:
        from wandb.sdk.launch.registry.azure_container_registry import (
            AzureContainerRegistry,
        )
        return AzureContainerRegistry(uri=uri)
    elif GCP_ARTIFACT_REGISTRY_URI_REGEX.match(uri) is not None:
        from wandb.sdk.launch.registry.google_artifact_registry import (
            GoogleArtifactRegistry,
        )
        return GoogleArtifactRegistry(uri=uri)
    elif ELASTIC_CONTAINER_REGISTRY_URI_REGEX.match(uri) is not None:
        from wandb.sdk.launch.registry.elastic_container_registry import (
            ElasticContainerRegistry,
        )
        return ElasticContainerRegistry(uri=uri)
    # No known cloud registry matched: fall back to an anonymous registry.
    return AnonynmousRegistry(uri=uri)
| AbstractBuilder |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 63263,
"end": 67194
} | class ____(object):
def __init__(self, *args, **kwargs):
self.inputState = TreeParserSharedInputState()
self._retTree = None
self.tokenNames = []
self.returnAST = None
self.astFactory = ASTFactory()
self.traceDepth = 0
def getAST(self):
return self.returnAST
def getASTFactory(self):
return self.astFactory
def getTokenName(self,num) :
return self.tokenNames[num]
def getTokenNames(self):
return self.tokenNames
def match(self,t,set) :
assert isinstance(set,int) or isinstance(set,BitSet)
if not t or t == ASTNULL:
raise MismatchedTokenException(self.getTokenNames(), t,set, False)
if isinstance(set,int) and t.getType() != set:
raise MismatchedTokenException(self.getTokenNames(), t,set, False)
if isinstance(set,BitSet) and not set.member(t.getType):
raise MismatchedTokenException(self.getTokenNames(), t,set, False)
def matchNot(self,t, ttype) :
if not t or (t == ASTNULL) or (t.getType() == ttype):
raise MismatchedTokenException(self.getTokenNames(), t, ttype, True)
def reportError(self,ex):
print("error:",ex, file=sys.stderr)
def reportWarning(self, s):
print("warning:",s)
def setASTFactory(self,f):
self.astFactory = f
def setASTNodeType(self,nodeType):
self.setASTNodeClass(nodeType)
def setASTNodeClass(self,nodeType):
self.astFactory.setASTNodeType(nodeType)
def traceIndent(self):
print(" " * self.traceDepth)
def traceIn(self,rname,t):
self.traceDepth += 1
self.traceIndent()
print(("> " + rname + "(" +
ifelse(t,str(t),"null") + ")" +
ifelse(self.inputState.guessing>0,"[guessing]","")))
def traceOut(self,rname,t):
self.traceIndent()
print(("< " + rname + "(" +
ifelse(t,str(t),"null") + ")" +
ifelse(self.inputState.guessing>0,"[guessing]","")))
self.traceDepth -= 1
### wh: moved from ASTFactory to TreeParser
def addASTChild(self,currentAST, child):
if not child:
return
if not currentAST.root:
currentAST.root = child
elif not currentAST.child:
currentAST.root.setFirstChild(child)
else:
currentAST.child.setNextSibling(child)
currentAST.child = child
currentAST.advanceChildToEnd()
### wh: moved from ASTFactory to TreeParser
def makeASTRoot(self,currentAST,root):
if root:
### Add the current root as a child of new root
root.addChild(currentAST.root)
### The new current child is the last sibling of the old root
currentAST.child = currentAST.root
currentAST.advanceChildToEnd()
### Set the new root
currentAST.root = root
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### funcs to work on trees ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
def rightmost(ast):
    """Follow ``right`` sibling links to the last node; returns None for a
    falsy input."""
    if not ast:
        return None
    node = ast
    while node.right:
        node = node.right
    return node
def cmptree(s,t,partial):
    """Compare two sibling lists (and their subtrees) for equality.

    With ``partial`` true, `s` may be a prefix of `t`'s structure."""
    a, b = s, t
    while a and b:
        ### as a quick optimization, check roots first.
        if not a.equals(b):
            return False
        ### if roots match, do full list match test on children.
        if not cmptree(a.getFirstChild(), b.getFirstChild(), partial):
            return False
        a = a.getNextSibling()
        b = b.getNextSibling()
    return ifelse(partial, not b, not a and not b)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### AST ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| TreeParser |
python | getsentry__sentry | src/sentry/preprod/api/models/launchpad.py | {
"start": 239,
"end": 490
} | class ____(BaseModel):
    """Payload reporting a FAILED size-analysis state for a preprod artifact."""
    model_config = ConfigDict()
    # Discriminator field: always the FAILED size-analysis state.
    state: Literal[PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED] = (
        PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED
    )
    # Machine-readable failure code.
    error_code: int
    # Human-readable failure description.
    error_message: str
| PutSizeFailed |
python | python-markdown__markdown | markdown/blockprocessors.py | {
"start": 6508,
"end": 10571
} | class ____(BlockProcessor):
    """ Process children of list items.
    Example
    * a list item
    process this part
    or this part
    """
    ITEM_TYPES = ['li']
    """ List of tags used for list items. """
    LIST_TYPES = ['ul', 'ol']
    """ Types of lists this processor can operate on. """
    def __init__(self, *args):
        super().__init__(*args)
        # Matches runs of tab_length-sized indents at the start of a block.
        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length)
    def test(self, parent: etree.Element, block: str) -> bool:
        # Claim indented blocks only when we're not already detabbing and the
        # parent (or its last child) is list-related.
        return block.startswith(' '*self.tab_length) and \
            not self.parser.state.isstate('detabbed') and \
            (parent.tag in self.ITEM_TYPES or
                (len(parent) and parent[-1] is not None and
                    (parent[-1].tag in self.LIST_TYPES)))
    def run(self, parent: etree.Element, blocks: list[str]) -> None:
        """Attach one indented block to the appropriate list item."""
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)
        # Guard against re-entering this processor while parsing the child.
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # It's possible that this parent has a `ul` or `ol` child list
            # with a member. If that is the case, then that should be the
            # parent. This is intended to catch the edge case of an indented
            # list whose first member was parsed previous to this point
            # see `OListProcessor`
            if len(parent) and parent[-1].tag in self.LIST_TYPES:
                self.parser.parseBlocks(parent[-1], [block])
            else:
                # The parent is already a `li`. Just parse the child block.
                self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a `li`. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (`ol` or `ul`) which has children.
            # Assume the last child `li` is the parent of this block.
            if sibling[-1].text:
                # If the parent `li` has text, that text needs to be moved to a `p`
                # The `p` must be 'inserted' at beginning of list in the event
                # that other children already exist i.e.; a nested sub-list.
                p = etree.Element('p')
                p.text = sibling[-1].text
                sibling[-1].text = ''
                sibling[-1].insert(0, p)
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()
    def create_item(self, parent: etree.Element, block: str) -> None:
        """ Create a new `li` and parse the block with it as the parent. """
        li = etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])
    def get_level(self, parent: etree.Element, block: str) -> tuple[int, etree.Element]:
        """ Get level of indentation based on list level. """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            indent_level = len(m.group(1))/self.tab_length
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tight-list - so we already are at correct parent.
            level = 1
        else:
            # We're in a loose-list - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if (child is not None and
               (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of `indent_level`,
                # we have a code block. So we stop here.
                break
        return level, parent
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 265446,
"end": 272177
} | class ____(TypeCompiler):
    """Default type compiler rendering SQLAlchemy types as DDL type strings.

    ``visit_<UPPERCASE>`` methods emit concrete SQL type names; the lowercase
    ``visit_<generic>`` methods route SQLAlchemy's generic types onto them.
    Subclasses override individual methods to customize the rendered DDL.
    """
    def visit_FLOAT(self, type_: sqltypes.Float[Any], **kw: Any) -> str:
        return "FLOAT"
    def visit_DOUBLE(self, type_: sqltypes.Double[Any], **kw: Any) -> str:
        return "DOUBLE"
    def visit_DOUBLE_PRECISION(
        self, type_: sqltypes.DOUBLE_PRECISION[Any], **kw: Any
    ) -> str:
        return "DOUBLE PRECISION"
    def visit_REAL(self, type_: sqltypes.REAL[Any], **kw: Any) -> str:
        return "REAL"
    def visit_NUMERIC(self, type_: sqltypes.Numeric[Any], **kw: Any) -> str:
        # Precision/scale are optional and rendered only when present.
        if type_.precision is None:
            return "NUMERIC"
        elif type_.scale is None:
            return "NUMERIC(%(precision)s)" % {"precision": type_.precision}
        else:
            return "NUMERIC(%(precision)s, %(scale)s)" % {
                "precision": type_.precision,
                "scale": type_.scale,
            }
    def visit_DECIMAL(self, type_: sqltypes.DECIMAL[Any], **kw: Any) -> str:
        if type_.precision is None:
            return "DECIMAL"
        elif type_.scale is None:
            return "DECIMAL(%(precision)s)" % {"precision": type_.precision}
        else:
            return "DECIMAL(%(precision)s, %(scale)s)" % {
                "precision": type_.precision,
                "scale": type_.scale,
            }
    def visit_INTEGER(self, type_: sqltypes.Integer, **kw: Any) -> str:
        return "INTEGER"
    def visit_SMALLINT(self, type_: sqltypes.SmallInteger, **kw: Any) -> str:
        return "SMALLINT"
    def visit_BIGINT(self, type_: sqltypes.BigInteger, **kw: Any) -> str:
        return "BIGINT"
    def visit_TIMESTAMP(self, type_: sqltypes.TIMESTAMP, **kw: Any) -> str:
        return "TIMESTAMP"
    def visit_DATETIME(self, type_: sqltypes.DateTime, **kw: Any) -> str:
        return "DATETIME"
    def visit_DATE(self, type_: sqltypes.Date, **kw: Any) -> str:
        return "DATE"
    def visit_TIME(self, type_: sqltypes.Time, **kw: Any) -> str:
        return "TIME"
    def visit_CLOB(self, type_: sqltypes.CLOB, **kw: Any) -> str:
        return "CLOB"
    def visit_NCLOB(self, type_: sqltypes.Text, **kw: Any) -> str:
        return "NCLOB"
    def _render_string_type(
        self, name: str, length: Optional[int], collation: Optional[str]
    ) -> str:
        # Shared renderer for CHAR/VARCHAR/TEXT variants: optional length and
        # optional COLLATE clause.
        text = name
        if length:
            text += f"({length})"
        if collation:
            text += f' COLLATE "{collation}"'
        return text
    def visit_CHAR(self, type_: sqltypes.CHAR, **kw: Any) -> str:
        return self._render_string_type("CHAR", type_.length, type_.collation)
    def visit_NCHAR(self, type_: sqltypes.NCHAR, **kw: Any) -> str:
        return self._render_string_type("NCHAR", type_.length, type_.collation)
    def visit_VARCHAR(self, type_: sqltypes.String, **kw: Any) -> str:
        return self._render_string_type(
            "VARCHAR", type_.length, type_.collation
        )
    def visit_NVARCHAR(self, type_: sqltypes.NVARCHAR, **kw: Any) -> str:
        return self._render_string_type(
            "NVARCHAR", type_.length, type_.collation
        )
    def visit_TEXT(self, type_: sqltypes.Text, **kw: Any) -> str:
        return self._render_string_type("TEXT", type_.length, type_.collation)
    def visit_UUID(self, type_: sqltypes.Uuid[Any], **kw: Any) -> str:
        return "UUID"
    def visit_BLOB(self, type_: sqltypes.LargeBinary, **kw: Any) -> str:
        return "BLOB"
    def visit_BINARY(self, type_: sqltypes.BINARY, **kw: Any) -> str:
        return "BINARY" + (type_.length and "(%d)" % type_.length or "")
    def visit_VARBINARY(self, type_: sqltypes.VARBINARY, **kw: Any) -> str:
        return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
    def visit_BOOLEAN(self, type_: sqltypes.Boolean, **kw: Any) -> str:
        return "BOOLEAN"
    def visit_uuid(self, type_: sqltypes.Uuid[Any], **kw: Any) -> str:
        # Fall back to CHAR(32) storage when native UUID isn't requested or
        # the dialect lacks support.
        if not type_.native_uuid or not self.dialect.supports_native_uuid:
            return self._render_string_type("CHAR", length=32, collation=None)
        else:
            return self.visit_UUID(type_, **kw)
    def visit_large_binary(
        self, type_: sqltypes.LargeBinary, **kw: Any
    ) -> str:
        return self.visit_BLOB(type_, **kw)
    def visit_boolean(self, type_: sqltypes.Boolean, **kw: Any) -> str:
        return self.visit_BOOLEAN(type_, **kw)
    def visit_time(self, type_: sqltypes.Time, **kw: Any) -> str:
        return self.visit_TIME(type_, **kw)
    def visit_datetime(self, type_: sqltypes.DateTime, **kw: Any) -> str:
        return self.visit_DATETIME(type_, **kw)
    def visit_date(self, type_: sqltypes.Date, **kw: Any) -> str:
        return self.visit_DATE(type_, **kw)
    def visit_big_integer(self, type_: sqltypes.BigInteger, **kw: Any) -> str:
        return self.visit_BIGINT(type_, **kw)
    def visit_small_integer(
        self, type_: sqltypes.SmallInteger, **kw: Any
    ) -> str:
        return self.visit_SMALLINT(type_, **kw)
    def visit_integer(self, type_: sqltypes.Integer, **kw: Any) -> str:
        return self.visit_INTEGER(type_, **kw)
    def visit_real(self, type_: sqltypes.REAL[Any], **kw: Any) -> str:
        return self.visit_REAL(type_, **kw)
    def visit_float(self, type_: sqltypes.Float[Any], **kw: Any) -> str:
        return self.visit_FLOAT(type_, **kw)
    def visit_double(self, type_: sqltypes.Double[Any], **kw: Any) -> str:
        return self.visit_DOUBLE(type_, **kw)
    def visit_numeric(self, type_: sqltypes.Numeric[Any], **kw: Any) -> str:
        return self.visit_NUMERIC(type_, **kw)
    def visit_string(self, type_: sqltypes.String, **kw: Any) -> str:
        return self.visit_VARCHAR(type_, **kw)
    def visit_unicode(self, type_: sqltypes.Unicode, **kw: Any) -> str:
        return self.visit_VARCHAR(type_, **kw)
    def visit_text(self, type_: sqltypes.Text, **kw: Any) -> str:
        return self.visit_TEXT(type_, **kw)
    def visit_unicode_text(
        self, type_: sqltypes.UnicodeText, **kw: Any
    ) -> str:
        return self.visit_TEXT(type_, **kw)
    def visit_enum(self, type_: sqltypes.Enum, **kw: Any) -> str:
        return self.visit_VARCHAR(type_, **kw)
    def visit_null(self, type_, **kw):
        # NullType means the column was created without a type; that is a
        # user error by the time DDL is being generated.
        raise exc.CompileError(
            "Can't generate DDL for %r; "
            "did you forget to specify a "
            "type on this Column?" % type_
        )
    def visit_type_decorator(
        self, type_: TypeDecorator[Any], **kw: Any
    ) -> str:
        # Unwrap the decorator and compile the dialect-specific impl type.
        return self.process(type_.type_engine(self.dialect), **kw)
    def visit_user_defined(
        self, type_: UserDefinedType[Any], **kw: Any
    ) -> str:
        return type_.get_col_spec(**kw)
| GenericTypeCompiler |
python | pytorch__pytorch | torch/jit/_script.py | {
"start": 6646,
"end": 8982
} | class ____(OrderedDictWrapper):
    """Dict-like view of a ScriptModule's submodules, backed by the C++
    ModuleDict but mirrored in a Python dict (see __init__ comment)."""
    def __init__(self, module, python_dict):
        super().__init__(torch._C.ModuleDict(module))
        # contains _both_ script modules and non-script python-only modules
        # because script modules are subclassed in python and the
        # C++ Module class will not hold references to them,
        # to ensure that you always get the same python value here
        # we store it in the python dict as well
        self._python_modules = python_dict
    def items(self):
        r = self._python_modules.items()
        return r
    def __contains__(self, k):
        return k in self._python_modules
    def __setitem__(self, k, v):
        # Cases where sub-module can be re-assigned after ScriptModule construction
        # 1. If the attr is an module interface type, it's guaranteed that the module is
        #    not inlined in the graph, so it's safe to swap a new ScriptModule in.
        # 2. if the new value if a ScriptModule with the same JIT type, IR won't change
        #    and it's legit to swap a new module in.
        # In these two cases we allow swapping a new scripted module and update the
        # corresponding python module dict to keep sync.
        # Note: the value to be swapped in has to be ScriptModule instead of nn.Module,
        # otherwise it's illegal and we throw error.
        if isinstance(v, ScriptModule):
            # Keep the C++ module and the Python mirror in sync.
            self._c.setattr(k, v)
            self._python_modules[k] = v
        else:
            raise RuntimeError(
                "Cannot re-assign modules in a ScriptModule with non-scripted "
                f"module, tried to replace existing module '{k}': {v}"
            )
    def __getitem__(self, k):
        return self._python_modules[k]
# For each user-defined class that subclasses ScriptModule, this meta-class:
# (1) finds all the methods annotated with @script_method in a ScriptModule and
# removes them from the class attributes
# (2) puts a wrapper around the class's __init__ method to recursively compile
# all of the script_methods with the module after the original __init__ has
# run. This has to occur after the user-defined __init__ so that submodules and
# parameters are initialized _before_ the script compiler resolve references to
# `self.param` or `self.module`.
| OrderedModuleDict |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/option_list.py | {
"start": 237,
"end": 1680
} | class ____(App[None]):
    """Snapshot-test app exercising the different ways of populating an
    OptionList: at construction, per-option, in bulk, and after mount."""
    BINDINGS = [("a", "add", "add")]
    def compose( self ) -> ComposeResult:
        with Horizontal():
            # Populated directly via constructor arguments.
            yield OptionList(
                "One",
                Option("Two"),
                None,
                Text.from_markup("[red]Three[/]")
            )
            yield OptionList(id="later-individual")
            yield OptionList(id="later-at-once")
            yield OptionList(id="after-mount")
    def on_mount(self) -> None:
        options: list[None | str | Text | Option] = [
            "One",
            Option("Two"),
            None,
            Text.from_markup("[red]Three[/]"),
        ]
        # Populate one list option-by-option...
        option_list = self.query_one("#later-individual", OptionList)
        for option in options:
            option_list.add_option(option)
        option_list.highlighted = 0
        # ...and another with a single bulk call.
        option_list = self.query_one("#later-at-once", OptionList)
        option_list.add_options([
            "One",
            Option("Two"),
            None,
            Text.from_markup("[red]Three[/]"),
        ])
        option_list.highlighted = 0
    def action_add(self):
        # Triggered by the "a" binding after the app is running.
        option_list = self.query_one("#after-mount", OptionList)
        option_list.add_options([
            "One",
            Option("Two"),
            None,
            Text.from_markup("[red]Three[/]"),
        ])
        option_list.highlighted = 0
if __name__ == "__main__":
    OptionListApp().run()
| OptionListApp |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/random_ops.py | {
"start": 1472,
"end": 2028
} | class ____(dataset_ops.DatasetV1Adapter):
    """A `Dataset` of pseudorandom values."""
    @functools.wraps(RandomDatasetV2.__init__)
    def __init__(self, seed=None):
        # V1 is a thin adapter over the V2 implementation.
        wrapped = RandomDatasetV2(seed)
        super(RandomDatasetV1, self).__init__(wrapped)
# Public alias resolves to V1 or V2 depending on TF2 mode at import time.
if tf2.enabled():
    RandomDataset = RandomDatasetV2
else:
    RandomDataset = RandomDatasetV1
def _tf2_callback():
    # Re-resolve the alias if TF2 mode is toggled after this module loads.
    global RandomDataset
    if tf2.enabled():
        RandomDataset = RandomDatasetV2
    else:
        RandomDataset = RandomDatasetV1
v2_compat.register_data_v2_callback(_tf2_callback)
| RandomDatasetV1 |
python | pytorch__pytorch | test/inductor/test_external_callables.py | {
"start": 362,
"end": 1005
} | class ____(torch.nn.Module):
def __init__(self):
super().__init__()
self.matrix = torch.nn.Parameter(torch.eye(128, 128) * 2, requires_grad=True)
def forward(self, x):
return torch.matmul(x, self.matrix)
# torch.add performs better than torch.mm and got chosen during tuning
def matmul_cpu(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
    """External-callable CPU kernel: writes a + b into ``out``."""
    torch.add(a, b, out=out)
def matmul_dup(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
    """Duplicate of matmul_cpu, used to register a second callable."""
    torch.add(a, b, out=out)
def matmul_cuda(a: torch.Tensor, b: torch.Tensor, out: torch.Tensor) -> None:
    """External-callable CUDA kernel: writes a + b into ``out``."""
    torch.add(a, b, out=out)
| MatMulModule |
python | kamyu104__LeetCode-Solutions | Python/remove-letter-to-equalize-frequency.py | {
"start": 75,
"end": 645
} | class ____(object):
def equalFrequency(self, word):
"""
:type word: str
:rtype: bool
"""
cnt = collections.Counter(collections.Counter(word).itervalues())
if len(cnt) > 2:
return False
if len(cnt) == 1:
a = cnt.keys()[0]
return a == 1 or cnt[a] == 1
a, b = cnt.keys()
if a > b:
a, b = b, a
return (a == 1 and cnt[a] == 1) or (a+1 == b and cnt[b] == 1)
# Time: O(26 * n)
# Space: O(1)
import collections
# brute force, freq table
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.